diff --git a/bin/tasks/radar.js b/bin/tasks/radar.js index 4988b9c..cbb5029 100755 --- a/bin/tasks/radar.js +++ b/bin/tasks/radar.js @@ -113,7 +113,7 @@ const addRevisionToItem = (item = { body: '', info: '', }, revision) => { - let newItem = Object.assign(Object.assign(Object.assign({}, item), revision), { body: ignoreEmptyRevisionBody(revision, item) }); + let newItem = Object.assign(Object.assign(Object.assign({}, item), revision), { ring: revision.ring ? revision.ring : item.ring, body: ignoreEmptyRevisionBody(revision, item) }); if (revisionCreatesNewHistoryEntry(revision)) { newItem = Object.assign(Object.assign({}, newItem), { revisions: [revision, ...newItem.revisions] }); } diff --git a/src/rd.json_ b/src/rd.json_ index 1a7c18b..e861138 100644 --- a/src/rd.json_ +++ b/src/rd.json_ @@ -1 +1 @@ -{"items":[{"flag":"default","featured":true,"revisions":[{"name":"adr","release":"2018-03-01","title":"ADR","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/adr.md","body":"
Architecture Decision Records
\nADR is a lightweight documentation of important architecture decisions taken by the team.\nWithout documentation of the architecture and the architecture decisions, new team members can only do two things:
\nIt goes without saying that both options aren't right.
\nTherefore, we suggest documenting the important architecture decisions. We use a simple tool such as https://github.com/npryce/adr-tools and store them in version control.\nIn larger projects with many teams we also establish a regular "architecture board / COI" with regular meetings.\nOften, the architecture decisions are taken in such meetings.
\nThe main purpose of this documentation is to:
\nArchitecture Decision Records
\nADR is a lightweight documentation of important architecture decisions taken by the team.\nWithout documentation of the architecture and the architecture decisions, new team members can only do two things:
\nIt goes without saying that both options aren't right.
\nTherefore, we suggest documenting the important architecture decisions. We use a simple tool such as https://github.com/npryce/adr-tools and store them in version control.\nIn larger projects with many teams we also establish a regular "architecture board / COI" with regular meetings.\nOften, the architecture decisions are taken in such meetings.
\nThe main purpose of this documentation is to:
\nAkeneo is a Product Information Management system (also known as PIM, PCM or Product MDM) and helps centralize and harmonize all the technical and marketing information of products.
\nWe use Akeneo with success in our projects and products (For example in OM3), where it is responsible for:
\nThe system has a modern and friendly user interface and product managers find things such as completenesscheck, translation views and mass editing very helpful.
\nWith delta export and import capabilities and the usage of Mongo DB as persitence backend, the performance is acceptable. We miss a richer API - but the system is extendable and based on PHP/Symfony 2.
\n"}],"name":"akeneo","title":"akeneo.md","quadrant":"tools","body":"Akeneo is a Product Information Management system (also known as PIM, PCM or Product MDM) and helps centralize and harmonize all the technical and marketing information of products.
\nWe use Akeneo with success in our projects and products (For example in OM3), where it is responsible for:
\nThe system has a modern and friendly user interface and product managers find things such as completenesscheck, translation views and mass editing very helpful.
\nWith delta export and import capabilities and the usage of Mongo DB as persitence backend, the performance is acceptable. We miss a richer API - but the system is extendable and based on PHP/Symfony 2.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/akeneo.md"},{"flag":"default","featured":true,"revisions":[{"name":"akka","release":"2017-03-01","title":"Akka","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/akka.md","body":"With the growing adoption of microservice-based architecures, the interest in frameworks and tools that make building systems that follow the reactive manifesto possible has increased.
\nAkka provides you a toolkit and runtime based on the Actor model known from Erlang to reach this goal.
\nIt's one of the most-adopted toolkits in its space with its key contributors beeing heavily involved in the overall movement of the reactive community as well.\nAt AOE, we use Akka when we need high-performance, efficient data processing or where its finite state machine plays nicely with the domain of the application. It is worth mentioning that the actor model might come with extra complexity and therefore should be used in problem spaces where the advantages of this approach bring enough value and no accidental complexity.
\n"}],"name":"akka","title":"Akka","ring":2,"quadrant":"languages-and-frameworks","body":"With the growing adoption of microservice-based architecures, the interest in frameworks and tools that make building systems that follow the reactive manifesto possible has increased.
\nAkka provides you a toolkit and runtime based on the Actor model known from Erlang to reach this goal.
\nIt's one of the most-adopted toolkits in its space with its key contributors beeing heavily involved in the overall movement of the reactive community as well.\nAt AOE, we use Akka when we need high-performance, efficient data processing or where its finite state machine plays nicely with the domain of the application. It is worth mentioning that the actor model might come with extra complexity and therefore should be used in problem spaces where the advantages of this approach bring enough value and no accidental complexity.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/akka.md"},{"flag":"changed","featured":false,"revisions":[{"name":"akka-streams","release":"2019-11-01","title":"Akka Streams","ring":1,"quadrant":"languages-and-frameworks","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/akka-streams.md","body":"Updated to "adopt"
\n"},{"name":"akka-streams","release":"2018-03-01","title":"Akka Streams","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/akka-streams.md","body":"In our backend services, we frequently encounter the task to transform data\ncoming from and uploading to external sources and services.
\nBuilding more complex data transformation processes with Akka Actors has proven\nvery difficult for us in the past.
\nSeeing this data as a stream of elements could allow handling them piece by\npiece and only keeping as much of the data in-process as can currently be\nhandled.
\nAkka Streams is\na Reactive Streams implementation that\nprovides a very end-user friendly API for setting up streams for data\nprocessing that are bounded in resource usage and efficient. It uses the Akka\nActor Framework to execute these streams in an asynchronous and parallel\nfashion exploiting today's multi-core architectures without having the user to\ninteract with Actors directly. It handles things such as message resending in\nfailure cases and preventing message overflow. It is also interoperable with\nother Reactive Streams implementations.
\nOur first trials with Akka Streams were promising but we haven't yet implemented\ncomplex services with it.
\nWe will continue looking into it together with the\nAlpakka Connectors for integration\nwork.
\n"}],"name":"akka-streams","title":"Akka Streams","ring":1,"quadrant":"languages-and-frameworks","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/akka-streams.md"},{"flag":"changed","featured":true,"revisions":[{"name":"alpakka","release":"2019-11-01","title":"Alpakka","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/alpakka.md","body":"Updated to "adopt"
\n"},{"name":"alpakka","release":"2018-03-01","title":"Alpakka","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/alpakka.md","body":"When using Akka Streams to build\nreactive data transformation services you usually need to connect to several\ndifferent services such as FTP, S3 buckets, AMQP brokers or different databases.
\nAlpakka provides\nintegration building blocks for Akka Streams to access these services in a\nreactive fashion and contains transformations for working with XML, CSV or\nJSON structured data.
\nCombined, Akka Streams and Alpakka enable us to build small reactive\nintegration services with minimal resource consumption and good performance, and\nare a good alternative to larger ESB solutions or integration tools.
\n"}],"name":"alpakka","title":"Alpakka","ring":1,"quadrant":"languages-and-frameworks","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/alpakka.md"},{"flag":"default","featured":true,"revisions":[{"name":"angular","release":"2018-03-01","title":"Angular","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/angular.md","body":"In addition to numerous major upgrades from version 2 to 5, which often needed a "hands-on" approach, a lot has happened in the Angular \necosystem in 2017. Specifically, the improvements in the HTTP-Client, which now requires less coding effort. Or \nthe vast improvements on angular.cli such as aot (ahead of time compile) for faster rendering, fewer requests and \nmuch smaller builds, to just name the most important ones.
\nWe have achieved particularly good results using Angular in large and medium-size projects. Actually, \nit's our framework-of-choice in our telecommunication sector teams as a single-page application framework (SPA) for microservice front \nends.
\nThe convenient scaffolding of unit- and end-to-end-tests provides a quality-driven workflow.\nAlso, the module- and component architecture helps to keep the codebase understandable end maintainable.
\n"},{"name":"angular","release":"2017-03-01","title":"Angular","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/angular.md","body":"The latest version of the Angular Framework, which is used for large single-page applications.
\nAngular is a complete rewrite of Angular 1 — many things have changed compared to the first version. The latest best practices and toolings from the JavaScript community have found their way into Angular.
\nIt supports DI (dependency injection), it has a clean inheritance and a good separation of concerns. Angular follows the web component standards to avoid negative side effects between components.
\nWe think that Angular is well-structured on both a development and an application level.
\nWhen talking about Angular, we must consider the angular.cli as well, which provides a huge level of intelligent automation along the development process and project setup.
\n"}],"name":"angular","title":"Angular","ring":2,"quadrant":"languages-and-frameworks","body":"In addition to numerous major upgrades from version 2 to 5, which often needed a "hands-on" approach, a lot has happened in the Angular \necosystem in 2017. Specifically, the improvements in the HTTP-Client, which now requires less coding effort. Or \nthe vast improvements on angular.cli such as aot (ahead of time compile) for faster rendering, fewer requests and \nmuch smaller builds, to just name the most important ones.
\nWe have achieved particularly good results using Angular in large and medium-size projects. Actually, \nit's our framework-of-choice in our telecommunication sector teams as a single-page application framework (SPA) for microservice front \nends.
\nThe convenient scaffolding of unit- and end-to-end-tests provides a quality-driven workflow.\nAlso, the module- and component architecture helps to keep the codebase understandable end maintainable.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/angular.md"},{"flag":"default","featured":false,"revisions":[{"name":"ant","release":"2017-03-01","title":"Ant","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/ant.md","body":"Apache Ant was build in 1997 to have something like Make in the C/C++ world for Java. Ant uses xml files to describe steps required to produce executable artifacts from source code. The main concepts of tasks and targets are programmable in an imperative style.
\nApache Ant was and is widely used by large software projects. Our recommendation is to stop using Apache Ant for new projects. If you are free to choose, we recommend Gradle as an Apache Ant replacement.
\n"}],"name":"ant","title":"ant.md","quadrant":"tools","body":"Apache Ant was build in 1997 to have something like Make in the C/C++ world for Java. Ant uses xml files to describe steps required to produce executable artifacts from source code. The main concepts of tasks and targets are programmable in an imperative style.
\nApache Ant was and is widely used by large software projects. Our recommendation is to stop using Apache Ant for new projects. If you are free to choose, we recommend Gradle as an Apache Ant replacement.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/ant.md"},{"flag":"changed","featured":false,"revisions":[{"name":"anypoint-platform","release":"2019-11-01","featured":false,"title":"anypoint-platform.md","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/anypoint-platform.md","body":"Recently our teams migrated some project from anypoint to "Apache Camel" or use "Alpakka" for integration work.
\n"},{"name":"anypoint-platform","release":"2017-03-01","title":"Anypoint platform","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/anypoint-platform.md","body":"Anypoint platform (formally known as Mule or Mule ESB) is an Enterprise Integration Platform written in Java.
\nAnypoint provide tools to use Enterprise Integration Patterns (EAI) and has a high number of ready-to-use connectors to communicate with software tools such as SAP, Salesforce, etc.
\nAnypoint Community Version is Open Source and contribution is possible. The platform is pluggable with own connectors. Mulesoft is also driving the raml specification and related Open Source tools.
\nAOE is a Mulesoft Partner and we use both the Community and Enterprise Versions of Anypoint. We use Anypoint as an API Gateway to combine and transform data from multiple backends. We use it as ESB or Integration platform for loose coupling of software components. And we also use it as legacy modernization to provide modern APIs for legacy- or foreign software.
\n"}],"name":"anypoint-platform","title":"anypoint-platform.md","quadrant":"tools","body":"Recently our teams migrated some project from anypoint to "Apache Camel" or use "Alpakka" for integration work.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/anypoint-platform.md"},{"flag":"new","featured":true,"revisions":[{"name":"aoe-sso","release":"2019-11-01","title":"AOE SSO","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/aoe-sso.md","body":"To improve security and user experience we decided to install an organisation wide SSO and use OpenID Connect integrate with existing tools.\nWe use Keycloak as the SSO server, which is backed by our LDAP.\nThis also helps to implement new infrastructure security based on "BeyondCorp".
\n"}],"name":"aoe-sso","title":"AOE SSO","ring":1,"quadrant":"platforms-and-aoe-services","body":"To improve security and user experience we decided to install an organisation wide SSO and use OpenID Connect integrate with existing tools.\nWe use Keycloak as the SSO server, which is backed by our LDAP.\nThis also helps to implement new infrastructure security based on "BeyondCorp".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/aoe-sso.md"},{"flag":"new","featured":false,"revisions":[{"name":"apache-camel","release":"2019-11-01","title":"Apache Camel","ring":2,"quadrant":"languages-and-frameworks","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apache-camel.md","body":""Camel" is an open source integration framework that empowers you to quickly and easily integrate various systems consuming or producing data.
\nOur teams are using Apache Camel as API Gateway that offers APIs and takes care of Federation to various Backends as well as Authorisation tasks.
\n"}],"name":"apache-camel","title":"Apache Camel","ring":2,"quadrant":"languages-and-frameworks","body":""Camel" is an open source integration framework that empowers you to quickly and easily integrate various systems consuming or producing data.
\nOur teams are using Apache Camel as API Gateway that offers APIs and takes care of Federation to various Backends as well as Authorisation tasks.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apache-camel.md"},{"flag":"default","featured":true,"revisions":[{"name":"api-first-design-approach","release":"2017-03-01","title":"API-First Design Approach","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/api-first-design-approach.md","body":"The API-First Design Approach puts the API design at the beginning of the implementation without any constraints, for example, from the current IT infrastructure or the implementation itself. The idea is to design the API in a way that it serves its purpose best and the consumers are enabled to work efficiently.
\nThere are several advantages to this approach. For example, it can help to avoid reflecting the internal structure of the application or any internal constraints. Furthermore, as one of the most important design aspects is consistency, one can define features such as the behavior of security, URL schemes, and API keys upfront. It also helps speed up parallel implementation. A team that consumes the API can start working directly after the API design because it can easily be mocked.
\nThere are several tools for modelling an API, but here at AOE we mainly use RAML as it provides a rich set of tools for generating documentation, mocking and more. For mocking we use Wiremock, for example.
\nRelated to the "API-First" approach is the "Headless" approach where an existing application (with or without existing API) is used as a backend for a separate frontend. We used this with sucess for Magento-based E-Commerce platforms. This allows encapsulating the core features of that application, while integrating it into a larger landscape of components using its API as a unified way to interact between components. Decoupling the core logic from its presentation layer allows picking the best technology stack for the various parts independently.
\nFor further reading see:
\n\n"}],"name":"api-first-design-approach","title":"API-First Design Approach","ring":2,"quadrant":"methods-and-patterns","body":"The API-First Design Approach puts the API design at the beginning of the implementation without any constraints, for example, from the current IT infrastructure or the implementation itself. The idea is to design the API in a way that it serves its purpose best and the consumers are enabled to work efficiently.
\nThere are several advantages to this approach. For example, it can help to avoid reflecting the internal structure of the application or any internal constraints. Furthermore, as one of the most important design aspects is consistency, one can define features such as the behavior of security, URL schemes, and API keys upfront. It also helps speed up parallel implementation. A team that consumes the API can start working directly after the API design because it can easily be mocked.
\nThere are several tools for modelling an API, but here at AOE we mainly use RAML as it provides a rich set of tools for generating documentation, mocking and more. For mocking we use Wiremock, for example.
\nRelated to the "API-First" approach is the "Headless" approach where an existing application (with or without existing API) is used as a backend for a separate frontend. We used this with sucess for Magento-based E-Commerce platforms. This allows encapsulating the core features of that application, while integrating it into a larger landscape of components using its API as a unified way to interact between components. Decoupling the core logic from its presentation layer allows picking the best technology stack for the various parts independently.
\nFor further reading see:
\n\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/api-first-design-approach.md"},{"flag":"new","featured":true,"revisions":[{"name":"apollo-client","release":"2019-11-01","title":"Apollo Client","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apollo-client.md","body":"The Apollo Client is a tool to efficiently work together with an GraphQL server. \nIt makes it easy to run your queries and mutations, cache results, brings tooling to download schemas and generate types to name a few of the useful features.
\n"}],"name":"apollo-client","title":"Apollo Client","ring":2,"quadrant":"tools","body":"The Apollo Client is a tool to efficiently work together with an GraphQL server. \nIt makes it easy to run your queries and mutations, cache results, brings tooling to download schemas and generate types to name a few of the useful features.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apollo-client.md"},{"flag":"default","featured":true,"revisions":[{"name":"artifactory","release":"2018-03-01","title":"Artifactory","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/artifactory.md","body":"Artifactory is now used in every newly started project at AOE and plays a\ncentral role as an artifact repository for libraries, applications and docker\nimages. While cleanup is still an issue, we recommend the adoption of an\nartifact repository in all our projects.
\n"},{"name":"artifactory","release":"2017-03-01","title":"Artifactory","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/artifactory.md","body":"JFrog Artifactory is a software tool, which, in the end, manages and stores (binary) artifacts.\nIn addition to storage, it provides a managing interface, which also allows to store build information, properties as well as dependencies per artifact which are organized within repositories. A fine grained security system enables easy management of which artifacts are available to whom.\nThe artifacts are exposed via an HTTP(S)-Url Artifactory, which can generate package-manager compatible manifests for the repositories. AOE utilizes Artifactory to serve Maven, Apt, Npm, Composer and Docker Repositories.
\nIn addition to storing own assets, Artifactory is able to proxy remote Repository for and cache resolved artifacts locally.\nThis results in an increased build performance and decouples builds from external service dependencies and ensures builds still work even if they utilize outdated dependencies that might not be publicly available anymore.
\nArtifactory provides a powerful REST-API for managing Artifacts including a powerful search AQL. It is utilized to provide complex release processes based on QA-Attributes on an artifact level.
\nArtifactory at AOE currently comes with some problems, too:
\nAOE is using the Professional version for a central instance that can be used by different teams. We encourage teams to use Artifactory instead of Jenkins to store and manage build artifacts - and to take care of cleaning up old artifacts automatically.
\n"}],"name":"artifactory","title":"Artifactory","ring":1,"quadrant":"platforms-and-aoe-services","body":"Artifactory is now used in every newly started project at AOE and plays a\ncentral role as an artifact repository for libraries, applications and docker\nimages. While cleanup is still an issue, we recommend the adoption of an\nartifact repository in all our projects.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/artifactory.md"},{"flag":"default","featured":false,"revisions":[{"name":"asciidoc","release":"2018-03-01","title":"AsciiDoc","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/asciidoc.md","body":"AsciiDoc is a lightweight markup language such as Markdown. \nWith a concise Syntax, it supports more features than Markdown without extensions such as Tables and Table of Contents.\nIt's easy to write complex documentation with AsciiDoc. And with Asciidoctor you can export your text to Pdf, HTML, etc.
\nAt AOE, we use AsciiDoc for Documentation in our Repositories.
\n"}],"name":"asciidoc","title":"asciidoc.md","quadrant":"tools","body":"AsciiDoc is a lightweight markup language such as Markdown. \nWith a concise Syntax, it supports more features than Markdown without extensions such as Tables and Table of Contents.\nIt's easy to write complex documentation with AsciiDoc. And with Asciidoctor you can export your text to Pdf, HTML, etc.
\nAt AOE, we use AsciiDoc for Documentation in our Repositories.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/asciidoc.md"},{"flag":"default","featured":false,"revisions":[{"name":"aws-lambda","release":"2017-03-01","title":"AWS Lambda","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/aws-lambda.md","body":"AWS Lambda is one of the exciting new "cloud-native" / serverless ways to run code without worrying about infrastructure. While it is possible to directly respond to web requests using the API Gateway, our teams are currently using AWS Lambda mostly for tasks outside the critical path. As a custom resource for CloudFormation, it allows us to manage all aspects of a deployment in an elegant way by simply deploying a new CloudFormation stack. Baking AMIs and doing green/blue switches are only two of the many use cases where AWS Lambda comes in very handy.
\nIn addition to deployment automation, we're using AWS Lambda to process incoming data. Being able to respond to events from various sources such as S3 Buckets, SNS topics, Kinesis streams and HTTP endpoints it's a perfect match to process, transform and forward incoming data in near-realtime at a fraction of the cost of running an ESB.
\n"}],"name":"aws-lambda","title":"aws-lambda.md","quadrant":"platforms-and-aoe-services","body":"AWS Lambda is one of the exciting new "cloud-native" / serverless ways to run code without worrying about infrastructure. While it is possible to directly respond to web requests using the API Gateway, our teams are currently using AWS Lambda mostly for tasks outside the critical path. As a custom resource for CloudFormation, it allows us to manage all aspects of a deployment in an elegant way by simply deploying a new CloudFormation stack. Baking AMIs and doing green/blue switches are only two of the many use cases where AWS Lambda comes in very handy.
\nIn addition to deployment automation, we're using AWS Lambda to process incoming data. Being able to respond to events from various sources such as S3 Buckets, SNS topics, Kinesis streams and HTTP endpoints it's a perfect match to process, transform and forward incoming data in near-realtime at a fraction of the cost of running an ESB.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/aws-lambda.md"},{"flag":"default","featured":true,"revisions":[{"name":"axure","release":"2018-03-01","title":"Axure","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/axure.md","body":"Axure is a tool that enables the creation of flowcharts, wireframes, mockups, user journeys and more.\nThrough features such as conditional logic, dynamic content and animations it is possible to create highly functional and rich UI prototypes, which convey a realistic look and feel as to how the application to be developed should behave and look.
\nWe at AOE have used Axure successfully in several projects and it helped us a lot, particularly:
\nIn conclusion, Axure is a great tool that provides all stakeholders with a common understanding and helped us a lot to specify requirements and find their implications.
\n"}],"name":"axure","title":"Axure","ring":2,"quadrant":"tools","body":"Axure is a tool that enables the creation of flowcharts, wireframes, mockups, user journeys and more.\nThrough features such as conditional logic, dynamic content and animations it is possible to create highly functional and rich UI prototypes, which convey a realistic look and feel as to how the application to be developed should behave and look.
\nWe at AOE have used Axure successfully in several projects and it helped us a lot, particularly:
\nIn conclusion, Axure is a great tool that provides all stakeholders with a common understanding and helped us a lot to specify requirements and find their implications.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/axure.md"},{"flag":"default","featured":true,"revisions":[{"name":"babel","release":"2018-03-01","title":"Babel","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/babel.md","body":"We have been using babel for some time now. Since we have started using it, we don't have to\nstruggle with unimplemented features of ECMAScript. In this regard, JavaScript is\nJavaScript, no matter what browser you are using. We we strongly recommend \nusing Babel or similar solutions (e.g. TypeScript).
\n"},{"name":"babel","release":"2017-03-01","title":"Babel","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/babel.md","body":"Babel gives you the possibility to use the latest features from JavaScript (ECMAScript) in the browser of your choice.
\nWithout Babel you had to use the feature set of your oldest browser or use feature detections such as modernizr or write polyfills on your own.
\nIn general, Babel is split in 2 ways to bring you the new goodies you want.
\nNew syntax will be compiled to old EcmaScript 5 code e.g.:
\n\nNew globals and functions are provided by babel-polyfill e.g.:
\nThe configuration is really simple due to the plugin system. You can choose which ECMAScript version and stage presets you want to use.
\nTo know what you need you can practice ECMAScript 6 by doing it with es6katas and ask caniuse.
\nIf you are using TypeScript, Babel is not necessary since you already get the new features with TypeScript.
\n"}],"name":"babel","title":"Babel","ring":1,"quadrant":"languages-and-frameworks","body":"We have been using babel for some time now. Since we have started using it, we don't have to\nstruggle with unimplemented features of ECMAScript. In this regard, JavaScript is\nJavaScript, no matter what browser you are using. We we strongly recommend \nusing Babel or similar solutions (e.g. TypeScript).
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/babel.md"},{"flag":"new","featured":true,"revisions":[{"name":"beyondcorp","release":"2019-11-01","title":"BeyondCorp","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/beyondcorp.md","body":"BeyondCorp is a Zero Trust framework that evolved at Google.\nWith the surge of cloud technologies and micro services the network perimeter is ever disappearing. \nThis provides challenges for authentication of subjects that used to heavily rely on network segments.\nWith Zero Trust no assumption is made about how far something can be trusted, everything is untrusted by default and authentication and authorisation happens all the time, not just once.\nWhile network segments and VPN connections may still have relevance in specific areas AOE is increasingly implementing BeyondCorp in all its components and services with implementing OAuth and OpenID Connect.
\n"}],"name":"beyondcorp","title":"BeyondCorp","ring":2,"quadrant":"methods-and-patterns","body":"BeyondCorp is a Zero Trust framework that evolved at Google.\nWith the surge of cloud technologies and micro services the network perimeter is ever disappearing. \nThis provides challenges for authentication of subjects that used to heavily rely on network segments.\nWith Zero Trust no assumption is made about how far something can be trusted, everything is untrusted by default and authentication and authorisation happens all the time, not just once.\nWhile network segments and VPN connections may still have relevance in specific areas AOE is increasingly implementing BeyondCorp in all its components and services with implementing OAuth and OpenID Connect.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/beyondcorp.md"},{"flag":"default","featured":false,"revisions":[{"name":"blameless-post-mortems","release":"2018-03-01","title":"Blameless Post Mortems","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/blameless-post-mortems.md","body":"\n\nFailure and invention are inseparable twins.
\n— Jeff Bezos
\n
Blameless Post Mortems provide a concept of dealing with failures that inevitably occur when developing and operating complex software solutions. After any major incident or outage, the team gets together to perform an in-depth analysis of what happened and what can be done to mitigate the risk of similar issues happening in the future.
\nBased on trust, and under the assumption that every person involved had good intentions to do the best-possible job given the information at hand, Blameless Post Mortems provide an opportunity to continuously improve the quality of software and infrastructure and the processes to deal with critical situations.
\nThe post mortem documentation usually consists of both a timeline of the events leading to an incident and the steps taken to its remediation, as well as future actions and learnings for increasing reslience and stability of our services.
\nAt AOE, we strive to conduct a Blameless Post Mortem meeting after every user-visible incident.
\n"}],"name":"blameless-post-mortems","title":"blameless-post-mortems.md","quadrant":"methods-and-patterns","body":"\n\nFailure and invention are inseparable twins.
\n— Jeff Bezos
\n
Blameless Post Mortems provide a concept of dealing with failures that inevitably occur when developing and operating complex software solutions. After any major incident or outage, the team gets together to perform an in-depth analysis of what happened and what can be done to mitigate the risk of similar issues happening in the future.
\nBased on trust, and under the assumption that every person involved had good intentions to do the best-possible job given the information at hand, Blameless Post Mortems provide an opportunity to continuously improve the quality of software and infrastructure and the processes to deal with critical situations.
\nThe post mortem documentation usually consists of both a timeline of the events leading to an incident and the steps taken to its remediation, as well as future actions and learnings for increasing reslience and stability of our services.
\nAt AOE, we strive to conduct a Blameless Post Mortem meeting after every user-visible incident.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/blameless-post-mortems.md"},{"flag":"default","featured":false,"revisions":[{"name":"bower","release":"2017-03-01","title":"Bower","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/bower.md","body":"Bower is a package manager for frontend resources such as JavaScript libraries and CSS frameworks. Compared to npm, it has a somewhat different approach to loading and resolving the packages, resulting in a smaller and cleaner folder structure.
\nIn small web projects, this approach is good and sufficient, but larger projects will need more dependencies such as task runners or testing frameworks, which are not available through Bower. As most of the frontend libraries are also available through npm, it's not suprising that we ask ourselves why Bower is still needed.
\nAt AOE, we decided to use npm as the only package manager to avoid having multiple tools doing similar things. Developers only need to deal with one solution, which makes the project easier to maintain.
\n"}],"name":"bower","title":"bower.md","quadrant":"tools","body":"Bower is a package manager for frontend resources such as JavaScript libraries and CSS frameworks. Compared to npm, it has a somewhat different approach to loading and resolving the packages, resulting in a smaller and cleaner folder structure.
\nIn small web projects, this approach is good and sufficient, but larger projects will need more dependencies such as task runners or testing frameworks, which are not available through Bower. As most of the frontend libraries are also available through npm, it's not suprising that we ask ourselves why Bower is still needed.
\nAt AOE, we decided to use npm as the only package manager to avoid having multiple tools doing similar things. Developers only need to deal with one solution, which makes the project easier to maintain.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/bower.md"},{"flag":"default","featured":true,"revisions":[{"name":"client-side-error-logging","release":"2017-03-01","title":"Client-side error logging","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/client-side-error-logging.md","body":"More and more business logic is done client-side with various web and app technologies. How do we know if everything works in production? We can easily track backend exceptions in the server logs, but what about client-side errors in the user's browser or mobile app?
\nWith client-side error logging, we send errors to a central server to see instantly what is going wrong. With this method errors can be found and resolved quickly before they affect even more users.
\nAt AOE, we use the Open Source solution Sentry.io. It can handle multiple projects and teams and integrates well with other services such as Mattemost/Slack and Issue Tracking Systems.
\n"}],"name":"client-side-error-logging","title":"Client-side error logging","ring":2,"quadrant":"methods-and-patterns","body":"More and more business logic is done client-side with various web and app technologies. How do we know if everything works in production? We can easily track backend exceptions in the server logs, but what about client-side errors in the user's browser or mobile app?
\nWith client-side error logging, we send errors to a central server to see instantly what is going wrong. With this method errors can be found and resolved quickly before they affect even more users.
\nAt AOE, we use the Open Source solution Sentry.io. It can handle multiple projects and teams and integrates well with other services such as Mattemost/Slack and Issue Tracking Systems.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/client-side-error-logging.md"},{"flag":"new","featured":true,"revisions":[{"name":"cockpit","release":"2019-11-01","title":"Cockpit","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/cockpit.md","body":"Cockpit is a self-hosted headless and api-driven content management system.
\n"}],"name":"cockpit","title":"Cockpit","ring":3,"quadrant":"tools","body":"Cockpit is a self-hosted headless and api-driven content management system.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/cockpit.md"},{"flag":"new","featured":true,"revisions":[{"name":"concourse-ci","release":"2019-11-01","title":"Concourse","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/concourse-ci.md","body":"Concourse is an open-source continuous "thing-doer". It represents a general approach for automation which can be used for Continuous Integration and Continuous Delivery. Concourse CI follows a strict concept that is based on idempotency, immutability, declarative config, stateless workers, and reproducible builds. Pipelines are built on the mechanics of resources, tasks and jobs, which are all configured in one or multiple YAML files. \nConcourse claims to be "simple" but has a steep learning curve in the beginning till it gets simple to use.\nConcourse is used in the Congstar Team to automate infrastructure deployments.
\n"}],"name":"concourse-ci","title":"Concourse","ring":2,"quadrant":"tools","body":"Concourse is an open-source continuous "thing-doer". It represents a general approach for automation which can be used for Continuous Integration and Continuous Delivery. Concourse CI follows a strict concept that is based on idempotency, immutability, declarative config, stateless workers, and reproducible builds. Pipelines are built on the mechanics of resources, tasks and jobs, which are all configured in one or multiple YAML files. \nConcourse claims to be "simple" but has a steep learning curve in the beginning till it gets simple to use.\nConcourse is used in the Congstar Team to automate infrastructure deployments.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/concourse-ci.md"},{"flag":"default","featured":false,"revisions":[{"name":"consul","release":"2017-03-01","title":"Consul","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/consul.md","body":"Consul is a lightweight service to provide a service discovery registry with failure detection (health checks) for circuit breakers. It also provides configuration management with key/value storage.\\\nThe typical way to use it is that a consul master cluster takes care of the update and write processes and consul clients run locally on the apps host - data is shared accross the complete Consul cluster. The data can be accessed by using DNS and HTTP APIs.
\nAt AOE, we use Consul for settings distribution with consul-template as a way to do Settings Injection during deployment. Consul is also used as service discovery between apps inside microservice environments.
\nWith Vault there is another tool that can be used to manage and share secrets.
\n"}],"name":"consul","title":"consul.md","quadrant":"tools","body":"Consul is a lightweight service to provide a service discovery registry with failure detection (health checks) for circuit breakers. It also provides configuration management with key/value storage.\\\nThe typical way to use it is that a consul master cluster takes care of the update and write processes and consul clients run locally on the apps host - data is shared accross the complete Consul cluster. The data can be accessed by using DNS and HTTP APIs.
\nAt AOE, we use Consul for settings distribution with consul-template as a way to do Settings Injection during deployment. Consul is also used as service discovery between apps inside microservice environments.
\nWith Vault there is another tool that can be used to manage and share secrets.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/consul.md"},{"flag":"changed","featured":true,"revisions":[{"name":"container-based-builds","release":"2019-11-01","title":"Container-based builds","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/container-based-builds.md","body":"Updated to "adopt". Container based builds has getting to the defacto standard for our pipelines in Gitlab or other CI Tools.
\n"},{"name":"container-based-builds","release":"2017-03-01","title":"Container-based builds","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/container-based-builds.md","body":"Running your builds in isolated containers keeps your build servers clean. It allows you to even run them with multiple versions of a framework or programming language. You don't need additional machines like you would for running builds with PHP5 or PHP7 at the same time or running some legacy builds.
\nNote that you need to think about some kind of caching mechanism for your depenendies to avoid downloading them in every build, which would cause long build times.
\nAt AOE, we are currently starting to use this approach for building services and it is especially useful if your build has special dependencies. Also, it's possible to use GitLab as a build tool or use Docker with the new Jenkinspipeline. For caching we are evaluating minio as a cache server. We noticed that our builds run quite rapidly and reliably with that. Also, the complexity of the builds decreased since we don't need any workarounds, which were caused by having everything installed on one build server.
\n"}],"name":"container-based-builds","title":"Container-based builds","ring":1,"quadrant":"methods-and-patterns","body":"Updated to "adopt". Container based builds has getting to the defacto standard for our pipelines in Gitlab or other CI Tools.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/container-based-builds.md"},{"flag":"default","featured":true,"revisions":[{"name":"crc","release":"2018-03-01","title":"CRC Games","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/crc.md","body":"Class Responsibility Collaboration Card Games are a method to discuss and align the software design - especially useful for object-oriented software.
\nA proper software design is one of the most important things to ensure the sucess and the maintainability of your software.\nEspecially for iterative development methods, where you work on a software task by task, it is important to have designs sessions that also look forward to the next iterations and the conceptional whole.
\nAnd for software design to be sucessfull, it is very important that everybody (in the team) has the same understanding of the design and stands behind it.
\nCRC sessions help to design and align the high-level object design and collaboration of your system with the whole team. During such sessions new team members can learn from the experience and explanations of tropers.
\nThis is how we often conduct a CRC Session:
\nClass Responsibility Collaboration Card Games are a method to discuss and align the software design - especially useful for object-oriented software.
\nA proper software design is one of the most important things to ensure the sucess and the maintainability of your software.\nEspecially for iterative development methods, where you work on a software task by task, it is important to have designs sessions that also look forward to the next iterations and the conceptional whole.
\nAnd for software design to be sucessfull, it is very important that everybody (in the team) has the same understanding of the design and stands behind it.
\nCRC sessions help to design and align the high-level object design and collaboration of your system with the whole team. During such sessions new team members can learn from the experience and explanations of tropers.
\nThis is how we often conduct a CRC Session:
\nCypress is a new front-end testing tool (end2end). It comes as a simple node package and is therefore easy to use and maintain for front-end developers and testers. Cypress has a different approach than selenium, it runs in the browser and in the same loop as the device under test.
\nGood:
\nNot so good:
\nExample of a test :
\ndescribe('My First Test', function() {\n it('Visits the Kitchen Sink', function() {\n cy.visit('https://example.cypress.io')\n\n cy.contains('type').click()\n\n cy.url().should('include', '/commands/actions')\n\n cy.get('.action-email')\n .type('fake@email.com')\n .should('have.value', 'fake@email.com')\n })\n})\n\n"}],"name":"cypress","title":"Cypress","ring":3,"quadrant":"tools","body":"Cypress is a new front-end testing tool (end2end). It comes as a simple node package and is therefore easy to use and maintain for front-end developers and testers. Cypress has a different approach than selenium, it runs in the browser and in the same loop as the device under test.
\nGood:
\nNot so good:
\nExample of a test :
\ndescribe('My First Test', function() {\n it('Visits the Kitchen Sink', function() {\n cy.visit('https://example.cypress.io')\n\n cy.contains('type').click()\n\n cy.url().should('include', '/commands/actions')\n\n cy.get('.action-email')\n .type('fake@email.com')\n .should('have.value', 'fake@email.com')\n })\n})\n\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/cypress.md"},{"flag":"default","featured":false,"revisions":[{"name":"dagger","release":"2017-03-01","title":"Dagger","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/dagger.md","body":"Dagger is a fully static, compile-time dependency injection framework for both Java and Android. Dagger doesn't use reflections at runtime, it saves resources. For us, it is a perfect match for Android development.
\nWe at AOE use it as a base framework for every Android project.
\n"}],"name":"dagger","title":"dagger.md","quadrant":"tools","body":"Dagger is a fully static, compile-time dependency injection framework for both Java and Android. Dagger doesn't use reflections at runtime, it saves resources. For us, it is a perfect match for Android development.
\nWe at AOE use it as a base framework for every Android project.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/dagger.md"},{"flag":"default","featured":false,"revisions":[{"name":"datadog","release":"2017-03-01","title":"Datadog","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/datadog.md","body":"After realizing that AWS CloudWatch isn't flexible enough, and running our own metrics aggregation, monitoring and altering isn't something we want to do ourselves, we decided to give Datadog a try. Datadog is very simple to set up and retrieves metrics from the AWS API (and many other integrations) and from an agent running on the EC2 instances. On top of that, it comes with many plugins for services such as Apache, NGINX and ElasticSearch, allowing us to track all important metrics without much effort. Creating dashboards, setting up alarms and integrating into other applications (such as ticket systems) is easy to do and works fine.
\n"}],"name":"datadog","title":"datadog.md","quadrant":"platforms-and-aoe-services","body":"After realizing that AWS CloudWatch isn't flexible enough, and running our own metrics aggregation, monitoring and altering isn't something we want to do ourselves, we decided to give Datadog a try. Datadog is very simple to set up and retrieves metrics from the AWS API (and many other integrations) and from an agent running on the EC2 instances. On top of that, it comes with many plugins for services such as Apache, NGINX and ElasticSearch, allowing us to track all important metrics without much effort. Creating dashboards, setting up alarms and integrating into other applications (such as ticket systems) is easy to do and works fine.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/datadog.md"},{"flag":"default","featured":false,"revisions":[{"name":"decoupling-infrastructure-via-messaging","release":"2017-03-01","title":"Decoupling Infrastructure via Messaging","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/decoupling-infrastructure-via-messaging.md","body":"In Microservices we have already covered the trend that modern architectures are moving away more and more from big monolithic applications to distributed software suites. The result of splitting our software and infrastructure in smaller parts, is the need to communicate with each other. This can be done by direct communication or by message-based asynchronouous communication. While synchronuous communication allows for more plannable "real-time" response times of the overall systems, asynchronouos communication increases the resilience and stability of the system significantly and allows one to use other integration and scaling patterns. However, it often comes with additional complexity.
\nMost of the IaaS Cloud providers offer messaging services such as AWS SQS which provide the possibility to decouple our infrastructure via Messaging. Also, we use RabbitMQ as a Messaging and Broker solution within our applications. The decision of using messaging and messaging patterns as an integration strategy can be made as part of strategic design considerations.
\n"}],"name":"decoupling-infrastructure-via-messaging","title":"decoupling-infrastructure-via-messaging.md","quadrant":"methods-and-patterns","body":"In Microservices we have already covered the trend that modern architectures are moving away more and more from big monolithic applications to distributed software suites. The result of splitting our software and infrastructure in smaller parts, is the need to communicate with each other. This can be done by direct communication or by message-based asynchronouous communication. While synchronuous communication allows for more plannable "real-time" response times of the overall systems, asynchronouos communication increases the resilience and stability of the system significantly and allows one to use other integration and scaling patterns. However, it often comes with additional complexity.
\nMost of the IaaS Cloud providers offer messaging services such as AWS SQS which provide the possibility to decouple our infrastructure via Messaging. Also, we use RabbitMQ as a Messaging and Broker solution within our applications. The decision of using messaging and messaging patterns as an integration strategy can be made as part of strategic design considerations.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/decoupling-infrastructure-via-messaging.md"},{"flag":"new","featured":true,"revisions":[{"name":"dependency-update-scan","release":"2019-11-01","title":"Dependency Update Scan","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/dependency-update-scan.md","body":"Automated dependency scans are useful to remove the manual task of regularly checking for version updates.\nOur teams are utilizing the Open Source bots Renovate and Scala Steward, both of which are running as a scheduled GitLab job in our internal infrastructure.\nThe bots are regularly creating merge requests with dependency version updates against our projects.
\nHaving this automated comes with a few advantages:
\nAutomated merge requests allow us to focus on reviewing, testing and prioritization of dependency version updates with considerably less effort.
\n"}],"name":"dependency-update-scan","title":"Dependency Update Scan","ring":3,"quadrant":"methods-and-patterns","body":"Automated dependency scans are useful to remove the manual task of regularly checking for version updates.\nOur teams are utilizing the Open Source bots Renovate and Scala Steward, both of which are running as a scheduled GitLab job in our internal infrastructure.\nThe bots are regularly creating merge requests with dependency version updates against our projects.
\nHaving this automated comes with a few advantages:
\nAutomated merge requests allow us to focus on reviewing, testing and prioritization of dependency version updates with considerably less effort.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/dependency-update-scan.md"},{"flag":"default","featured":true,"revisions":[{"name":"devops-practices","release":"2017-03-01","title":"Devops practices","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/devops-practices.md","body":"DevOps is a term that has been around for some years now. We understand DevOps as a philosophy and culture with related practices and tools - all with the aim of bringing (IT) Operations closer to Development.
\nJez Humble described the devops movement like this: "a cross-functional community of practice dedicated to the study of building, evolving and operating rapidly changing, secure, resilient systems at scale".
\nWith the size of software projects and the effects of agile development, the need to also deliver operation and infrastructure in an agile way increases more and more.
\nWe have been using the following practices with success:
\nCrossfunctional Teams "you build it, you run it"
\nIn the past year, we have moved from a more centralistic or standanlone IT and operations service team to crossfunctional teams with Infrastructure experts working in and with the development team (admins joining the project team).
\nAnd, we changed to crossfunctional teams and a "you build it, you run it" approach for the bigger projects. We have seen that this leads to the following positive effects:
\nAs always, we are establishing "community of interests" to improve and promote the knowledge transfer between different teams.
\nIncrease of relevant tools
\nAnother important aspect and also enabler of DevOps practices is the increase of certain tool and methods - some of them are also represented in the Tech Radar. For example: Puppet Environments; Docker; Cloud Services, Terraform, Consul etc.
\nDevSetup = Prod Setup, Infrastructure as a Code
\nKeeping the development infrastructure setup close to production is also a commonly implemented practice and a direct result of the "Infrastructure as Code" method. Handling infrastructure and the required changes and innovations in ways similar to those used for applications is important; you can ready more about this here: Infrastructure as Code
\nWe encourage all teams to adopt devops practices in the teams and to take care that there is a true collaboration between the different experts in a team and no invisible wall.
\n"}],"name":"devops-practices","title":"Devops practices","ring":1,"quadrant":"methods-and-patterns","body":"DevOps is a term that has been around for some years now. We understand DevOps as a philosophy and culture with related practices and tools - all with the aim of bringing (IT) Operations closer to Development.
\nJez Humble described the devops movement like this: "a cross-functional community of practice dedicated to the study of building, evolving and operating rapidly changing, secure, resilient systems at scale".
\nWith the size of software projects and the effects of agile development, the need to also deliver operation and infrastructure in an agile way increases more and more.
\nWe have been using the following practices with success:
\nCrossfunctional Teams "you build it, you run it"
\nIn the past year, we have moved from a more centralistic or standanlone IT and operations service team to crossfunctional teams with Infrastructure experts working in and with the development team (admins joining the project team).
\nAnd, we changed to crossfunctional teams and a "you build it, you run it" approach for the bigger projects. We have seen that this leads to the following positive effects:
\nAs always, we are establishing "community of interests" to improve and promote the knowledge transfer between different teams.
\nIncrease of relevant tools
\nAnother important aspect and also enabler of DevOps practices is the increase of certain tool and methods - some of them are also represented in the Tech Radar. For example: Puppet Environments; Docker; Cloud Services, Terraform, Consul etc.
\nDevSetup = Prod Setup, Infrastructure as a Code
\nKeeping the development infrastructure setup close to production is also a commonly implemented practice and a direct result of the "Infrastructure as Code" method. Handling infrastructure and the required changes and innovations in ways similar to those used for applications is important; you can ready more about this here: Infrastructure as Code
\nWe encourage all teams to adopt devops practices in the teams and to take care that there is a true collaboration between the different experts in a team and no invisible wall.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/devops-practices.md"},{"flag":"new","featured":true,"revisions":[{"name":"distributed-tracing","release":"2019-11-01","title":"Distributed Tracing","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/distributed-tracing.md","body":"Distributed Tracing creates visibility over processes spanning multiple applications.\nIn a microservice world where a request or operation involves multiple applications it is helpful to have an overview of what system is involved, at what point.\nAlso visibility of communicated data and errors helps to quickly identify issues in a microservice environment.\nOur tool of choice is Jaeger with B3 Propagation.
\n"}],"name":"distributed-tracing","title":"Distributed Tracing","ring":2,"quadrant":"platforms-and-aoe-services","body":"Distributed Tracing creates visibility over processes spanning multiple applications.\nIn a microservice world where a request or operation involves multiple applications it is helpful to have an overview of what system is involved, at what point.\nAlso visibility of communicated data and errors helps to quickly identify issues in a microservice environment.\nOur tool of choice is Jaeger with B3 Propagation.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/distributed-tracing.md"},{"flag":"default","featured":true,"revisions":[{"name":"docker","release":"2018-03-01","title":"Docker","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/docker.md","body":"Docker has pulled off very quickly and we updated it to "adopt".
\n"},{"name":"docker","release":"2017-03-01","title":"Docker","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/docker.md","body":"Docker is currently the most-used solution for creating and managing container-based infrastructures and deployments.
\nEssentially, Docker is a platform to build container images, distribute them and run them as an isolated process (using Linux kernel cgroups, network namespaces and custom mounts).
\nIn a DevOps environment, this helps a lot as we can run the exact same software and runtime (such as PHP) on both production and locally while developing. This enables us to debug our software much easier.
\nAlso, Docker allows us to keep our development setup much smaller and faster; instead of VirtualBox setups on a per-project base, we can compose our project development setup out of small containers. A CI environment building the containers allows us to package and test the whole environment instead of different software components on different runtimes in a much more stable way.
\nBacked by services such as Kubernetes, we can deploy Docker containers on a flexible infrastructure and enable our developers to test their software more easily in different environments.
\nHere at AOE, we assess Docker in different projects to become more flexible and faster, which increases our focus on development of even better and more stable software.
\n"}],"name":"docker","title":"Docker","ring":1,"quadrant":"platforms-and-aoe-services","body":"Docker has pulled off very quickly and we updated it to "adopt".
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/docker.md"},{"flag":"new","featured":true,"revisions":[{"name":"eks","release":"2019-11-01","title":"Amazon EKS","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/eks.md","body":"Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. \nAmazon EKS runs Kubernetes control plane instances across multiple Availability Zones to ensure high availability. \nIt also provides automated version upgrades and patching for them.
\nAmazon EKS is used as part of the infrastructure in the Congstar project. \nDifferent Amazon EKS Clusters are in use on a variety of environments like development, integration, testing and production.\nWe experienced that Kubernetes version updates are done without major efforts or impact to the running cluster.
\nAmazon EKS is fully supported by Terraform which brings the advantage that its configuration is written in code,\nwhich fulfils the infrastructure as code philosophy.
\n"}],"name":"eks","title":"Amazon EKS","ring":2,"quadrant":"platforms-and-aoe-services","body":"Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. \nAmazon EKS runs Kubernetes control plane instances across multiple Availability Zones to ensure high availability. \nIt also provides automated version upgrades and patching for them.
\nAmazon EKS is used as part of the infrastructure in the Congstar project. \nDifferent Amazon EKS Clusters are in use on a variety of environments like development, integration, testing and production.\nWe experienced that Kubernetes version updates are done without major efforts or impact to the running cluster.
\nAmazon EKS is fully supported by Terraform which brings the advantage that its configuration is written in code,\nwhich fulfils the infrastructure as code philosophy.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/eks.md"},{"flag":"default","featured":true,"revisions":[{"name":"elasticsearch","release":"2018-03-01","title":"Elasticsearch","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/elasticsearch.md","body":"We are continuing to use Elasticsearch successfully in [Searchperience®] and have benefited from the aggregation features for related use cases such as rendering category trees.\nWe are also using Elasticsearch for some microservices as our persistence solution.
\nThis is why we have updated its status to adopt.
\n"},{"name":"elasticsearch","release":"2017-03-01","title":"Elasticsearch","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/elasticsearch.md","body":"Elasticsearch is a REST-based search and analytics engine based on Lucene. Unlike its competitor Apache Solr, it was developed in the beginning with clustering and scaling in mind. It allows you to create complex queries while still delivering results very fast.
\nAt AOE, we use Elasticsearch for logging as well as our own search solution Searchperience®. We recently moved the Searchperience stack from Solr to Elasticsearch and think this was the right decision. Especially in terms of scaling, ease of use and performance, Elasticsearch really shines. Also, the API design took some of the learnings from Apache SOLR into account - for example, the queryDSL is a powerful way of describing different search use cases with highly flexible support of aggregations, etc.
\n"}],"name":"elasticsearch","title":"Elasticsearch","ring":1,"quadrant":"platforms-and-aoe-services","body":"We are continuing to use Elasticsearch successfully in [Searchperience®] and have benefited from the aggregation features for related use cases such as rendering category trees.\nWe are also using Elasticsearch for some microservices as our persistence solution.
\nThis is why we have updated its status to adopt.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/elasticsearch.md"},{"flag":"default","featured":true,"revisions":[{"name":"elk-stack","release":"2017-03-01","title":"ELK Stack","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/elk-stack.md","body":"The company behind Elasticsearch offers a very nice solution for logging and analysis of distributed data such as logfiles.
\nIn today's increasingly distributed IT systems, it's very helpful to have a central view of what is going on in your systems - and of course nobody can and wants to look in different logfiles on different servers. A central logging solution provides the option to detect potential relationships between different events more easily. Also, also it can be used to extract useful KPIs or to visualize information on dashboards.
\nThe abbreviation "ELK Stack" stands for the Tools Elasticsearch, Logstash and Kibana: Together, they provide a solution for collecting data the ability to search, visualize and analyze data in real time.
\nLogstash is used to process and forward different data (or logfile) formats. Elasticsearch is used as a search index and together with the Kibana plugin you can configure highly individual dashboards. Recently, there are also the Beats Tools joining this toolstack to ship data to Elasticsearch.
\nWe have been using the ELK Stack for several years now in several projects and different infrastructure setups - we use it to visualize traffic, certain KPIs or just to analyze and search in application logs. We encourage all teams to use such a solution and take care to write useful logs in your applications.
\n"}],"name":"elk-stack","title":"ELK Stack","ring":1,"quadrant":"platforms-and-aoe-services","body":"The company behind Elasticsearch offers a very nice solution for logging and analysis of distributed data such as logfiles.
\nIn today's increasingly distributed IT systems, it's very helpful to have a central view of what is going on in your systems - and of course nobody can and wants to look in different logfiles on different servers. A central logging solution provides the option to detect potential relationships between different events more easily. Also, also it can be used to extract useful KPIs or to visualize information on dashboards.
\nThe abbreviation "ELK Stack" stands for the Tools Elasticsearch, Logstash and Kibana: Together, they provide a solution for collecting data the ability to search, visualize and analyze data in real time.
\nLogstash is used to process and forward different data (or logfile) formats. Elasticsearch is used as a search index and together with the Kibana plugin you can configure highly individual dashboards. Recently, there are also the Beats Tools joining this toolstack to ship data to Elasticsearch.
\nWe have been using the ELK Stack for several years now in several projects and different infrastructure setups - we use it to visualize traffic, certain KPIs or just to analyze and search in application logs. We encourage all teams to use such a solution and take care to write useful logs in your applications.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/elk-stack.md"},{"flag":"new","featured":true,"revisions":[{"name":"event-storming","release":"2019-11-01","title":"Event Storming","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/event-storming.md","body":"Event Storming is a method of modeling business processes using domain events.
\nWith complex business processes, people usually know their part of the process very well.\nHaving people from different departments in one room, allows (and requires!) a conversation.\nKnowledge silos get opened up. All learnings can be directly visualized.
\nWe tried this method a couple of times with different sized scopes. We believe it can be of value and has potential.
\nIt's like brainstorming - with the goal to visualize a business line or process.
\nEvent Storming is done in a workshop format.
\nTo get a business process modeled quickly and complete, it's important to get domain experts, developers, UX and\neverybody else who is involved to some extend in the related business line into one room.\nWith virtually unlimited space for modeling using big paper rolls put onto the walls, equipped with colored stickies\nand markers, the modeling workshop can start.
\nDuring the workshop, the goal is to model the big picture, without limiting or focusing just on parts of a process.
\n"}],"name":"event-storming","title":"Event Storming","ring":3,"quadrant":"methods-and-patterns","body":"Event Storming is a method of modeling business processes using domain events.
\nWith complex business processes, people usually know their part of the process very well.\nHaving people from different departments in one room, allows (and requires!) a conversation.\nKnowledge silos get opened up. All learnings can be directly visualized.
\nWe tried this method a couple of times with different sized scopes. We believe it can be of value and has potential.
\nIt's like brainstorming - with the goal to visualize a business line or process.
\nEvent Storming is done in a workshop format.
\nTo get a business process modeled quickly and complete, it's important to get domain experts, developers, UX and\neverybody else who is involved to some extend in the related business line into one room.\nWith virtually unlimited space for modeling using big paper rolls put onto the walls, equipped with colored stickies\nand markers, the modeling workshop can start.
\nDuring the workshop, the goal is to model the big picture, without limiting or focusing just on parts of a process.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/event-storming.md"},{"flag":"default","featured":true,"revisions":[{"name":"evil-user-stories","release":"2017-03-01","title":"Evil User Stories","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/evil-user-stories.md","body":"With Evil User Stories, we aim to raise the project teams' (PO, Dev-Team, QA) and clients' awareness for security topics and introduce a security-by-design principle.
\nThe first step is to identify business use cases of potential vulnerabilities in our software product. The next step is to write an Evil User Story for this use case, from the perspective of an evil persona, e.g. "John Badboy who wants to hack our software". The idea behind this is to take a look at specific parts (business logic) of the software from a perspective that would otherwise not be considered when working on standard user stories.
\nSo how would this work? To illustrate this, let's consider the following user story: "As Emma Shopping I am be able to pay for a product in my checkout using a credit card". To get that story done, we might have to persist some payment data somewhere. But within the context of an Evil user story we now also need to consider the security for the credit card and payment handling in our application. So, for that reason, we write an Evil User Story, which in this case could, for example, be "As John Badboy, I want to steal payment data" or more specifically "As John Badboy, I want to do to sql inject to get the payment token".
\nBefore implementation of this particular user story starts, developers should think about how they can secure potentially vulnerable parts of the software to prevent attacks such as sql injections. In this case, one approach should be the use of prepared statements for sql queries. When the development is finished, we should then be able to test the story using an automated testing approach with a penetration testing tool such as sqlmap to confirm that our database queries are not vulnerable to sql injections.
\nAdditionally, both solutions should be checked during the development process using code reviews to identify and correct potentially buggy code.
\n"}],"name":"evil-user-stories","title":"Evil User Stories","ring":3,"quadrant":"methods-and-patterns","body":"With Evil User Stories, we aim to raise the project teams' (PO, Dev-Team, QA) and clients' awareness for security topics and introduce a security-by-design principle.
\nThe first step is to identify business use cases of potential vulnerabilities in our software product. The next step is to write an Evil User Story for this use case, from the perspective of an evil persona, e.g. "John Badboy who wants to hack our software". The idea behind this is to take a look at specific parts (business logic) of the software from a perspective that would otherwise not be considered when working on standard user stories.
\nSo how would this work? To illustrate this, let's consider the following user story: "As Emma Shopping I am be able to pay for a product in my checkout using a credit card". To get that story done, we might have to persist some payment data somewhere. But within the context of an Evil user story we now also need to consider the security for the credit card and payment handling in our application. So, for that reason, we write an Evil User Story, which in this case could, for example, be "As John Badboy, I want to steal payment data" or more specifically "As John Badboy, I want to do to sql inject to get the payment token".
\nBefore implementation of this particular user story starts, developers should think about how they can secure potentially vulnerable parts of the software to prevent attacks such as sql injections. In this case, one approach should be the use of prepared statements for sql queries. When the development is finished, we should then be able to test the story using an automated testing approach with a penetration testing tool such as sqlmap to confirm that our database queries are not vulnerable to sql injections.
\nAdditionally, both solutions should be checked during the development process using code reviews to identify and correct potentially buggy code.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/evil-user-stories.md"},{"flag":"default","featured":false,"revisions":[{"name":"explicit-test-strategy","release":"2017-03-01","title":"Explicit test strategy","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/explicit-test-strategy.md","body":"According to the ISTQB Glossar- a Test Strategy is an abstract specification that comprises the designated test levels (unit, integration, system and acceptance tests) and the implementation of each level for a whole organization or for an application. This test strategy can be applicable to one or more projects.
\nAt AOE, we established an explicit test strategy for many of our projects. The coordination of the test levels improves the effectivity of test runs and helps to avoid testing gaps, double inspection and overhead. Every test level has a different focus. Tests that are executed on one level don't have to be implemented on others.
\nThese are the test levels that we implement as a standard in the software deployment pipeline of our projects and that handle multiple integrated components and services:
\nAs a rule, we automate the execution of tests where it is feasible and sensible. Related to the test strategy are the test concept, test data management and the usage of a test case management tool that allows one to assess and categorize functional test cases.
\nDue to the practical usefulness of having a sound test strategy for a project, we classify the explicit test strategy for projects with assess.
\n"}],"name":"explicit-test-strategy","title":"explicit-test-strategy.md","quadrant":"methods-and-patterns","body":"According to the ISTQB Glossar- a Test Strategy is an abstract specification that comprises the designated test levels (unit, integration, system and acceptance tests) and the implementation of each level for a whole organization or for an application. This test strategy can be applicable to one or more projects.
\nAt AOE, we established an explicit test strategy for many of our projects. The coordination of the test levels improves the effectivity of test runs and helps to avoid testing gaps, double inspection and overhead. Every test level has a different focus. Tests that are executed on one level don't have to be implemented on others.
\nThese are the test levels that we implement as a standard in the software deployment pipeline of our projects and that handle multiple integrated components and services:
\nAs a rule, we automate the execution of tests where it is feasible and sensible. Related to the test strategy are the test concept, test data management and the usage of a test case management tool that allows one to assess and categorize functional test cases.
\nDue to the practical usefulness of having a sound test strategy for a project, we classify the explicit test strategy for projects with assess.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/explicit-test-strategy.md"},{"flag":"new","featured":true,"revisions":[{"name":"falco","release":"2019-11-01","title":"Falco","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/falco.md","body":"Falco is an open source project for intrusion and abnormality detection for Cloud Native platforms such as Kubernetes. \nIt detects abnormal application behavior and sends alerts via Slack, Fluentd, NATS, and more.
\nWe are assessing Falco to add another angle to host based intrusion detection and alerting.
\n"}],"name":"falco","title":"Falco","ring":3,"quadrant":"tools","body":"Falco is an open source project for intrusion and abnormality detection for Cloud Native platforms such as Kubernetes. \nIt detects abnormal application behavior and sends alerts via Slack, Fluentd, NATS, and more.
\nWe are assessing Falco to add another angle to host based intrusion detection and alerting.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/falco.md"},{"flag":"new","featured":true,"revisions":[{"name":"flamingo","release":"2019-11-01","title":"Flamingo","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flamingo.md","body":"Flamingo is a high productivity go based framework for rapidly building fast and pluggable web projects.\nIt is used to build scalable and maintainable (web)applications.
\nFlamingo is:
\nGo as simple, powerful and typesafe language is great to implement and scale serverside logic.\nFlamingo has a clean architecture with clear dependencies in mind and offers a typical features and support for nowadays web applications:
\nFlamingo itself does not contain ORM Mapper or libraries - instead it emphasizes "ports and adapters" architecture - so that you have a technology free (domain) model and any possible (and replaceable) persitence behind it.\nThat makes Flamingo useful to build microservices and applications - especially to build "frontends" or portals that require interaction with other (micro) services in a distributed architecture. \nWhen sticking to the architectural recommendation you can build modular applications with replaceable adapters that gives you independed testability.
\nWith "Flamingo Commerce" there is an additional active projects that offer rich and flexible features to build modern e-commerce applications.
\n"}],"name":"flamingo","title":"Flamingo","ring":1,"quadrant":"languages-and-frameworks","body":"Flamingo is a high productivity go based framework for rapidly building fast and pluggable web projects.\nIt is used to build scalable and maintainable (web)applications.
\nFlamingo is:
\nGo as simple, powerful and typesafe language is great to implement and scale serverside logic.\nFlamingo has a clean architecture with clear dependencies in mind and offers a typical features and support for nowadays web applications:
\nFlamingo itself does not contain ORM Mapper or libraries - instead it emphasizes "ports and adapters" architecture - so that you have a technology free (domain) model and any possible (and replaceable) persitence behind it.\nThat makes Flamingo useful to build microservices and applications - especially to build "frontends" or portals that require interaction with other (micro) services in a distributed architecture. \nWhen sticking to the architectural recommendation you can build modular applications with replaceable adapters that gives you independed testability.
\nWith "Flamingo Commerce" there is an additional active projects that offer rich and flexible features to build modern e-commerce applications.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flamingo.md"},{"flag":"default","featured":false,"revisions":[{"name":"flow","release":"2017-03-01","title":"Flow","ring":4,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/flow.md","body":"Flow is a PHP web application framework developed for the Neos project.
\nWe used Flow in a couple of projects and decided to put it on hold for the following reasons:
\nAlthough it could be that some of the above-mentioned aspects have improved in the past, we decided to use other PHP frameworks such as Symfony or other Languages (See Go; Play Framework; Spring Boot)
\n"}],"name":"flow","title":"flow.md","quadrant":"languages-and-frameworks","body":"Flow is a PHP web application framework developed for the Neos project.
\nWe used Flow in a couple of projects and decided to put it on hold for the following reasons:
\nAlthough it could be that some of the above-mentioned aspects have improved in the past, we decided to use other PHP frameworks such as Symfony or other Languages (See Go; Play Framework; Spring Boot)
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/flow.md"},{"flag":"new","featured":true,"revisions":[{"name":"flowtype","release":"2019-11-01","title":"Flow","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flowtype.md","body":"Flow is a static type checker for JavaScript code. It's goal is to make code faster, smarter, \nmore confidently, and to a bigger scale.
\n"}],"name":"flowtype","title":"Flow","ring":3,"quadrant":"tools","body":"Flow is a static type checker for JavaScript code. It's goal is to make code faster, smarter, \nmore confidently, and to a bigger scale.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flowtype.md"},{"flag":"new","featured":true,"revisions":[{"name":"flux","release":"2019-11-01","title":"Flux","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flux.md","body":"Flux is an application architecture for building client-side web applications,\nwhich is based on React's composable view components.
\n"}],"name":"flux","title":"Flux","ring":3,"quadrant":"methods-and-patterns","body":"Flux is an application architecture for building client-side web applications,\nwhich is based on React's composable view components.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flux.md"},{"flag":"default","featured":false,"revisions":[{"name":"galen","release":"2017-03-01","title":"Galen","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/galen.md","body":"With Galen Framework, layout testing can be automated to save you a lot of manual work. With its own specification language (Galen Spec), you can write tests to verify the correct look of the web page as well as the location and alignment of specific elements on a page.
\nSo, you can write simple tests such as "The button should be green" as well as more complex behavior specifications such as "On mobile devices the button should be inside the viewport". Especially when testing a responsive website on multiple devices, browsers and resolutions, the manual testing effort gets expensive. To help with that, Galen runs its specifications fully automated with Selenium against the required browsers and devices.
\nWhenever a test fails Galen writes a test report with screenshots to show the mismatching areas on the page to help testers and developers become aware of the problem.
\nAt AOE, the Galen Framework helps us to continuously test the UI for potential regression bugs introduced by new features.
\n"}],"name":"galen","title":"galen.md","quadrant":"tools","body":"With Galen Framework, layout testing can be automated to save you a lot of manual work. With its own specification language (Galen Spec), you can write tests to verify the correct look of the web page as well as the location and alignment of specific elements on a page.
\nSo, you can write simple tests such as "The button should be green" as well as more complex behavior specifications such as "On mobile devices the button should be inside the viewport". Especially when testing a responsive website on multiple devices, browsers and resolutions, the manual testing effort gets expensive. To help with that, Galen runs its specifications fully automated with Selenium against the required browsers and devices.
\nWhenever a test fails Galen writes a test report with screenshots to show the mismatching areas on the page to help testers and developers become aware of the problem.
\nAt AOE, the Galen Framework helps us to continuously test the UI for potential regression bugs introduced by new features.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/galen.md"},{"flag":"default","featured":true,"revisions":[{"name":"gatlin","release":"2018-03-01","title":"Gatling","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gatlin.md","body":"Gatling is now the de-facto tool of choice for load testing in all of our\nprojects, having superseded JMeter completely. We therefore moved it to the\nAdopt level.
\n"},{"name":"gatlin","release":"2017-03-01","title":"Gatling","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gatlin.md","body":"Gatling is a highly capable load testing tool. It is designed for ease of use, maintainability and high performance.
\nOut of the box, Gatling comes with excellent support of the HTTP protocol that makes it a tool of choice for load testing any HTTP server. As the core engine is actually protocol agnostic, it is perfectly possible to implement support for other protocols. For example, Gatling currently also ships JMS support.
\nGatling is built with Scala Lang and Akka. By making good use of Scala's native language features (such as as the extensive type system), it makes writing tests feel natural and expressive, instead of writing load tests based on a DSL encoded in some special syntax.
\nThis allows us to use all native Scala features to work with, with the focus on the ability to structure your tests as pure code, and actually unit test your load tests.
\nBesides the very good performance, we definitely like the pure code-based approach. Gatling creates HTML-based reports with nice graphs and metrics about how and what was tested.
\nWe use Gatling as an alternative to Jmeter with success in some of our projects. We encourage teams to try Gatling for future load testing. There is an integrated test recorder similiar to what other test frameworks have to get you started with a basic test case.
\n"}],"name":"gatlin","title":"Gatling","ring":1,"quadrant":"tools","body":"Gatling is now the de-facto tool of choice for load testing in all of our\nprojects, having superseded JMeter completely. We therefore moved it to the\nAdopt level.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gatlin.md"},{"flag":"new","featured":true,"revisions":[{"name":"gitflow","release":"2019-11-01","title":"GitFlow","ring":4,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitflow.md","body":"Ever since there are recurring discussions about the version control strategy that a team should use.
\nWe have also made the experience when new teams start off with using blocking or long lived feature branches (merge late once all review comments are done) it has a negative impact on team performance.
\nWe recommend to use trunk based development with short lived (<1day) feature branches, because this has shown to support continuous integration and team collaboration the best. However we do accept teams choices to use GitFlow, we just do not try to encourage them in the first place.
\nSee also:
\nEver since there are recurring discussions about the version control strategy that a team should use.
\nWe have also made the experience when new teams start off with using blocking or long lived feature branches (merge late once all review comments are done) it has a negative impact on team performance.
\nWe recommend to use trunk based development with short lived (<1day) feature branches, because this has shown to support continuous integration and team collaboration the best. However we do accept teams choices to use GitFlow, we just do not try to encourage them in the first place.
\nSee also:
\nMoved to "adopt": Gitlab has proven to be a very useful tool for code and the collaboration around it.\nWith Gitlab CI there is also a powerful tool to automate continuous integration and delivery.
\n"},{"name":"gitlab","release":"2018-03-01","title":"Gitlab","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gitlab.md","body":"Gitlab provides nearly the same feature set as Github, but at a lower price. It also provides the possibility of hosting iternally, which is essential for us.
\nWe are migrating more and more repositories from gitolite, even from SVN to gitlab, as it provides a more stable and user friendly interface.
\nGitlab also makes user/permission handling easier than our old gitolite. We don't need the IT team every time a new repository needs to be set up.
\n"}],"name":"gitlab","title":"Gitlab","ring":1,"quadrant":"tools","body":"Moved to "adopt": Gitlab has proven to be a very useful tool for code and the collaboration around it.\nWith Gitlab CI there is also a powerful tool to automate continuous integration and delivery.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitlab.md"},{"flag":"changed","featured":true,"revisions":[{"name":"gitlab-ci","release":"2019-11-01","title":"Gitlab CI","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitlab-ci.md","body":"Moved to "adopt".
\n"},{"name":"gitlab-ci","release":"2018-03-01","title":"Gitlab CI","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gitlab-ci.md","body":"Until now, we have been using Jenkins for almost every single task that we have automated. With Gitlab CI on the market, we have a number of new possibilities.
\nSome of the highlights are:
\nMoved to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitlab-ci.md"},{"flag":"changed","featured":true,"revisions":[{"name":"go-lang","release":"2019-11-01","title":"Go / Golang","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/go-lang.md","body":"We have moved Go to "adopt".
\n"},{"name":"go-lang","release":"2018-03-01","title":"Go / Golang","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/go-lang.md","body":"We have moved Go to Trial because multiple teams have used Go with success for different services and tools.\nThe learning curve and productivity have proven to be immense and we are convinced that this language will find more adoption in other teams.
\n"},{"name":"go-lang","release":"2017-03-01","title":"Go / Golang","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/go-lang.md","body":"2016 was the year of Go, with a lot of Open Source projects gaining a lot of attention and many companies started to use it.
\nGo went from #54 to #13 on the TIOBE index in January 2017, and it became the TIOBE programming language of the year 2016.
\nHere at AOE, we use several services written in Go on a daily basis, such as Mattermost, Docker, Consul and Kubernetes. Also, more and more applications, such as Gitlab, incorporate Go-based services to "off load" heavy work.
\nGo, as a programming language, has some very interesting features such as native support for concurrency (go routines), static compiled binaries with a very small memory footprint, cross compiling and much more. A big advantage of Go is the very flat learning curve, which allows developers from more dynamic languages such as PHP to be proficient in a very short time.
\nIf you want to get a feeling for Go, you should start with the online tour, within a day you'll have a good understanding of the core concepts, syntax, etc. - that is also because the language often tries to provide only one simple way of doing things; an example for this is that code formatting and styling is defined (yet not enforced as in Python). Part of this is also that Go itself is very opinionated: So, for example, for object oriented programming in Go, composition is the prefered way of defining data structures, and some might miss advanced concepts such as inheritance.
\nWe currently use Go for projects and microservices where we need flexibility and performance.
\n"}],"name":"go-lang","title":"Go / Golang","ring":1,"quadrant":"languages-and-frameworks","body":"We have moved Go to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/go-lang.md"},{"flag":"default","featured":true,"revisions":[{"name":"gradle","release":"2017-03-01","title":"Gradle","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gradle.md","body":"Gradle is a build automation tool originating in the Java space, providing declarative dependency management (like Maven) and support for custom functionality (like Ant). It has superb multi-project support and is extremely extensible via third-party plugins and also via self-written extensions and plugins that make it outstanding in its area.
\nIt uses a Groovy-based DSL to declaratively model your problem domain (Build automation) and provides a rich object model with extension points to customize the build logic. Because it is extremely easy to extend this DSL, you can easily provide a declarative interface to your customizations and add-ons.
\nWhile providing plugins for building libs, apps and webapps in Java, Groovy and Scala out of the box it is not tied to the JVM as target platform, which is impressively shown by the native build support for C / C++.
\nAt AOE, it is used in various places already: to build Anypoint- and Spring Boot- based applications; to build Android Apps; to automate the creation of Jenkins Jobs; to create Docker images and Debian packages and also do some deployment scripting with it.
\n"}],"name":"gradle","title":"Gradle","ring":1,"quadrant":"tools","body":"Gradle is a build automation tool originating in the Java space, providing declarative dependency management (like Maven) and support for custom functionality (like Ant). It has superb multi-project support and is extremely extensible via third-party plugins and also via self-written extensions and plugins that make it outstanding in its area.
\nIt uses a Groovy-based DSL to declaratively model your problem domain (Build automation) and provides a rich object model with extension points to customize the build logic. Because it is extremely easy to extend this DSL, you can easily provide a declarative interface to your customizations and add-ons.
\nWhile providing plugins for building libs, apps and webapps in Java, Groovy and Scala out of the box it is not tied to the JVM as target platform, which is impressively shown by the native build support for C / C++.
\nAt AOE, it is used in various places already: to build Anypoint- and Spring Boot- based applications; to build Android Apps; to automate the creation of Jenkins Jobs; to create Docker images and Debian packages and also do some deployment scripting with it.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gradle.md"},{"flag":"changed","featured":true,"revisions":[{"name":"grafana","release":"2019-11-01","title":"Grafana","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grafana.md","body":"Updated to "adopt"
\n"},{"name":"grafana","release":"2018-03-01","title":"Grafana","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/grafana.md","body":"Grafana is an Open Source data visualization platform written in Go and NodeJS. It provides a vast choice of different graph types that can be easily combined into dashboards for displaying any kind of numerical or time-based data.
\nAt AOE, we usually use Grafana in conjunction with Prometheus or AWS CloudWatch for visualizing both application and infrastructure metrics.
\n"}],"name":"grafana","title":"Grafana","ring":1,"quadrant":"platforms-and-aoe-services","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grafana.md"},{"flag":"new","featured":true,"revisions":[{"name":"graphql","release":"2019-11-01","title":"GraphQL","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/graphql.md","body":"GraphQL is a query language for your API, and a server-side runtime for executing queries by using a type system you define for your data. GraphQL isn't tied to any specific database or storage engine and is instead backed by your existing code and data.
\nGraphQL was developed by Facebook around 2010 and releases 2015. \nThe main challenge it solves is to improve communication between browser and server on high dynamic web apps.
\nThe advantages are:
\nWe are using it together with Apollo Client in our React.js based frontend.\nThis way the React components have their relevant GraphQL snippet, defining what data they request or mutate from the "backend for frontend", directly coupled. \nThat makes it transparent what data is available. Apollo takes care of sending an aggregated GraphQL query to the backend.
\nThe framework Flamingo offers support for GraphQL and also Flamingo Commerce offers a full featured GraphQL API for e-commerce features. (Example GraphQL Console for Commerce)
\n"}],"name":"graphql","title":"GraphQL","ring":1,"quadrant":"methods-and-patterns","body":"GraphQL is a query language for your API, and a server-side runtime for executing queries by using a type system you define for your data. GraphQL isn't tied to any specific database or storage engine and is instead backed by your existing code and data.
\nGraphQL was developed by Facebook around 2010 and releases 2015. \nThe main challenge it solves is to improve communication between browser and server on high dynamic web apps.
\nThe advantages are:
\nWe are using it together with Apollo Client in our React.js based frontend.\nThis way the React components have their relevant GraphQL snippet, defining what data they request or mutate from the "backend for frontend", directly coupled. \nThat makes it transparent what data is available. Apollo takes care of sending an aggregated GraphQL query to the backend.
\nThe framework Flamingo offers support for GraphQL and also Flamingo Commerce offers a full featured GraphQL API for e-commerce features. (Example GraphQL Console for Commerce)
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/graphql.md"},{"flag":"changed","featured":true,"revisions":[{"name":"groovy","release":"2019-11-01","title":"Groovy","ring":4,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/groovy.md","body":"Since the rise of Kotlin, we seen no need why to still use Groovy as an alternative to Java running on the JVM.
\n"},{"name":"groovy","release":"2017-03-01","title":"Groovy","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/groovy.md","body":"Groovy is a dynamically typed compiled language running on the JVM. It is easy to learn as it provides a familiar syntax for Java programmers, but also offers advanced features such as closures and makes some mandatory Java syntax requirements optional to enhance the conciseness of the code. These features make Groovy especially well-suited for scripting and domain-specific languages. This is used by popular tools such as Gradle or Spock.
\nAt AOE, Groovy is used in many projects and areas. We use Gradle as a build system, we carry out unit and integration testing with Spock and Geb, we generate Jenkins jobs with JobDSL and we implement complete services with Groovy and Spring Boot.
\n"}],"name":"groovy","title":"Groovy","ring":4,"quadrant":"languages-and-frameworks","body":"Since the rise of Kotlin, we seen no need why to still use Groovy as an alternative to Java running on the JVM.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/groovy.md"},{"flag":"changed","featured":true,"revisions":[{"name":"grpc","release":"2019-11-01","title":"GRPC","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grpc.md","body":"We adopted GRPC, because its used on multiple places within our microservice oriented architectures for internal communication.
\n"},{"name":"grpc","release":"2018-03-01","title":"GRPC","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/grpc.md","body":"gRPC, "A high-performance, Open Source, universal RPC framework," is a framework to easily connect clients and servers in an RPC setup.\ngRPC was initially built at Google, and uses protobuf service definitions for method and payload specification.\nEssentially, this makes it possible to define methods that a server exposes, with either a single payload or an incoming stream - either as a single response or a stream of responses.\nThe definition itself is carried out with the help of protobuf to define message types and method signatures, and then client and server interfaces are compiled for the language(s) you want. Currently there is support for languages such as C++, Java, Python, Go and many more.\nThe shared language-neutral protobuf definition allows you to create all code for all languages automatically and helps with the interoperability of different systems.
\nFrom a technical point of view, gRPC uses HTTP/2 as a transport, directly benefitting from the default TLS encryption.\nBesides gRPC, other frameworks also use protobuf RPC definitions. These frameworks include twirp from twitch, which makes it easy to change the transport/control layer with only very small changes to the application code.
\nWe at AOE plan to assess gRPC for microservice architectures which are more RPC style and less REST style.
\n"}],"name":"grpc","title":"GRPC","ring":1,"quadrant":"languages-and-frameworks","body":"We adopted GRPC, because its used on multiple places within our microservice oriented architectures for internal communication.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grpc.md"},{"flag":"default","featured":false,"revisions":[{"name":"grunt","release":"2017-03-01","title":"Grunt","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/grunt.md","body":"Grunt is a JavaScript task runner that automates repetitive tasks. While Grunt served us well for a good amount of projects,\nother alternatives such as Gulp emerged in the meantime and proved to be a better pick for the\nmajority of our teams.
\nWe have two main reasons for discarding Grunt in favor of other tools:
\nIf a decent amount of tasks is reached, Grunt is known to run slower than other tools, because it heavily relies on I/O operations and\nalways stores the result of one task as files on the disk.
\nOn large projects where a lot of automation is required, it can get very tedious to maintain complex and parallel running tasks.\nThe grunt configuration files sometimes simply don´t gave us the flexibility that we needed.
\nCurrently our preferred way to go is either simply use NPM scripts or rely on Webpack loaders for file preprocessing. For non-webpack projects we also utilize Gulp.
\n"}],"name":"grunt","title":"grunt.md","quadrant":"tools","body":"Grunt is a JavaScript task runner that automates repetitive tasks. While Grunt served us well for a good amount of projects,\nother alternatives such as Gulp emerged in the meantime and proved to be a better pick for the\nmajority of our teams.
\nWe have two main reasons for discarding Grunt in favor of other tools:
\nIf a decent amount of tasks is reached, Grunt is known to run slower than other tools, because it heavily relies on I/O operations and\nalways stores the result of one task as files on the disk.
\nOn large projects where a lot of automation is required, it can get very tedious to maintain complex and parallel running tasks.\nThe grunt configuration files sometimes simply don´t gave us the flexibility that we needed.
\nCurrently our preferred way to go is either simply use NPM scripts or rely on Webpack loaders for file preprocessing. For non-webpack projects we also utilize Gulp.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grunt.md"},{"flag":"default","featured":false,"revisions":[{"name":"gulp","release":"2017-03-01","title":"Gulp","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gulp.md","body":"Gulp is a javascript task runner much like Grunt. The tasks are written in javascript code.
\nIt is a tool that helps you automate numerous tasks surrounding web development. A typical use is to configure preprocessors for Sass, to compile CSS or to optimize CSS, Javascript and Images.
\nWith Gulp and its many plugins you can also do stuff such as start a web server and reload the browser if changes happen.
\nTo get started you need to install Gulp on your machine via npm.
\nnpm install gulp -g\n\n\nYou also need it locally in your project, so you have to install it as a dependency in your project .
\nnpm install gulp --save-dev\n\n\nYou can split your tasks into various smaller sub-tasks and even split it up into smaller files.
\nA basic Gulp task can look like this:
\nconst gulp = require('gulp');\n// Requires the gulp-sass plugin\nconst sass = require('gulp-sass');\nconst autoprefixer = require('gulp-autoprefixer');\nconst cssnano = require('gulp-cssnano');\n\ngulp.task('sass', function(){\n return gulp.src('app/scss/**/*.scss') // tell gulp where your source files are\n .pipe(sass()) // Converts sass into css with the help of a gulp plugin called gulp-sass\n .pipe(autoprefixer({browsers: ['last 2 versions']})) // auto prefixes the css for the last 2 versions of browser, like ie9 specific css\n .pipe(cssnano()) // minify the css\n .pipe(gulp.dest('app/css')) // tell gulp where to put the converted file. this is the first time where a file is written\n});\n\n\nyou can now run this task simply by executing the following command in your terminal:
\ngulp sass\n\n"}],"name":"gulp","title":"gulp.md","quadrant":"tools","body":"Gulp is a javascript task runner much like Grunt. The tasks are written in javascript code.
\nIt is a tool that helps you automate numerous tasks surrounding web development. A typical use is to configure preprocessors for Sass, to compile CSS or to optimize CSS, Javascript and Images.
\nWith Gulp and its many plugins you can also do stuff such as start a web server and reload the browser if changes happen.
\nTo get started you need to install Gulp on your machine via npm.
\nnpm install gulp -g\n\n\nYou also need it locally in your project, so you have to install it as a dependency in your project .
\nnpm install gulp --save-dev\n\n\nYou can split your tasks into various smaller sub-tasks and even split it up into smaller files.
\nA basic Gulp task can look like this:
\nconst gulp = require('gulp');\n// Requires the gulp-sass plugin\nconst sass = require('gulp-sass');\nconst autoprefixer = require('gulp-autoprefixer');\nconst cssnano = require('gulp-cssnano');\n\ngulp.task('sass', function(){\n return gulp.src('app/scss/**/*.scss') // tell gulp where your source files are\n .pipe(sass()) // Converts sass into css with the help of a gulp plugin called gulp-sass\n .pipe(autoprefixer({browsers: ['last 2 versions']})) // auto prefixes the css for the last 2 versions of browser, like ie9 specific css\n .pipe(cssnano()) // minify the css\n .pipe(gulp.dest('app/css')) // tell gulp where to put the converted file. this is the first time where a file is written\n});\n\n\nyou can now run this task simply by executing the following command in your terminal:
\ngulp sass\n\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gulp.md"},{"flag":"changed","featured":false,"revisions":[{"name":"hal-hateoas","release":"2019-11-01","featured":false,"title":"hal-hateoas.md","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hal-hateoas.md","body":"We use HAL in cases where we need to link ressources in payloads. HATEOAS has not proven to be very useful in our projects.
\n"},{"name":"hal-hateoas","release":"2018-03-01","title":"HAL / HATEOAS","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/hal-hateoas.md","body":"We still recommend the usage of HAL and HATEOAS.
\nBut, depending on the resource structure, there are some pitfalls to be aware of:
\nHypermedia As The Engine Of Application State or in short HATEOAS is a pattern that helps to organize dependencies and resources in a RESTful API. The basic idea of HATEOAS is that an API consumer do not have to know how dependencies of resources are connected and how to get them. A consumer must only be familiar with the basics of hypermedia.
\nLet's assume we have a bank account and an action to deposit money on that account. Everything you need to know is that the account resource has an action for a deposit. The URL of that action can then fetched from the link attribute with the corresponding relation.
\n<account>\n <account_number>12345</account_number>\n <balance currency="usd">-25.00</balance>\n <link rel="deposit" href="https://bank.example.com/account/12345/deposit" />\n</account>\nBesides from HATEOAS there is an alternative implementation called Hypertext Application Language, in short HAL, which has much more features than the basic HATEOAS.
\nWith HAL you are allowed to also define parametrized links, embedded resources and documentation relations (which are called curies). You can find the specification here.\nhttp://stateless.co/hal_specification.html
\nIf you want to link different api endpoints or ressource locations in your API responses you should use this standard.
\n"}],"name":"hal-hateoas","title":"hal-hateoas.md","quadrant":"methods-and-patterns","body":"We use HAL in cases where we need to link ressources in payloads. HATEOAS has not proven to be very useful in our projects.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hal-hateoas.md"},{"flag":"changed","featured":true,"revisions":[{"name":"helm","release":"2019-11-01","title":"Helm","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/helm.md","body":"Helm is a package manager for Kubernetes, which simplifies the deployment\nof applications into a Kubernetes cluster and provides additional features like e.g. versioning and rollbacks.
\n"},{"name":"helm","release":"2018-03-01","title":"Helm","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/helm.md","body":"For managing deployments within Kubernetes we use Helm, which makes templating Kubernetes configuration files super easy (also known as Helm charts).
\n"}],"name":"helm","title":"Helm","ring":2,"quadrant":"platforms-and-aoe-services","body":"Helm is a package manager for Kubernetes, which simplifies the deployment\nof applications into a Kubernetes cluster and provides additional features like e.g. versioning and rollbacks.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/helm.md"},{"flag":"changed","featured":false,"revisions":[{"name":"hystrix","release":"2019-11-01","featured":false,"title":"hystrix.md","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hystrix.md","body":"Hystrix is not actively mainatined anymore and some of its goals can now be handled with service meshs.
\n"},{"name":"hystrix","release":"2017-03-01","title":"Hystrix ","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/hystrix.md","body":"Hystrix is a very powerful library for handling failures, fallbacks and latency management within complex distributed environments. Netflix developed it and after years of experience, they are using it in almost each of their microservices. It evolved to a great library for handling resilience in complex architectures and covers solutions for the most common resilience patterns like:
\nBeside from that purposes Hystrix also offers some helpful features like parallel and asynchronous execution, In-Request-Caching and other useful features for working with distributed systems.
\nAnother useful component that you are able to use with Hystrix is his dashboard that give you the ability of real time monitoring of external dependencies and how they behave. Alerting is also able via the dashboard.
\n"}],"name":"hystrix","title":"hystrix.md","quadrant":"tools","body":"Hystrix is not actively mainatined anymore and some of its goals can now be handled with service meshs.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hystrix.md"},{"flag":"default","featured":false,"revisions":[{"name":"imgix","release":"2017-03-01","title":"imgix","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/imgix.md","body":"Imgix is an SaaS solution for delivering and processing images. When developing responsive websites, you will quickly reach the point where you need various versions of your images to achieve a good responsive user interface. You want high quality versions for retina displays but small versions for mobile devices with a slow Internet connection.
\nEspecially when dealing with user-generated uploads, it is getting hard to create different versions for any supported device and breakpoint of your web page. Doing this manually is hardly an option.
\nAt AOE, we decided to use imgix as an image processing service for some projects to solve this problem. The benefits of imgix are the simple API to create responsive images in real-time as well as the fast delivery over their CDN.
\n"}],"name":"imgix","title":"imgix.md","quadrant":"platforms-and-aoe-services","body":"Imgix is an SaaS solution for delivering and processing images. When developing responsive websites, you will quickly reach the point where you need various versions of your images to achieve a good responsive user interface. You want high quality versions for retina displays but small versions for mobile devices with a slow Internet connection.
\nEspecially when dealing with user-generated uploads, it is getting hard to create different versions for any supported device and breakpoint of your web page. Doing this manually is hardly an option.
\nAt AOE, we decided to use imgix as an image processing service for some projects to solve this problem. The benefits of imgix are the simple API to create responsive images in real-time as well as the fast delivery over their CDN.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/imgix.md"},{"flag":"changed","featured":true,"revisions":[{"name":"infrastructure-as-code","release":"2019-11-01","title":"Infrastructure as Code","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/infrastructure-as-code.md","body":"Updated to "adopt"
\n"},{"name":"infrastructure-as-code","release":"2017-03-01","title":"Infrastructure as Code","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/infrastructure-as-code.md","body":"Infrastructure as Code (IaC) describes the process of managing all infrastructure resources via code. Treating infrastructure code the same way we treat application code, we can benefit from the same advantages of having a history in our version control system, doing code reviews and rolling out updates via a Continuous Delivery pipeline in a way that closely approaches how we handle application deployments.
\nInfrastructure code is often described in a declarative language und the target platforms figure out what to create, update or delete in order to get to the desired state, while doing this in a safe and efficient way. We've worked with AWS CloudFormation in the past, and while this is a great tool, you can only manage AWS resources with it and you need some more tooling around it in order to automate things nicely and embed it into other processes such as Jenkins Jobs. That's what we created StackFormation for. Another tool that is actively developed is Terraform. Terraform comes with a lot of concepts that make managing environments easier out of the box and nicely embeds into other related tools. Also, Terraform allows you to manage a variety of different infrastructure providers.
\nInfrastructure as code should cover everything from orchestration of your infrastructure resources, networking and provisioning as well as monitoring setup. The orchestration tools mentioned above are supplemented by other tools such as Puppet, Chef or simple Bash scripts that take over provisioning the instances after they are booted.
\n"}],"name":"infrastructure-as-code","title":"Infrastructure as Code","ring":1,"quadrant":"platforms-and-aoe-services","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/infrastructure-as-code.md"},{"flag":"default","featured":true,"revisions":[{"name":"invision","release":"2018-03-01","title":"Invision","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/invision.md","body":"Invision is an online tool used to work and collaborate on design and prototypes and to share them between clients and the team.
\nWe use it in many projects now to present prototypes and designs and it helps in understanding the planned user experience.\nAlso, we use this directly as a reference from the user stories to help the development teams in understanding and implementing the right frontend and backend functionalities.
\n"}],"name":"invision","title":"Invision","ring":2,"quadrant":"tools","body":"Invision is an online tool used to work and collaborate on design and prototypes and to share them between clients and the team.
\nWe use it in many projects now to present prototypes and designs and it helps in understanding the planned user experience.\nAlso, we use this directly as a reference from the user stories to help the development teams in understanding and implementing the right frontend and backend functionalities.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/invision.md"},{"flag":"new","featured":true,"revisions":[{"name":"jaeger","release":"2019-11-01","title":"Jaeger","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/jaeger.md","body":"Jaeger is a tool for Distributed Tracing. Developed at Uber and inspired by Dapper and OpenZipkin it grew into an Cloud Native Computing Foundation project.
\nJaeger is a great tool for troubleshooting distributed systems, such as microservice architectures. Developers and Operation can quickly see communicaiton between services, and what data is communicated where.\nErrors in services can be traced to the originating system. Global trace identifiers are communicated using B3 headers. Jaeger supports Zipkin, which allows easy migration von OpenZipkin & co.
\n"}],"name":"jaeger","title":"Jaeger","ring":2,"quadrant":"platforms-and-aoe-services","body":"Jaeger is a tool for Distributed Tracing. Developed at Uber and inspired by Dapper and OpenZipkin it grew into an Cloud Native Computing Foundation project.
\nJaeger is a great tool for troubleshooting distributed systems, such as microservice architectures. Developers and Operation can quickly see communicaiton between services, and what data is communicated where.\nErrors in services can be traced to the originating system. Global trace identifiers are communicated using B3 headers. Jaeger supports Zipkin, which allows easy migration von OpenZipkin & co.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/jaeger.md"},{"flag":"default","featured":true,"revisions":[{"name":"jest","release":"2018-03-01","title":"Jest","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/jest.md","body":"Updated to "adopt".
\n"},{"name":"jest","release":"2017-03-01","title":"Jest ","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/jest.md","body":"Jest is a javascript testing framework by facebook to test javascript code and react applications / components.
\nWe started using Jest (and watchmen) instead of Karma because it:
\nIt is easy to set up. And even if you have a running setup with karma/chai you can easily replace karma with jest. With a small workaround, chai and jest test matchers work fine together.
\n"}],"name":"jest","title":"Jest","ring":1,"quadrant":"tools","body":"Updated to "adopt".
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/jest.md"},{"flag":"default","featured":false,"revisions":[{"name":"job-dsl","release":"2017-03-01","title":"Job DSL (Jenkins)","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/job-dsl.md","body":"The Job DSL is a plugin for the Jenkins automation server. Jenkins jobs that automate parts of a software project are usually configured using the web interface of Jenkins. If Jenkins is the choice for your project and the number of build jobs tend to grow, the Job DSL plugin is your friend.
\nThe plugin allows Jenkins jobs to be described by code (Groovy DSL). This code is then used for generating Jenkins jobs. As a consequence, job configuration can be part of the project's source code. During the generation step, existing jobs are synchronized, overwritten or left alone, depending on the configuration. The same configuration manages deleting or ignoring jobs that are not described in code anymore. Jobs can easily be restored in case of data loss and changed without clicking buttons for hours. The automation also makes it easy to seed large numbers of homogeneous components and builds on different branches.
\nThe ability to treat Jenkins jobs as code is a big advantage. We highly suggest that every team automate the setup of their jobs and their pipelines. Another way of expressing build pipelines as code is the new Jenkins Pipeline feature - but still we see the need of Job DSL seeder jobs to seed the Jenkins pipeline jobs themselves and any additional jobs.
\n"}],"name":"job-dsl","title":"job-dsl.md","quadrant":"tools","body":"The Job DSL is a plugin for the Jenkins automation server. Jenkins jobs that automate parts of a software project are usually configured using the web interface of Jenkins. If Jenkins is the choice for your project and the number of build jobs tend to grow, the Job DSL plugin is your friend.
\nThe plugin allows Jenkins jobs to be described by code (Groovy DSL). This code is then used for generating Jenkins jobs. As a consequence, job configuration can be part of the project's source code. During the generation step, existing jobs are synchronized, overwritten or left alone, depending on the configuration. The same configuration manages deleting or ignoring jobs that are not described in code anymore. Jobs can easily be restored in case of data loss and changed without clicking buttons for hours. The automation also makes it easy to seed large numbers of homogeneous components and builds on different branches.
\nThe ability to treat Jenkins jobs as code is a big advantage. We highly suggest that every team automate the setup of their jobs and their pipelines. Another way of expressing build pipelines as code is the new Jenkins Pipeline feature - but still we see the need of Job DSL seeder jobs to seed the Jenkins pipeline jobs themselves and any additional jobs.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/job-dsl.md"},{"flag":"default","featured":true,"revisions":[{"name":"keycloak","release":"2018-03-01","title":"Keycloak","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/keycloak.md","body":"Most distributed systems still face a growing demand for user management, authentication, authorization and Single sign-on. In light of a growing security demand and specialization, the Open Source project JBoss Keycloak is a perfect match.
\nKeyloak has been a growing project from the outset and has a strong community. Keyloak is based on standards such as OAuth2, OIDC and SAML2. Securing a distributed system is supported by adapters, which are provided by Keycloak developers for different technology stacks. If there is no adapter for your technology stack, an integration on the protocol level with a library is simple. Many configurable features require no coding in the integrated projects. The required configuration is managed via code and promoted as usual.
\nWe use Keycloak in our OM3 suite for several authentication-related use cases – such as user management for system users and Single sign-on for customers. The OAuth access tokens can be used to secure APIs that access sensitive information. In addition, Keycloak is part of the AOE infrastructure and helps in securing the various services to support employees and customers.
\n"},{"name":"keycloak","release":"2017-03-01","title":"Keycloak","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/keycloak.md","body":"User management, authentication, authorization and Single Sign-On are part of most distributed systems nowadays. Building these sensitive and serious parts on your own might be a problem due to knowledge- and budget restrictions. Because of growing requirements in that field (social logins, single sign-on, federation, two-factor authentication, etc.), as well as growing security concerns, building these things on your own has become more challenging during the past decade.
\nAs a consequence, the recommendation is: use an existing solution and connect it with your project's codebase using provided standards. Our recommended solution is the Open Source project JBoss Keycloak. We use Keycloak in our OM3 suite for several authentication-related use cases - such as user management for system users and single sign-on for customers. The OAuth access tokens can be used to secure APIs that access sensitive information.
\nKeyloak is based on standards such as OAuth2, OIDC and SAML2. Securing a distributed system is supported by adapters, which are provided by the Keycloak developers for different technology stacks. If there is no adapter for your technology stack, an integration on protocol level with a library is simple. A lot of configurable features require no coding in the integrated projects.
\nBy design, the Keycloak project offers customizability and extensibility via so-called SPIs, e.g. a custom authenticator can be implemented to address project specific problems.
\nKeycloak normally runs standalone and can use various database products. A docker image is available to start in a containerized environment.
\nKeycloak might be overkill, depending on your project needs. For a simple integration with, for instance, a social login provider (Facebock, Twitter, etc.) Keycloak might be too much. For a JVM project, the pac4j library might be an alternative. If a cloud-based solution is preferred and data privacy concerns are not an issue, Auth0 might be the choice.
\n"}],"name":"keycloak","title":"Keycloak","ring":1,"quadrant":"tools","body":"Most distributed systems still face a growing demand for user management, authentication, authorization and Single sign-on. In light of a growing security demand and specialization, the Open Source project JBoss Keycloak is a perfect match.
\nKeyloak has been a growing project from the outset and has a strong community. Keyloak is based on standards such as OAuth2, OIDC and SAML2. Securing a distributed system is supported by adapters, which are provided by Keycloak developers for different technology stacks. If there is no adapter for your technology stack, an integration on the protocol level with a library is simple. Many configurable features require no coding in the integrated projects. The required configuration is managed via code and promoted as usual.
\nWe use Keycloak in our OM3 suite for several authentication-related use cases – such as user management for system users and Single sign-on for customers. The OAuth access tokens can be used to secure APIs that access sensitive information. In addition, Keycloak is part of the AOE infrastructure and helps in securing the various services to support employees and customers.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/keycloak.md"},{"flag":"new","featured":true,"revisions":[{"name":"kotlin","release":"2019-11-01","title":"Kotlin","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/kotlin.md","body":"Kotlin is used successfully in production by multiple teams.
\nKotlin is 100% interoperable with Java. It means the code can live side-by-side in one code base and interact.\nFrom the beginning it was designed with practical thought in mind. So the IDE Support in IntelliJ is really great.
\nThe Spring Framework Developer put a lot of effort that Springs play well together with Kotlin.
\nWith it's concise syntax, null safety, \nDue to its explicit type system, this language is also great replacement for Groovy usage with Gradle.
\n"}],"name":"kotlin","title":"Kotlin","ring":1,"quadrant":"languages-and-frameworks","body":"Kotlin is used successfully in production by multiple teams.
\nKotlin is 100% interoperable with Java. It means the code can live side-by-side in one code base and interact.\nFrom the beginning it was designed with practical thought in mind. So the IDE Support in IntelliJ is really great.
\nThe Spring Framework Developer put a lot of effort that Springs play well together with Kotlin.
\nWith it's concise syntax, null safety, \nDue to its explicit type system, this language is also great replacement for Groovy usage with Gradle.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/kotlin.md"},{"flag":"default","featured":true,"revisions":[{"name":"kubernetes","release":"2018-03-01","title":"Kubernetes","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/kubernetes.md","body":"Kubernetes has developed into the quasi-standard for container orchestration: Nearly every cloud provider provides managed Kubernetes, and even Docker Enterprise uses Kubernetes.\nWe are running several production systems with Kubernetes and we are using it in concepts such as:
\nKubernetes is a container orchestration platform, which supports many different infrastructure providers. It allows you to deploy containers and takes care of running, scaling or self-healing your applications based on configurations you provide. It's based on years of knowledge and experience Google gained by using containers.
\nAt AOE, we started Kubernetes in a test environment on bare metal to experiment with it. It's currently used for running AOE internal apps such as dashboards as well as running builds in containers. We also started to use it for upcoming projects to run and manage several services. There are Tools to automate the setup of kubernetes in AWS like Cops. Another helpful tool is Minikube, which allows to test and run kubernetes locally.
\n"}],"name":"kubernetes","title":"Kubernetes","ring":1,"quadrant":"platforms-and-aoe-services","body":"Kubernetes has developed into the quasi-standard for container orchestration: Nearly every cloud provider provides managed Kubernetes, and even Docker Enterprise uses Kubernetes.\nWe are running several production systems with Kubernetes and we are using it in concepts such as:
\nRebuilding and packaging software from "third parties" (e.g. PHP, MySQL, Redis, Nginx, Java,...) implies starting to maintain the packaging for the desired distribution.
\nEven with tool support and targeted for automation, we found that building those packages is very often unstable. The effort to keep up with the upstream changes (security changes, fixes, etc...) exceeds the benefit in most cases. We prefer to not create our own packages and rather use what's available in the distribution repository.
\n"}],"name":"maintain-third-party-packages","title":"maintain-third-party-packages.md","quadrant":"platforms-and-aoe-services","body":"Rebuilding and packaging software from "third parties" (e.g. PHP, MySQL, Redis, Nginx, Java,...) implies starting to maintain the packaging for the desired distribution.
\nEven with tool support and targeted for automation, we found that building those packages is very often unstable. The effort to keep up with the upstream changes (security changes, fixes, etc...) exceeds the benefit in most cases. We prefer to not create our own packages and rather use what's available in the distribution repository.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/maintain-third-party-packages.md"},{"flag":"changed","featured":true,"revisions":[{"name":"micro-frontends","release":"2019-11-01","title":"Micro Frontends","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/micro-frontends.md","body":"When deciding on a system architecture we are always striving for technology neutralism. This is to allow us to stay\nflexible with future decisions. Micro Frontends can be a tool to support us with this goal.\nWe favor protocols and methods, such as plain HTML and HTTP, over specific technologies when designing Micro Frontends.
\nSince Micro Frontends have proven to allow use move fast and agile, we moved this pattern to "trial".
\n"},{"name":"micro-frontends","release":"2018-03-01","title":"Micro Frontends","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/micro-frontends.md","body":"We see many benefits in Microservices – especially in large teams – but often this architecture \ndoes not involve the user interface. Instead, you might end up maintaining a frontend monolith. With Micro Frontends \nyou enable your frontend developers to gain the same benefits that we have grown accustomed to in a Microservice architecture: \nDecoupled components, which are developed and deployed by independent teams. But what sounds reasonable comes with \nchallenges. Integrating different Frontends on the client- or server-side can be tricky, as well as keeping the overall \nUser Experience consistent.
\nDespite the challenges, Micro Frontends help us to develop large applications across multiple teams. Developers can\nwork more independently without having too much trouble maintaining a large codebase. Being able to update oder \nreplace Frontend libraries in some parts of the application is yet another benefit in the fast-moving world of \nfrontend development.
\n"}],"name":"micro-frontends","title":"Micro Frontends","ring":2,"quadrant":"methods-and-patterns","body":"When deciding on a system architecture we are always striving for technology neutralism. This is to allow us to stay\nflexible with future decisions. Micro Frontends can be a tool to support us with this goal.\nWe favor protocols and methods, such as plain HTML and HTTP, over specific technologies when designing Micro Frontends.
\nSince Micro Frontends have proven to allow use move fast and agile, we moved this pattern to "trial".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/micro-frontends.md"},{"flag":"default","featured":true,"revisions":[{"name":"microservices","release":"2018-03-01","title":"Microservices","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/microservices.md","body":"We continue to belief in the microservices concept and its related patterns and best practices. However, it's worth mentioning that we we had to learn some lessons when it came to resilient thinking and deployment-related dependencies between microservices.
\nWe feel that our microservice-based applications are more robust than monolithic ones have been. Thanks to the \nsplit of the overall complexity into multiple services, new employees or team members are becoming productive within days or a few weeks.
\nIn order to get microservices right and to benefit from the advantages, there is a lot more required.\nThe following "pyramid of need" for microservices shows this:\n
Microservices as an architecture style is getting very popular recently. At AOE, more and more teams are adding microservices to their existing application architecture or designing applications with microservices.
\nWe also like the term "self-contained systems" instead of microservices.
\nThe benefits we see are:
\nRelated patterns are Strategic Domain Driven Design as an approach to wisely cut your architecture according to useful bounded contexts and decide on the relevant communication and "translation" between the services.\nIn case you are looking for a small visualisation tool for your microservice architecture you might find vistecture useful.
\nAlso Resilience thinking is especially important when designing an application as a suite of microservices.
\n"}],"name":"microservices","title":"Microservices","ring":1,"quadrant":"methods-and-patterns","body":"We continue to belief in the microservices concept and its related patterns and best practices. However, it's worth mentioning that we we had to learn some lessons when it came to resilient thinking and deployment-related dependencies between microservices.
\nWe feel that our microservice-based applications are more robust than monolithic ones have been. Thanks to the \nsplit of the overall complexity into multiple services, new employees or team members are becoming productive within days or a few weeks.
\nIn order to get microservices right and to benefit from the advantages, there is a lot more required.\nThe following "pyramid of need" for microservices shows this:\n
Neo4j is one of the oldest Open Source Graph Databases. It's one of the rare NoSQL databases that is fully ACID-compliant. We see two main advantages of graph databases:
\nNeo4j database is implemented in Java and can therefore be embedded in your application if you live on the JVM.
\nYou can also choose to run it in a classic server mode, which then provides you with the possibility to either use its REST API or connect to it via the BOLT Driver, which has native bindings for the most popular languages.
\nThe cypher query language which comes with Neo4j is a declarative graph query language that allows for expressive and efficient querying and updating of the graph.
\nAt AOE, we use Neo4j mostly for explorative, interactive work with weakly structured or highly connected data, also we are evaluating this for knowledge-based recommendations in our Searchperience product.
\n"}],"name":"neo4j","title":"neo4j.md","quadrant":"platforms-and-aoe-services","body":"Neo4j is one of the oldest Open Source Graph Databases. It's one of the rare NoSQL databases that is fully ACID-compliant. We see two main advantages of graph databases:
\nNeo4j database is implemented in Java and can therefore be embedded in your application if you live on the JVM.
\nYou can also choose to run it in a classic server mode, which then provides you with the possibility to either use its REST API or connect to it via the BOLT Driver, which has native bindings for the most popular languages.
\nThe cypher query language which comes with Neo4j is a declarative graph query language that allows for expressive and efficient querying and updating of the graph.
\nAt AOE, we use Neo4j mostly for explorative, interactive work with weakly structured or highly connected data, also we are evaluating this for knowledge-based recommendations in our Searchperience product.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/neo4j.md"},{"flag":"new","featured":true,"revisions":[{"name":"next-js","release":"2019-11-01","title":"Next.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/next-js.md","body":"Next.js is a JavaScript and React based framework which makes use of server side rendering.
\n"}],"name":"next-js","title":"Next.js","ring":2,"quadrant":"languages-and-frameworks","body":"Next.js is a JavaScript and React based framework which makes use of server side rendering.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/next-js.md"},{"flag":"default","featured":true,"revisions":[{"name":"node-js","release":"2017-03-01","title":"node.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/node-js.md","body":"Node.js is a no- browser JavaScript execution runtime. Its basis is Google's V8 engine. Node is event-driven and follows a non-blocking I/O model.
\nIt’s a good choice for restful APIs, realtime purposes or situations where many concurrent connections are expected, where each connection has a lightweight memory footprint.
\nNode allows separation of concerns by using its package manager npm, which is also the largest ecosystem of Open Source libraries (modules).
\nModules are added as dependencies and offer a wide range of functionalities in a range from simple helper functions to mature web frameworks such as express.js.
\nMany PaaS providers (AWS, Google Cloud Platform, Azure) support node, including deployment and monitoring services out of the box for scalable stateless applications.
\nAt AOE, we successfully use node.js-based applications for smaller services or internal tools such dashboards.
\n"}],"name":"node-js","title":"node.js","ring":2,"quadrant":"languages-and-frameworks","body":"Node.js is a no- browser JavaScript execution runtime. Its basis is Google's V8 engine. Node is event-driven and follows a non-blocking I/O model.
\nIt’s a good choice for restful APIs, realtime purposes or situations where many concurrent connections are expected, where each connection has a lightweight memory footprint.
\nNode allows separation of concerns by using its package manager npm, which is also the largest ecosystem of Open Source libraries (modules).
\nModules are added as dependencies and offer a wide range of functionalities in a range from simple helper functions to mature web frameworks such as express.js.
\nMany PaaS providers (AWS, Google Cloud Platform, Azure) support node, including deployment and monitoring services out of the box for scalable stateless applications.
\nAt AOE, we successfully use node.js-based applications for smaller services or internal tools such dashboards.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/node-js.md"},{"flag":"new","featured":false,"revisions":[{"name":"nosql","release":"2019-11-01","title":"NoSQL","ring":2,"quadrant":"methods-and-patterns","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/nosql.md","body":"NoSQL technologies are established solutions that allows for scaling and handling big datasets.\nWe use Technologies like Redis, Elasticsearch and Neo4J but there are many others that are powering the NoSQL space.
\n"}],"name":"nosql","title":"NoSQL","ring":2,"quadrant":"methods-and-patterns","body":"NoSQL technologies are established solutions that allows for scaling and handling big datasets.\nWe use Technologies like Redis, Elasticsearch and Neo4J but there are many others that are powering the NoSQL space.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/nosql.md"},{"flag":"default","featured":false,"revisions":[{"name":"npm","release":"2017-03-01","title":"NPM","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/npm.md","body":"NPM is one of, if not the most, popular package manager for JavaScript. Because of the big community, you can find nearly every dependency in npm.
\nInstead of other package managers such as bower, you have to write your packages as modules. This unifies the way you have to use, test and, of course, understand dependencies.
\nNPM creates a tree for your dependencies and their nesting dependencies. Because of this, you don't need to handle version conflicts, since every dependency uses there own version of e.g. webpack.
\nWith shrinkwrap you have a robust tool to lock down and manage the versions of your dependencies - following the Pin (external) dependencies approach.
\nFor each package you have to classify your dependencies:
\nWith scripts you get support for the most common build lifecycle steps, e.g. build, start, test ...
\nOther useful features:
\nNPM is one of, if not the most, popular package manager for JavaScript. Because of the big community, you can find nearly every dependency in npm.
\nInstead of other package managers such as bower, you have to write your packages as modules. This unifies the way you have to use, test and, of course, understand dependencies.
\nNPM creates a tree for your dependencies and their nesting dependencies. Because of this, you don't need to handle version conflicts, since every dependency uses there own version of e.g. webpack.
\nWith shrinkwrap you have a robust tool to lock down and manage the versions of your dependencies - following the Pin (external) dependencies approach.
\nFor each package you have to classify your dependencies:
\nWith scripts you get support for the most common build lifecycle steps, e.g. build, start, test ...
\nOther useful features:
\nThe OpenAPI Specification is becoming a broadly adopted industry standard for describing modern REST APIs. Other initiatives like RAML have joined the OpenAPI Initiative.
\nOpenAPI v2 version is basically the former Swagger - and Swagger provides useful tools for OpenAPI like the online editor and viewer http://editor.swagger.io/\nWe have also found that this version currently have a good tool support accross languages, so you will find API client and server generation tools for a lot of languages, which makes it quite easy to connect to an API that is described in OpenAPI standard.
\nOpenAPI v3
\nOpenAPI v3 adds more features to the specification - for example the ability to describe APIs supporting request/callback pattern.
\nThere is a very good api designer https://www.apicur.io/ and a good mock generator http://microcks.github.io/index.html
\nThe general tool support is excellent. See https://openapi.tools/
\n"}],"name":"open-api","title":"Open API","ring":1,"quadrant":"tools","body":"The OpenAPI Specification is becoming a broadly adopted industry standard for describing modern REST APIs. Other initiatives like RAML have joined the OpenAPI Initiative.
\nOpenAPI v2 version is basically the former Swagger - and Swagger provides useful tools for OpenAPI like the online editor and viewer http://editor.swagger.io/\nWe have also found that this version currently have a good tool support accross languages, so you will find API client and server generation tools for a lot of languages, which makes it quite easy to connect to an API that is described in OpenAPI standard.
\nOpenAPI v3
\nOpenAPI v3 adds more features to the specification - for example the ability to describe APIs supporting request/callback pattern.
\nThere is a very good api designer https://www.apicur.io/ and a good mock generator http://microcks.github.io/index.html
\nThe general tool support is excellent. See https://openapi.tools/
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/open-api.md"},{"flag":"default","featured":false,"revisions":[{"name":"oro-platform","release":"2017-03-01","title":"Oro Platform","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/oro-platform.md","body":"OroPlatform is a framework built on Symfony 2 with the purpose of providing the features you need in every business application that is not your core business logic. Hence, it serves you with a basic application, providing login and complex security, menus and menu management, history, audit trails, settings management, etc. It comes complete with a design and many widgets to be utilized in own entities. Other Features of OroPlatform are, for example, a WebSocket server-driven user interface, queue-based task runners, REST Interface, as well as messaging- and workflow systems.
\nOne of the central features is that entities, which are to be managed within the system, can be set up completely by configuring them using the UI. This in itself implies that it puts another abstraction layer upon doctrine and symfony defaults.
\nAs with every framework or application, the general-purpose goals and abstraction comes with drawbacks: In fact, OroPlatform modifies and extends the common way of doing things in Symfony in several places, which makes the developer's life hard at times. Also, the UI and package managing are set in such a way that they are hard to extend or replace. The many additional abstraction layers can result in decreased performance.
\nOn the other hand, OroPlatform gives you a good headstart for prototyping and frees you from rebuilding common requirements - which makes it a relevant choice for business applications with the need to manage several entities in a backend. Also, projects such Akeneo or OroCRM use OroPlatform with success.
\nSince the project is still young, the future development and improvements need to be watched. We classified the Framework as Assess.
\n"}],"name":"oro-platform","title":"oro-platform.md","quadrant":"tools","body":"OroPlatform is a framework built on Symfony 2 with the purpose of providing the features you need in every business application that is not your core business logic. Hence, it serves you with a basic application, providing login and complex security, menus and menu management, history, audit trails, settings management, etc. It comes complete with a design and many widgets to be utilized in own entities. Other Features of OroPlatform are, for example, a WebSocket server-driven user interface, queue-based task runners, REST Interface, as well as messaging- and workflow systems.
\nOne of the central features is that entities, which are to be managed within the system, can be set up completely by configuring them using the UI. This in itself implies that it puts another abstraction layer upon doctrine and symfony defaults.
\nAs with every framework or application, the general-purpose goals and abstraction comes with drawbacks: In fact, OroPlatform modifies and extends the common way of doing things in Symfony in several places, which makes the developer's life hard at times. Also, the UI and package managing are set in such a way that they are hard to extend or replace. The many additional abstraction layers can result in decreased performance.
\nOn the other hand, OroPlatform gives you a good headstart for prototyping and frees you from rebuilding common requirements - which makes it a relevant choice for business applications with the need to manage several entities in a backend. Also, projects such Akeneo or OroCRM use OroPlatform with success.
\nSince the project is still young, the future development and improvements need to be watched. We classified the Framework as Assess.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/oro-platform.md"},{"flag":"new","featured":true,"revisions":[{"name":"packer","release":"2019-11-01","title":"Packer","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/packer.md","body":"Hashicorp Packer is a lightweight tool which automates the creation of any type of machine images (Machine Image As A Code) for multiple platforms. \nPacker is not a replacement of configuration management tools like Ansible. Packer works with tools like ansible to install software while creating images. \nPacker uses a configuration file to create a machine image. It uses the concepts of builders to spin up an instance, run provisioners to configure applications or services. \nOnce setup is done, it shuts the instance down and save new baked machine instance with any needed post-processing. \nPacker only builds images. But once you have them you can deploy your infrastructure quickly and even scale by spawning any number of instances without doing extra configuration. \nAnother benefit is, that machine images can be tested to verify if they are working correctly.\nPacker supports multiple cloud providers like AWS, GCP, Digital Ocean etc.
\nMachine images are important for modern deployment pipelines and fast ramp of of new infrastructure. \nWe are using Packer to build so called "Golden images" that are used in our Infrastructure as Code based provisionings.
\n"}],"name":"packer","title":"Packer","ring":1,"quadrant":"platforms-and-aoe-services","body":"Hashicorp Packer is a lightweight tool which automates the creation of any type of machine images (Machine Image As A Code) for multiple platforms. \nPacker is not a replacement of configuration management tools like Ansible. Packer works with tools like ansible to install software while creating images. \nPacker uses a configuration file to create a machine image. It uses the concepts of builders to spin up an instance, run provisioners to configure applications or services. \nOnce setup is done, it shuts the instance down and save new baked machine instance with any needed post-processing. \nPacker only builds images. But once you have them you can deploy your infrastructure quickly and even scale by spawning any number of instances without doing extra configuration. \nAnother benefit is, that machine images can be tested to verify if they are working correctly.\nPacker supports multiple cloud providers like AWS, GCP, Digital Ocean etc.
\nMachine images are important for modern deployment pipelines and fast ramp of of new infrastructure. \nWe are using Packer to build so called "Golden images" that are used in our Infrastructure as Code based provisionings.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/packer.md"},{"flag":"default","featured":true,"revisions":[{"name":"pact","release":"2018-03-01","title":"PACT","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pact.md","body":"PACT (http://pact.io/) is a family of frameworks that provides support for Consumer Driven Contract testing accross different langauages and frameworks.
\nConsumer Driven Contract testing is a pattern for testing interfaces/boundaries between services.
\nIt allows "consumers" to run tests against a defined Mock and record the defined interactions (=PACT).\nIt puts "providers" in the position to run the PACT tests inside theire Continuous Integration Pipelines, so that the provider knows if he might break any consumers.
\nThis approach makes sense in organisations where teams collaborate more closely (See Strategic Domain Driven Design ), e.g. to build Microservice oriented architectures
\nConsumer Driven Contract Testing and how it can be conducted with PACT is documented very nicely on the official PACT website: https://docs.pact.io/.
\n"}],"name":"pact","title":"PACT","ring":2,"quadrant":"tools","body":"PACT (http://pact.io/) is a family of frameworks that provides support for Consumer Driven Contract testing accross different langauages and frameworks.
\nConsumer Driven Contract testing is a pattern for testing interfaces/boundaries between services.
\nIt allows "consumers" to run tests against a defined Mock and record the defined interactions (=PACT).\nIt puts "providers" in the position to run the PACT tests inside theire Continuous Integration Pipelines, so that the provider knows if he might break any consumers.
\nThis approach makes sense in organisations where teams collaborate more closely (See Strategic Domain Driven Design ), e.g. to build Microservice oriented architectures
\nConsumer Driven Contract Testing and how it can be conducted with PACT is documented very nicely on the official PACT website: https://docs.pact.io/.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pact.md"},{"flag":"default","featured":false,"revisions":[{"name":"pair-working","release":"2017-03-01","title":"Pair working","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pair-working.md","body":"We summarized the practices of pair programming and administrating as pair working.
\nDerived as a practice from eXtreme Programming (XP), pair programming is a method/pattern that aims for fine-scaled feedback within a team.
\nAt AOE, some developers and operators work in pairs, not constantly, but from time to time. Most teams have positive experiences using this method, but not all teams tried the by-the-book-approach (driver and navigator principle). Especially for non-trival tasks, pair working results in rapid knowlegde exchange and better results with less bugs. We encourage the teams to try this approach more often.
\n"}],"name":"pair-working","title":"pair-working.md","quadrant":"methods-and-patterns","body":"We summarized the practices of pair programming and administrating as pair working.
\nDerived as a practice from eXtreme Programming (XP), pair programming is a method/pattern that aims for fine-scaled feedback within a team.
\nAt AOE, some developers and operators work in pairs, not constantly, but from time to time. Most teams have positive experiences using this method, but not all teams tried the by-the-book-approach (driver and navigator principle). Especially for non-trival tasks, pair working results in rapid knowlegde exchange and better results with less bugs. We encourage the teams to try this approach more often.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pair-working.md"},{"flag":"default","featured":false,"revisions":[{"name":"phan","release":"2017-03-01","title":"phan","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/phan.md","body":"Phan is a static code analyzer for PHP7, which is very fast, since it uses the PHP 7 AST (abstract syntax tree). Phan basically offers some of the safety that otherwise only compiled type-safe languages have - such as checking function references and return types.
\nWe expect at least the following benefits:
\nWe think Phan can be used in the deployment pipeline or as commit hooks for PHP 7-based applications. For a full Feature list check here.
\n"}],"name":"phan","title":"phan.md","quadrant":"tools","body":"Phan is a static code analyzer for PHP7, which is very fast, since it uses the PHP 7 AST (abstract syntax tree). Phan basically offers some of the safety that otherwise only compiled type-safe languages have - such as checking function references and return types.
\nWe expect at least the following benefits:
\nWe think Phan can be used in the deployment pipeline or as commit hooks for PHP 7-based applications. For a full Feature list check here.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/phan.md"},{"flag":"default","featured":false,"revisions":[{"name":"php7-over-php5","release":"2017-03-01","title":"PHP7 over PHP5","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/php7-over-php5.md","body":"PHP 5 has been around for a very long time, and can be considered as the PHP version that defined where PHP wants to go in the future.\nWith proper OOP, support for clojures and a steadily improving type system, it has become a very mature language.\nHowever, in the past 3 years, Facebook introduced HHVM, which became a major influence on PHP 7 and eventually brought a lot of improvements not only for the execution speed, but also with proper type hints and other features.
\nHere at AOE, we have numerous PHP projects, and we often kept it backwards-compatible to make sure that it will run on older systems. This is comparable to the procedure most frameworks (Magento, OroPlatform and derived projects) use.
\nNow, PHP 5 has reached its end--of-life, and it is time to discontinue the backqards-compatibility in favor of better and more stable applications.\nEven though we can use the PHP 7 runtime while being PHP 5-compatible, it is not considered good practice anymore, as we can now rely on the PHP 7 features and use all of its advantages.
\nOne of the major points PHP 7 supports is proper typehinting and return types (apart from PhpDocs), which makes static analysis much easier and can improve the overall code quality significantly.
\n"}],"name":"php7-over-php5","title":"php7-over-php5.md","quadrant":"languages-and-frameworks","body":"PHP 5 has been around for a very long time, and can be considered as the PHP version that defined where PHP wants to go in the future.\nWith proper OOP, support for clojures and a steadily improving type system, it has become a very mature language.\nHowever, in the past 3 years, Facebook introduced HHVM, which became a major influence on PHP 7 and eventually brought a lot of improvements not only for the execution speed, but also with proper type hints and other features.
\nHere at AOE, we have numerous PHP projects, and we often kept it backwards-compatible to make sure that it will run on older systems. This is comparable to the procedure most frameworks (Magento, OroPlatform and derived projects) use.
\nNow, PHP 5 has reached its end--of-life, and it is time to discontinue the backqards-compatibility in favor of better and more stable applications.\nEven though we can use the PHP 7 runtime while being PHP 5-compatible, it is not considered good practice anymore, as we can now rely on the PHP 7 features and use all of its advantages.
\nOne of the major points PHP 7 supports is proper typehinting and return types (apart from PhpDocs), which makes static analysis much easier and can improve the overall code quality significantly.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/php7-over-php5.md"},{"flag":"default","featured":true,"revisions":[{"name":"pin-external-dependencies","release":"2017-03-01","title":"Pin external dependencies","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pin-external-dependencies.md","body":"A lot of applications have dependencies on other modules or components. We have\nused different approaches regarding how and when these dependencies are resolved\nand have agreed on using a method we call "Pin (External) dependencies".
\nThis is especially relevant for script languages, where the dependency\nmanagement references the code and not immutable prebuild binaries - and\ntherefore resolves the complete transient dependencies on the fly.
\nMost of these package- or dependency management solutions support two artefacts:
\nWe suggest the following:
\nFor updating of dependencies define a process in the team. This can either be\ndone on the dev-system or in a seperate automated CI job - both resulting in\nupdated dependency definitions in the applications VCS.
\n"}],"name":"pin-external-dependencies","title":"Pin external dependencies","ring":1,"quadrant":"methods-and-patterns","body":"A lot of applications have dependencies on other modules or components. We have\nused different approaches regarding how and when these dependencies are resolved\nand have agreed on using a method we call "Pin (External) dependencies".
\nThis is especially relevant for script languages, where the dependency\nmanagement references the code and not immutable prebuild binaries - and\ntherefore resolves the complete transient dependencies on the fly.
\nMost of these package- or dependency management solutions support two artefacts:
\nWe suggest the following:
\nFor updating of dependencies define a process in the team. This can either be\ndone on the dev-system or in a seperate automated CI job - both resulting in\nupdated dependency definitions in the applications VCS.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pin-external-dependencies.md"},{"flag":"default","featured":true,"revisions":[{"name":"pipeline-as-code","release":"2018-03-01","title":"Pipeline as Code","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pipeline-as-code.md","body":"We moved this pattern to adopt, because it is used by nearly every team and project now and is an important part of our automation.
\nFor Jenkins, we often use a mix of Job DSL and Jenkins Pipelines and recently also used Gitlab Pipelines.
\n"},{"name":"pipeline-as-code","release":"2017-03-01","title":"Pipeline as Code","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pipeline-as-code.md","body":"Continuous Integration and Delivery is a critical part of our development and deployment process at AOE. Using Jenkins for many years the "instructions" how to build, test and deploy applications were scattered between many custom scripts and the pipeline was often maintained by manual maintenance of Jenkins jobs. Soon, we realized that we need a more native way to express the full CI/CD pipeline process in code and manage it in version control.
\nBeing an important part of each project, the pipeline configuration should be managed as code and rolled out automatically - this also allows us to manage the pipeline itself applying the same standards that apply to application code.
\nWhile some teams started using Jenkins' JobDSL plugin, others explored the new Jenkins Pipeline - in both ways, the build artifacts should be published to an artifact repository such as Artifactory.
\n"}],"name":"pipeline-as-code","title":"Pipeline as Code","ring":1,"quadrant":"methods-and-patterns","body":"We moved this pattern to adopt, because it is used by nearly every team and project now and is an important part of our automation.
\nFor Jenkins, we often use a mix of Job DSL and Jenkins Pipelines and recently also used Gitlab Pipelines.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pipeline-as-code.md"},{"flag":"new","featured":true,"revisions":[{"name":"plant-uml","release":"2019-11-01","title":"Plant UML","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/plant-uml.md","body":"PlantUML is an open source project that allows to create UML diagrams in a text-based and declarative way.
\nSince it is integrated in tools like Confluence, IntelliJ and Gitlab we use it a lot to quickly document results of software design sessions.
\nAnother similar tools that use just plain javascript to render the diagrams is mermaid
\n"}],"name":"plant-uml","title":"Plant UML","ring":2,"quadrant":"tools","body":"PlantUML is an open source project that allows to create UML diagrams in a text-based and declarative way.
\nSince it is integrated in tools like Confluence, IntelliJ and Gitlab we use it a lot to quickly document results of software design sessions.
\nAnother similar tools that use just plain javascript to render the diagrams is mermaid
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/plant-uml.md"},{"flag":"default","featured":false,"revisions":[{"name":"play-framework","release":"2017-03-01","title":"Play Framework","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/play-framework.md","body":"The Play Framework is a lightweight (web)application framework for Java and Scala programmers.
\nA developer can choose from different modules to include necessary functionality such s accessing http resources, databases, and so on. As a consequence, the developer can choose, and is not distracted by or clobbered with irrelevant things. This approach is considered as minimalistic, but it is easy to include necessary functionality.
\nRegarding the architecture, Play is stateless and built on Akka. As a consequence, Play applications have much lower resource consumption regarding CPU und memory and can scale easily. Play manages concurrency without binding a request to a thread until the response is ready.
\nWith the use of "Futures" in your code you can turn synchronous tasks (such as IO or API call to another service) into asynchronous and you can build non-blocking applications. It is recommended to understand the principles Play uses to achieve performance and scalability.
\nPlay can act as backend service delivering JSON, for esample. For building web applications. the Twirl template engine enables server-side rendering of html pages. These html pages can include css and java script parts of your own choice.
\n"}],"name":"play-framework","title":"play-framework.md","quadrant":"languages-and-frameworks","body":"The Play Framework is a lightweight (web)application framework for Java and Scala programmers.
\nA developer can choose from different modules to include necessary functionality such s accessing http resources, databases, and so on. As a consequence, the developer can choose, and is not distracted by or clobbered with irrelevant things. This approach is considered as minimalistic, but it is easy to include necessary functionality.
\nRegarding the architecture, Play is stateless and built on Akka. As a consequence, Play applications have much lower resource consumption regarding CPU und memory and can scale easily. Play manages concurrency without binding a request to a thread until the response is ready.
\nWith the use of "Futures" in your code you can turn synchronous tasks (such as IO or API call to another service) into asynchronous and you can build non-blocking applications. It is recommended to understand the principles Play uses to achieve performance and scalability.
\nPlay can act as backend service delivering JSON, for esample. For building web applications. the Twirl template engine enables server-side rendering of html pages. These html pages can include css and java script parts of your own choice.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/play-framework.md"},{"flag":"changed","featured":true,"revisions":[{"name":"ports-and-adapters","release":"2019-11-01","title":"Ports and Adapters","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/ports-and-adapters.md","body":"Updated to "adopt"
\n"},{"name":"ports-and-adapters","release":"2018-03-01","title":"Ports and Adapters","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/ports-and-adapters.md","body":"Ports and Adapters is an architecture or layering approach for software design. As with other layering approaches, it seperates different concerns in different layers, where dependencies are only allowed from the outside to the inside.
\nWe use "ports and adapters" with success for (larger) applications, which contain certain business logic and/or provide several ways to access the services.\nWe often use the approach hand-in-hand with Domain Driven Design. In comparison with other layering patterns (e.g. layered architecture) it allows you to have a true technology-free core (domain) model. Why? Because, with the concept of "secondary ports" (=interfaces), it inverts the control and allows outer layers to provide adapters (=implementations of the defined interface).\nIt also defines clear boundaries regarding where to put what logic of your application.
\nYou can find out more about the details and its origins in well-known blog posts such as The Clean Architecture or Hexagonal architecture
\nIn short, here is how we often layer such applications:
\nThese layers belong to every bounded context (modules) inside the application.
\nAre you searching for a potential timeless architecture for your critical application? Try implementing a potent technology-free domain model in the core layer and use ports and adapters to layer your application.
\n"}],"name":"ports-and-adapters","title":"Ports and Adapters","ring":1,"quadrant":"methods-and-patterns","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/ports-and-adapters.md"},{"flag":"default","featured":true,"revisions":[{"name":"postcss","release":"2017-03-01","title":"PostCSS","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/postcss.md","body":"PostCSS is a tool for transforming stylesheets with JavaScript plugins. It comes with a parser that reads your CSS file into an AST, pipes it through the loaded plugins and finally\nstringifies it back into a (transformed) CSS output file.
\nWe at AOE love PostCSS because it gives us the power to use CSS Modules, which finally ends the curse of global CSS.
\nIt also has a huge list of more than 350 other available plugins.\nSure, not all of them are useful, but the sheer number of plugins shows how easy it is to write your own plugin for it.\nIn fact, it´s just a matter of writing a single JS function.
\nFinally, PostCSS is very fast and easy to setup because it runs 100% in JavaScript.\nCompared to SASS as a preprocessor, it feels much more powerful but at the same time less bloated with superfluous functionality because everything comes in its own little plugin
\n"}],"name":"postcss","title":"PostCSS","ring":1,"quadrant":"languages-and-frameworks","body":"PostCSS is a tool for transforming stylesheets with JavaScript plugins. It comes with a parser that reads your CSS file into an AST, pipes it through the loaded plugins and finally\nstringifies it back into a (transformed) CSS output file.
\nWe at AOE love PostCSS because it gives us the power to use CSS Modules, which finally ends the curse of global CSS.
\nIt also has a huge list of more than 350 other available plugins.\nSure, not all of them are useful, but the sheer number of plugins shows how easy it is to write your own plugin for it.\nIn fact, it´s just a matter of writing a single JS function.
\nFinally, PostCSS is very fast and easy to setup because it runs 100% in JavaScript.\nCompared to SASS as a preprocessor, it feels much more powerful but at the same time less bloated with superfluous functionality because everything comes in its own little plugin
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/postcss.md"},{"flag":"new","featured":false,"revisions":[{"name":"postgres","release":"2019-11-01","title":"PostgreSQL","ring":1,"quadrant":"tools","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postgres.md","body":"PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance.
\n"}],"name":"postgres","title":"PostgreSQL","ring":1,"quadrant":"tools","body":"PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postgres.md"},{"flag":"new","featured":false,"revisions":[{"name":"postman","release":"2019-11-01","title":"Postman","ring":2,"quadrant":"tools","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postman.md","body":"Postman is an API testing and documentation tool. Requests can be bundled into folders \nand easily be configured to be executed against multiple environments. Responses can be evaluated using the "test" feature.
\nEven automated testing is possible using Newman as an addition to Postman.
\n"}],"name":"postman","title":"Postman","ring":2,"quadrant":"tools","body":"Postman is an API testing and documentation tool. Requests can be bundled into folders \nand easily be configured to be executed against multiple environments. Responses can be evaluated using the "test" feature.
\nEven automated testing is possible using Newman as an addition to Postman.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postman.md"},{"flag":"default","featured":false,"revisions":[{"name":"protobuf","release":"2017-03-01","title":"Protobuf","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/protobuf.md","body":"In an increasingly microservice-oriented environment, it is crucial that all parties agree on a common language and wire format for data exchange.
\nJSON and XML are two well-known formats for serialization of data; however, they come with a few drawbacks. JSON is completely dynamic without any validation (though there is json-schema) and XML uses an extremely heavyweight syntax, which carries a huge overhead, so parsing and transport becomes quite slow.
\nProtobuf, amongst others, is an approach to solving this problem by using well-defined schemas to create language-specific code, which serializes/marshals and deserializes/unmarshals data. One of the key features is the built-in support for evolving schemas; it is easily possible to incrementally extend the definition while staying backwards-compatible and compose messages consisting of several sub-messages.
\nIf you are looking for a way to have different systems agree on a common protocol on top of a transport layer (such as AMQP or HTTP), Protobuf is definitely worth examining more closely and should be assessed.
\n"}],"name":"protobuf","title":"protobuf.md","quadrant":"languages-and-frameworks","body":"In an increasingly microservice-oriented environment, it is crucial that all parties agree on a common language and wire format for data exchange.
\nJSON and XML are two well-known formats for serialization of data; however, they come with a few drawbacks. JSON is completely dynamic without any validation (though there is json-schema) and XML uses an extremely heavyweight syntax, which carries a huge overhead, so parsing and transport becomes quite slow.
\nProtobuf, amongst others, is an approach to solving this problem by using well-defined schemas to create language-specific code, which serializes/marshals and deserializes/unmarshals data. One of the key features is the built-in support for evolving schemas; it is easily possible to incrementally extend the definition while staying backwards-compatible and compose messages consisting of several sub-messages.
\nIf you are looking for a way to have different systems agree on a common protocol on top of a transport layer (such as AMQP or HTTP), Protobuf is definitely worth examining more closely and should be assessed.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/protobuf.md"},{"flag":"default","featured":false,"revisions":[{"name":"puppet-environments","release":"2018-03-01","title":"Puppet Environments","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/puppet-environments.md","body":"Puppet Environments has proven to work well for our projects using Puppet.
\n"},{"name":"puppet-environments","release":"2017-03-01","title":"Puppet Environments","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/puppet-environments.md","body":"Puppet is an Open Source configuration management tool. It is used by a wide range of different companies world-wide, e.g. the Wikimedia Foundation, Mozilla, Reddit, CERN, Dell, Rackspace, Twitter, the New York Stock Exchange, PayPal, Disney, Citrix Systems, Spotify, Oracle, the University of California Los Angeles, the University of North Texas, QVC, Intel, Google and others.
\nPuppet has been the basic tool to address Continuous Configuration Automation (CCA) in AOE's Infrastructure as Code strategy (IaC) for more than 4 years.
\nIntended to give projects the means to develop and maintain their own infrastructure, separated and not influenced by other projects, Puppet environments, together with Puppet module versioning and ENC, have been introduced.\\\nPuppet Environments are rated "Trial". It supports our strategy of Infrastructure as Code (IaC) and links it to our DevOps approach, enabling project teams to set up and customize their own infrastructure.
\nTeams that want to use the Puppet Environments service from the AOE IT Team will find detailed information about the implemented CI/CD process for this.
\n"}],"name":"puppet-environments","title":"puppet-environments.md","quadrant":"platforms-and-aoe-services","body":"Puppet Environments has proven to work well for our projects using Puppet.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/puppet-environments.md"},{"flag":"changed","featured":true,"revisions":[{"name":"rabbitmq","release":"2019-11-01","title":"RabbitMQ","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/rabbitmq.md","body":"RabbitMQ has proven to work very well for messaging in our projects, thats why we updated it to "adopt".
\n"},{"name":"rabbitmq","release":"2017-03-01","title":"RabbitMQ","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rabbitmq.md","body":"RabbitMQ is an Open Source message broker - implementing the Advanced Message Queuing Protocol (AMQP) protocol. It provides a reliable and scalable way to transport data between loosely coupled applications, using different EAI patterns such as the Publish & Subscriber pattern. AMQP supports direct and fan-out exchanges (broadcasts) as well as topics. Queuing mechanisms allow for robust architectures, mitigating the risks of application downtimes. Typically, a RabbitMQ server can easily buffer millions of messages. RabbitMQ supports JMS in addition to AMQP. It is not intended to use JMS for new systems, but it makes RabbitMQ useful for integrating legacy systems.
\nThere are several alternative solutions to RabbitMQ, e. g. the free Apache ActiveMQ, which is integrated in Anypoint platform. ActiveMQ implements a somewhat simpler routing concept than RabbitMQ, but offers more protocols. Commercial products in this area are offered by IBM (Websphere MQ), Fiorano and almost every vendor of ESB products.
\nWe use RabbitMQ internally for transferring messages safely in our logging ecosystem between Logstash proxies and servers using direct and fan-out exchanges for delivering messages to appropriate destinations. RabbitMQ is also used to asynchronously trigger Jenkins jobs from our SCMs to mitigate heavy load on the SCMs, usually caused by Jenkins polls for SCM changes. Additionally, some critical events for monitoring are using RabbitMQ for guaranteed notification.
\nRabbitMQ is rated "Trial". It fits into our approach to build robust, resilient systems and use asyncronous messages for loosely coupled communications between components. In practice, RabbitMQ proved to be stable and dealt well with service interruptions from failures and maintenance slots. A common pain point is RabbitMQ as a single point of failure disrupting the data flow in a system. This issue is currently approached by setting up a HA cluster for RabbitMQ. The outcome of this approach will clarify the extent of future usage of RabbitMQ in our systems.
\n 
RabbitMQ has proven to work very well for messaging in our projects, thats why we updated it to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/rabbitmq.md"},{"flag":"changed","featured":true,"revisions":[{"name":"raml","release":"2019-11-01","title":"RAML","ring":4,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/raml.md","body":"Since the RAML project has decided to join the OpenAPI initiative and the RAML ecosystem lacks further development and additional tools, we decided to use and recommend using "OpenAPI specififcation (OAS)" as description standard instead.
\nRAML still provides advantages in modeling an API through it's more expressive modeling language and can produce OAS
\n"},{"name":"raml","release":"2017-03-01","title":"RAML","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/raml.md","body":"RAML (the RESTful API Modelling Language) is a YAML-based API specification language. It's now available in version 1.0. The philosophy behind it is to specify the API before implementation.
\nIf you follow this philosophy, you can design your API and discuss it with your clients and team before implementing a single line of code. API consumers are able to implement against the API before it's really up and running. The api-console provides a beautiful online documentation with "try it" features for your raml definition.
\nThe RAML ecosystem provides a rich toolset for code generation (e.g. online editor; api-workbench), automatically generated documentation, code generation (e.g. go-raml), mocking, testing and much more. We prefer RAML over Swagger because of this.
\n"}],"name":"raml","title":"RAML","ring":4,"quadrant":"languages-and-frameworks","body":"Since the RAML project has decided to join the OpenAPI initiative and the RAML ecosystem lacks further development and additional tools, we decided to use and recommend using "OpenAPI specififcation (OAS)" as description standard instead.
\nRAML still provides advantages in modeling an API through its more expressive modeling language and can produce OAS
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/raml.md"},{"flag":"default","featured":true,"revisions":[{"name":"react","release":"2018-03-01","title":"React.js","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/react.md","body":"The past months have shown that React is still a great fit for us for frontend-heavy\napplications. With its rewritten core in version 16, Facebook shows how\nimportant this framework is for them. Therefore, Facebook is investing a lot of effort into React and a\nhealthy community. In addition, we REALLY enjoy writing React\ncomponents – so much so, that we have to move this library into adopt!
\n"},{"name":"react","release":"2017-03-01","title":"React.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/react.md","body":"React claims to be "the V in MVC". But for us it is much more than that. React\nimproved the way we approach frontend applications as we build them. Its\nfunctional way of writing components and its declarative JSX syntax help us to\nbuild interactive UIs very efficiently. React's one-way data flow keeps\neverything modular and fast and makes even large applications more readable.
\nComponents are the central point of React - once we fully started\nthinking in react,\nour components became smaller, more reusable and better testable.
\nAfter some 1.5 years of experience with React and the steady growth of the\ncommunity and ecosystem around it, we can confidently say that we still see\ngreat potential to build upcoming projects with React.
\n"}],"name":"react","title":"React.js","ring":1,"quadrant":"languages-and-frameworks","body":"The past months have shown that React is still a great fit for us for frontend-heavy\napplications. With its rewritten core in version 16, Facebook shows how\nimportant this framework is for them. Therefore, Facebook is investing a lot of effort into React and a\nhealthy community. In addition, we REALLY enjoy writing React\ncomponents – so much so, that we have to move this library into adopt!
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/react.md"},{"flag":"changed","featured":true,"revisions":[{"name":"reactive-programming","release":"2019-11-01","title":"Reactive Programming","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/reactive-programming.md","body":"The reactive style of programming promotes event-based thinking and modeling -- \nand by that assists in creating more decoupled solutions.
\nSynergies arise, when people understand the concepts of this pattern: by using marble diagrams, \nwhich are a de-facto standard in visualizing algorithms in a reactive style, a common ground for communication \nis available regardless of the programming language used.
\nWhen appropriate, we choose more explicitly the Reactive Programming pattern and therefore moved this to "adopt".
\n"},{"name":"reactive-programming","release":"2018-03-01","title":"Reactive Programming","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/reactive-programming.md","body":"Classic (web-) applications typically consist of transactions that submit\nlarge forms to the server side, which then processes these and, in response, returns HTML\nfor the browser to render. Today's applications have more and more\nfine-grained 'real-time'-like aspects: A simple modification of a form field\ncould trigger a complete roundtrip to the server including other services and\npersistence. Naturally, all of these transactions should respect the\nexpectations of a user who wants a highly interactive application.
\n"Reactive Programming" tries to provide an answer to the challenges mentioned above\nby raising the level of abstraction. This allows you to focus on the stream of\nevents that make up your business logic in a responsive, asynchronous fashion.
\nThere are various descriptions of what Reactive Programming actually is - at\nthe most general level it is programming with asynchronous data streams and\ncontains tools to create, manipulate, combine and filter these streams. Under the term\n"Reactive Programming", we summarize the principles and implementations that\nunderlie ReactiveX and the Reactive\nManifesto.
\n"Reactive Programming" is employed in many of our services – frontend and\nbackend – but not always as an explicitly chosen pattern. As different\nplatforms have different means to tackle this style of programming, we choose\nto include "Reactive Programming" as a general Method and Patterns Item in\naddition to concrete libraries and APIs such as\nRx.JS or Akka\nStreams to highlight the\nimportance of the approach in general.
\n"}],"name":"reactive-programming","title":"Reactive Programming","ring":1,"quadrant":"methods-and-patterns","body":"The reactive style of programming promotes event-based thinking and modeling -- \nand by that assists in creating more decoupled solutions.
\nSynergies arise, when people understand the concepts of this pattern: by using marble diagrams, \nwhich are a de-facto standard in visualizing algorithms in a reactive style, a common ground for communication \nis available regardless of the programming language used.
\nWhen appropriate, we choose more explicitly the Reactive Programming pattern and therefore moved this to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/reactive-programming.md"},{"flag":"default","featured":true,"revisions":[{"name":"redux","release":"2017-03-01","title":"Redux","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/redux.md","body":"Redux helps us to maintain state in our frontend applications in a more predictable and clearer way. It is extendable though middleware, it has a great documentation and some awesome devtools that are especially helpful when you are new to Redux.
\nThe functional concepts for updating the state, combined with immutable data, lead to extremely easy and enjoyable unit tests - this is maybe the biggest plus for us developers.
\nThe official react-redux bindings also made it straightforward to weave Redux into our React applications. For asynchronous actions we use redux-sagas which has proven itself as a better alternative for redux-thunk.
\nCurrently, we use Redux only in our React projects, but we are evaluating it together with other frameworks such as Angular or Vue.js, as well.
\n"}],"name":"redux","title":"Redux","ring":2,"quadrant":"languages-and-frameworks","body":"Redux helps us to maintain state in our frontend applications in a more predictable and clearer way. It is extendable though middleware, it has a great documentation and some awesome devtools that are especially helpful when you are new to Redux.
\nThe functional concepts for updating the state, combined with immutable data, lead to extremely easy and enjoyable unit tests - this is maybe the biggest plus for us developers.
\nThe official react-redux bindings also made it straightforward to weave Redux into our React applications. For asynchronous actions we use redux-sagas which has proven itself as a better alternative for redux-thunk.
\nCurrently, we use Redux only in our React projects, but we are evaluating it together with other frameworks such as Angular or Vue.js, as well.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/redux.md"},{"flag":"default","featured":true,"revisions":[{"name":"resilience-thinking","release":"2017-03-01","title":"Resilience thinking","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/resilience-thinking.md","body":"Resilience is the cabability of an application or service to resist different error scenarios. Especially for distributed systems - where a lot of communication between different services happen - it's very important to explicitly think of implementing resilience.
\nThere are a lot of different resilience patterns and it is also a matter of the overall software design. Typical patterns and methods used are:
\n"Embrace Errors" should be the mindset - because its not a question if errors appear - it's just a question of when.
\n"}],"name":"resilience-thinking","title":"Resilience thinking","ring":2,"quadrant":"methods-and-patterns","body":"Resilience is the cabability of an application or service to resist different error scenarios. Especially for distributed systems - where a lot of communication between different services happen - it's very important to explicitly think of implementing resilience.
\nThere are a lot of different resilience patterns and it is also a matter of the overall software design. Typical patterns and methods used are:
\n"Embrace Errors" should be the mindset - because its not a question if errors appear - it's just a question of when.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/resilience-thinking.md"},{"flag":"default","featured":true,"revisions":[{"name":"rest-assured","release":"2017-03-01","title":"Rest Assured (Testing)","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rest-assured.md","body":"REST-assured is a Java DSL for simplifying testing of REST-based services built on top of HTTP Builder. It supports the most important http request methods and can be used to validate and verify the response of these requests.
\nAt AOE, we use REST-assured with Spock to automate our API testing. We appreciate the easy-to-use DSL, which uses the Given-When-Then template (also known as Gherkin language). This template helps other project members to understand the code/test easily.
\nBecause of the seamless integration with Spock and our positive experience in one of our major projects, we classify REST-assured as assess.
\n"}],"name":"rest-assured","title":"Rest Assured (Testing)","ring":3,"quadrant":"tools","body":"REST-assured is a Java DSL for simplifying testing of REST-based services built on top of HTTP Builder. It supports the most important http request methods and can be used to validate and verify the response of these requests.
\nAt AOE, we use REST-assured with Spock to automate our API testing. We appreciate the easy-to-use DSL, which uses the Given-When-Then template (also known as Gherkin language). This template helps other project members to understand the code/test easily.
\nBecause of the seamless integration with Spock and our positive experience in one of our major projects, we classify REST-assured as assess.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rest-assured.md"},{"flag":"default","featured":true,"revisions":[{"name":"rxjava","release":"2017-03-01","title":"RxJava","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjava.md","body":"RxJava is the Open Source Java implementation of ReactiveX. The main concept heavily relies on the Observer- (and Subscriber)-Pattern. An Observer emits a stream of data, which can be consumed by Subscribers. The Subscriber reacts (That's where the 'Rx' comes from) asynchronously to those data events. Reactive Extensions were originally developed by Mircosoft's Erik Meijer and his team and have been ported to all major programming languages after being released to the public as Open Source software. We use RxJava (but actually RxAndroid to be precise) in the Congstar Android App to let the UI layer react to changes in the underlaying data layer.
\n"}],"name":"rxjava","title":"RxJava","ring":2,"quadrant":"tools","body":"RxJava is the Open Source Java implementation of ReactiveX. The main concept heavily relies on the Observer- (and Subscriber)-Pattern. An Observer emits a stream of data, which can be consumed by Subscribers. The Subscriber reacts (That's where the 'Rx' comes from) asynchronously to those data events. Reactive Extensions were originally developed by Mircosoft's Erik Meijer and his team and have been ported to all major programming languages after being released to the public as Open Source software. We use RxJava (but actually RxAndroid to be precise) in the Congstar Android App to let the UI layer react to changes in the underlaying data layer.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjava.md"},{"flag":"default","featured":true,"revisions":[{"name":"rxjs","release":"2017-03-01","title":"RxJs","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjs.md","body":"RX/JS aka reactive streams
\nRxJS is an implementation for the reactive programming paradigm which implements mostly the observer and iterator\npattern and follows the functional programming ideas. The pattern actually got a renaissance because it's not completely\nnew but has new implementations in many frameworks and languages like Angular, Akka, Spring and many more. Reason for \nthat attention actually is (in the javascript world), that observables can be cancelled (by rules too) and observables\ncan pass (stream) data on multiple events. Both aspects are not well realizable using promises e.g. and both were also\ndetected as a huge limitation in the JavaScript community — and so it's worth to get an understanding for reactive\nprogramming in general.
\nWe at AOE actually use RxJS in combination with Angular and think that it's worth to dive deeper into this paradigm.
\n"}],"name":"rxjs","title":"RxJs","ring":2,"quadrant":"languages-and-frameworks","body":"RX/JS aka reactive streams
\nRxJS is an implementation for the reactive programming paradigm which implements mostly the observer and iterator\npattern and follows the functional programming ideas. The pattern actually got a renaissance because it's not completely\nnew but has new implementations in many frameworks and languages like Angular, Akka, Spring and many more. Reason for \nthat attention actually is (in the javascript world), that observables can be cancelled (by rules too) and observables\ncan pass (stream) data on multiple events. Both aspects are not well realizable using promises e.g. and both were also\ndetected as a huge limitation in the JavaScript community — and so it's worth to get an understanding for reactive\nprogramming in general.
\nWe at AOE actually use RxJS in combination with Angular and think that it's worth to dive deeper into this paradigm.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjs.md"},{"flag":"default","featured":true,"revisions":[{"name":"sass","release":"2017-03-01","title":"SASS","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/sass.md","body":"SASS (Syntactically Awesome Style-Sheets) is an extension to native CSS, which, as a preprocessor, simplifies the generation of CSS by offering features that enable developers to more efficiently write robust, better readable and maintainable CSS.
\nCore features of SASS are:
\nSASS has been widely adopted for many years and has evolved to an industry-standard backed by an active community since 2006.
\nThe learning curve is very smooth as SASS is fully compatible to CSS, meaning that all features are optional: Starting with SASS is as easy as renaming .css-files to .scss in a first step and then refactoring it step-by-step with the use of SASS features.
\nAt AOE, SASS has been recommended by the frontend COI and is used in nearly every current project.
\nMore information:
\n\n"}],"name":"sass","title":"SASS","ring":1,"quadrant":"languages-and-frameworks","body":"SASS (Syntactically Awesome Style-Sheets) is an extension to native CSS, which, as a preprocessor, simplifies the generation of CSS by offering features that enable developers to more efficiently write robust, better readable and maintainable CSS.
\nCore features of SASS are:
\nSASS has been widely adopted for many years and has evolved to an industry-standard backed by an active community since 2006.
\nThe learning curve is very smooth as SASS is fully compatible to CSS, meaning that all features are optional: Starting with SASS is as easy as renaming .css-files to .scss in a first step and then refactoring it step-by-step with the use of SASS features.
\nAt AOE, SASS has been recommended by the frontend COI and is used in nearly every current project.
\nMore information:
\n\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/sass.md"},{"flag":"default","featured":true,"revisions":[{"name":"scala-lang","release":"2018-03-01","title":"Scala Lang","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/scala-lang.md","body":"Scala is used in many projects at AOE. We have therefore moved it to the adopt level.
\n"},{"name":"scala-lang","release":"2017-03-01","title":"Scala Lang","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/scala-lang.md","body":"Besides Java, Scala is the most mature language on the Java Virtual Machine. Its unique blend of object-oriented and functional language features and rich type system with advanced type inference enables one to write concise code.
\nIt is fully interoperable with Java but has a big ecosystem of tools and frameworks on its own.
\nScala provides one of the best high-level concurrency- and async features on the language level as well as on the framework level, making it the default choice of twitter and the like.
\nAt AOE, we already use Scala in various projects to create scalable backend systems (Play, Akka) or for batch processing (Spark).
\n"}],"name":"scala-lang","title":"Scala Lang","ring":1,"quadrant":"languages-and-frameworks","body":"Scala is used in many projects at AOE. We have therefore moved it to the adopt level.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/scala-lang.md"},{"flag":"changed","featured":true,"revisions":[{"name":"self-service-infrastructure","release":"2019-11-01","title":"Self-service infrastructure","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/self-service-infrastructure.md","body":"Moved to "trial".
\n"},{"name":"self-service-infrastructure","release":"2018-03-01","title":"Self-service infrastructure","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/self-service-infrastructure.md","body":"With growing teams, growing projects and growing infrastructures, we decided to follow the "You build it, you run it" approach, and when we started to run Kubernetes, where we have a great abstraction layer between infrastructure and applications, we decided to make the developer teams write their own Helm charts.\nBy agreeing on just a couple of patters, this allows us to easily manage a microservice architecture with more than 60 Applications, without too much hassle managing infrastructure/runtimes for (among others) JVM, Go and PHP applications.\nMost of the hosting/provisioning decisions are better kept within the team, as the teams know how their applications work. By providing a clear interface, this became the cornerstone for running our microservice architecture, and keeping the amount of actual servers much lower than in projects with a centralized operations/IT team.
\nEventually, self-service infrastructure, and "You build it, you run it", allowed us to give both our application developers as well as our infrastructure engineers more flexibility than one team explaining to another team what to do, resulting in a better collaboration than before.
\n"}],"name":"self-service-infrastructure","title":"Self-service infrastructure","ring":2,"quadrant":"platforms-and-aoe-services","body":"Moved to "trial".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/self-service-infrastructure.md"},{"flag":"default","featured":true,"revisions":[{"name":"settings-injection","release":"2017-03-01","title":"Settings Injection","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/settings-injection.md","body":"While deploying applications to an environment, the application typically needs to be configured for that specific environment. Typical settings include domain names, database credentials and the location of other dependent services such as cache backends, queues or session storages.
\nThese settings should not be shipped with the build package. Instead, it's the environment - this build is being deployed to - that should expose these values to application. A common way to "inject" these values is by making them available as environment variables or dynamically creating configuration files for the application. You can achieve this pattern without special tools - but this concept of settings injection also works with tools such as Consul, kubernetes (with configMaps and secrets) or YAD.
\nIn this manner, the build package can be independent from the environment it's being deployed to - making it easier to follow the "Build once, deploy often" CI/CD principle.
\n"}],"name":"settings-injection","title":"Settings Injection","ring":1,"quadrant":"methods-and-patterns","body":"While deploying applications to an environment, the application typically needs to be configured for that specific environment. Typical settings include domain names, database credentials and the location of other dependent services such as cache backends, queues or session storages.
\nThese settings should not be shipped with the build package. Instead, it's the environment - this build is being deployed to - that should expose these values to application. A common way to "inject" these values is by making them available as environment variables or dynamically creating configuration files for the application. You can achieve this pattern without special tools - but this concept of settings injection also works with tools such as Consul, kubernetes (with configMaps and secrets) or YAD.
\nIn this manner, the build package can be independent from the environment it's being deployed to - making it easier to follow the "Build once, deploy often" CI/CD principle.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/settings-injection.md"},{"flag":"changed","featured":true,"revisions":[{"name":"sonarqube","release":"2019-11-01","title":"SonarQube","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/sonarqube.md","body":"At AOE, we are using SonarQube to get a historical overview of the code quality in our Projects. With SonarQube, you can get a quick insight into the condition of your code. It analyzes many languages and provides numerous static analysis rules.\nSonarQube is also being used for Static Application Security Testing (SAST) which scans our code for potential security vulnerabilities and is an essential element of our Secure Software Development Lifecycle.
\n"},{"name":"sonarqube","release":"2018-03-01","title":"SonarQube","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/sonarqube.md","body":"At AOE, we're evaluating SonarQube to get an historical overview of the code quality of our Projects. With SonarQube, you can get a quick hint about the condition of your code. It analyzes many languages and provides numerous static analysis rules.
\n"}],"name":"sonarqube","title":"SonarQube","ring":2,"quadrant":"tools","body":"At AOE, we are using SonarQube to get a historical overview of the code quality in our Projects. With SonarQube, you can get a quick insight into the condition of your code. It analyzes many languages and provides numerous static analysis rules.\nSonarQube is also being used for Static Application Security Testing (SAST) which scans our code for potential security vulnerabilities and is an essential element of our Secure Software Development Lifecycle.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/sonarqube.md"},{"flag":"default","featured":false,"revisions":[{"name":"sparkpost","release":"2017-03-01","title":"SparkPost","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/sparkpost.md","body":"Sparkpost is an SaaS service for E-Mail delivery and E-Mail templating that can be used to send E-Mails by calling an API.
\nIn a lot of projects, it is a typical requirement that different E-Mails need to be sent and that the project stakeholders want to adjust E-Mail templates and content on a relatively regular basis.
\nAlso, (mass) sending E-Mails and avoiding that they are classified as Spam is not an easy topic. That's why we decided to use E-Mail delivery services in our projects and evaluated different providers.
\nWe decided to start using SparkPost because of pricing, feature set and the available reviews on the Internet. There are also other possible solutions such as SendGrid or Postmark.
\n"}],"name":"sparkpost","title":"sparkpost.md","quadrant":"platforms-and-aoe-services","body":"Sparkpost is an SaaS service for E-Mail delivery and E-Mail templating that can be used to send E-Mails by calling an API.
\nIn a lot of projects, it is a typical requirement that different E-Mails need to be sent and that the project stakeholders want to adjust E-Mail templates and content on a relatively regular basis.
\nAlso, (mass) sending E-Mails and avoiding that they are classified as Spam is not an easy topic. That's why we decided to use E-Mail delivery services in our projects and evaluated different providers.
\nWe decided to start using SparkPost because of pricing, feature set and the available reviews on the Internet. There are also other possible solutions such as SendGrid or Postmark.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/sparkpost.md"},{"flag":"default","featured":true,"revisions":[{"name":"spock_geb","release":"2017-03-01","title":"Spock + Geb","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spock_geb.md","body":"spockframework.org - Spock is a testing and specification framework for Java and Groovy applications. What makes it stand out from the crowd is its beautiful and highly expressive specification language. Thanks to its JUnit runner, Spock is compatible with most IDEs, build tools and continuous integration servers. Spock is inspired from JUnit, jMock, RSpec, Groovy, Scala, Vulcans, and other fascinating life forms.
\ngebish.org - Geb is a browser automation solution. It brings together the power of WebDriver, the elegance of jQuery content selection, the robustness of Page Object modelling and the expressiveness of the Groovy language. It can be used for scripting, scraping and general automation or equally as a functional/web/acceptance testing solution via integration with testing frameworks such as Spock, JUnit & TestNG.
\nAt AOE, we use Spock in combination with Geb in various projects for black-box testing. Mainly, we implement our functional integration and acceptance testing automation with these frameworks, which work together seamlessly. And, we also like the convenience of extending the tests with Groovy built-ins or custom extensions.
\nBecause of the successful use in two of our large projects and the wide range of opportunities within the testing domain with Spock and Geb, we classify this combo with adopt.
\n\n"}],"name":"spock_geb","title":"Spock + Geb","ring":1,"quadrant":"languages-and-frameworks","body":"spockframework.org - Spock is a testing and specification framework for Java and Groovy applications. What makes it stand out from the crowd is its beautiful and highly expressive specification language. Thanks to its JUnit runner, Spock is compatible with most IDEs, build tools and continuous integration servers. Spock is inspired from JUnit, jMock, RSpec, Groovy, Scala, Vulcans, and other fascinating life forms.
\ngebish.org - Geb is a browser automation solution. It brings together the power of WebDriver, the elegance of jQuery content selection, the robustness of Page Object modelling and the expressiveness of the Groovy language. It can be used for scripting, scraping and general automation or equally as a functional/web/acceptance testing solution via integration with testing frameworks such as Spock, JUnit & TestNG.
\nAt AOE, we use Spock in combination with Geb in various projects for black-box testing. Mainly, we implement our functional integration and acceptance testing automation with these frameworks, which work together seamlessly. And, we also like the convenience of extending the tests with Groovy built-ins or custom extensions.
\nBecause of the successful use in two of our large projects and the wide range of opportunities within the testing domain with Spock and Geb, we classify this combo with adopt.
\n\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spock_geb.md"},{"flag":"changed","featured":true,"revisions":[{"name":"spring-boot","release":"2019-11-01","title":"Spring Boot","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/spring-boot.md","body":"We now have several years of experiences with Spring Boot, \nand a big projects Microservice Environment runs completely on Spring Boot, \nso it's time to update it to "adopt".
\n"},{"name":"spring-boot","release":"2018-03-01","title":"Spring Boot","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/spring-boot.md","body":"We have had good experiences with Spring Boot, and already have several Spring Boot-based services running in \nproduction. We like the ease of kickstarting new services and the variety of tools in the Spring ecosystem.
\n"},{"name":"spring-boot","release":"2017-03-01","title":"Spring Boot","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spring-boot.md","body":"With Spring Boot you create standalone Spring Applications with minimum configuration. Spring Boot rapidly gets you up and running for production.
\nWith an embedded Tomcat, Jetty and Undertow you have everything you need to deploy your application out-of-the-box.
\nThe Spring Cloud ecosystem also gives you a lot of extension points for developing, deploying and running cloud applications.
\nIt's based on the rock-solid Spring framework and provides excellent documentation.
\nAt AOE, we use Spring Boot in a microservice architecture. Together with Groovy as the implementation Language, and some other Tools (Spring Security, Cloud, HATEOAS, Data, Session) from the Spring environment, we are able to create complex and powerful applications in no time.
\n"}],"name":"spring-boot","title":"Spring Boot","ring":1,"quadrant":"languages-and-frameworks","body":"We now have several years of experiences with Spring Boot, \nand a big projects Microservice Environment runs completely on Spring Boot, \nso it's time to update it to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/spring-boot.md"},{"flag":"default","featured":true,"revisions":[{"name":"spring-rest-docs","release":"2017-03-01","title":"Spring REST Docs","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spring-rest-docs.md","body":"Spring REST Docs auto generates Asciidoctor snippets with the help of Spring MVC Test or RestAssured. So you can be sure that your tests are inline with the documentation.
\nAt AOE, we use Spring REST Docs to document our Rest Services and Hal Resources. We also use it to auto generate Wiremock Stubs, so the consumer of the service can test against the exact API of the service.
\n"}],"name":"spring-rest-docs","title":"Spring REST Docs","ring":3,"quadrant":"tools","body":"Spring REST Docs auto generates Asciidoctor snippets with the help of Spring MVC Test or RestAssured. So you can be sure that your tests are inline with the documentation.
\nAt AOE, we use Spring REST Docs to document our Rest Services and Hal Resources. We also use it to auto generate Wiremock Stubs, so the consumer of the service can test against the exact API of the service.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spring-rest-docs.md"},{"flag":"new","featured":true,"revisions":[{"name":"storybook","release":"2019-11-01","title":"Storybook","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/storybook.md","body":"Storybook is a user interface development environment and playground for UI components. The tool enables developers to create components independently and showcase components interactively in an isolated development environment.\nStorybook runs outside of the main app so users can develop UI components in isolation without worrying about app specific dependencies and requirements.
\n"}],"name":"storybook","title":"Storybook","ring":3,"quadrant":"tools","body":"Storybook is a user interface development environment and playground for UI components. The tool enables developers to create components independently and showcase components interactively in an isolated development environment.\nStorybook runs outside of the main app so users can develop UI components in isolation without worrying about app specific dependencies and requirements.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/storybook.md"},{"flag":"default","featured":true,"revisions":[{"name":"strategic-domain-driven-design","release":"2017-03-01","title":"Strategic Domain Driven Design","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/strategic-domain-driven-design.md","body":"Design of distributed applications need to be done wisely. Strategic Domain Driven Design is an approach for modelling large-scale applications and systems and is introduced in the last part of Eric Evans' book Domain Driven Design.
\nDomain driven design is a well-known pattern family and has been established at AOE for quite some time now. Unlike Domain Driven Design, which focuses on the tactical design in an application, strategic domain driven design is an approach that is very helpful for the high-level strategic design of an application and distributed software architecture.
\nIt is a pattern familiy focused on using and defining Bounded Context and thinking explicitly of the different relationship patterns and the required "translation" of similar "concepts" between the bounded contexts. It is helpful to argue and find a good strategic architecture in alignment with the requirements, the domain and by considering Conway's Law.\nA context map and a common conceptional core help to understand and improve the overall strategic picture. Especially with the Microservice approach, it is important to define and connect services following the low coupling - high cohesion principles by idendifying fitting bounded contexts.
\nThe following chart gives an overview of possible relationships between bounded contexts:\n
While we have found that this approach is especially useful in designing distributed systems and applications with microservices, we have also extended this approach to provide guidlines for general enterprise architectures.
\n"}],"name":"strategic-domain-driven-design","title":"Strategic Domain Driven Design","ring":1,"quadrant":"methods-and-patterns","body":"Design of distributed applications need to be done wisely. Strategic Domain Driven Design is an approach for modelling large-scale applications and systems and is introduced in the last part of Eric Evans' book Domain Driven Design.
\nDomain driven design is a well-known pattern family and has been established at AOE for quite some time now. Unlike Domain Driven Design, which focuses on the tactical design in an application, strategic domain driven design is an approach that is very helpful for the high-level strategic design of an application and distributed software architecture.
\nIt is a pattern familiy focused on using and defining Bounded Context and thinking explicitly of the different relationship patterns and the required "translation" of similar "concepts" between the bounded contexts. It is helpful to argue and find a good strategic architecture in alignment with the requirements, the domain and by considering Conway's Law.\nA context map and a common conceptional core help to understand and improve the overall strategic picture. Especially with the Microservice approach, it is important to define and connect services following the low coupling - high cohesion principles by idendifying fitting bounded contexts.
\nThe following chart gives an overview of possible relationships between bounded contexts:\n
While we have found that this approach is especially useful in designing distributed systems and applications with microservices, we have also extended this approach to provide guidlines for general enterprise architectures.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/strategic-domain-driven-design.md"},{"flag":"new","featured":true,"revisions":[{"name":"stride-threat-modeling","release":"2019-11-01","title":"STRIDE Threat Modeling","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/stride-threat-modeling.md","body":"STRIDE is a model of threat groups that helps to identify security threats to any application, component or infrastructure.
\nThe acronym stands for:
\nAOE is applying the threat model in collaborative sessions using the Elevation of Privilege Card Game which helps to spark imagination and makes threats more tangible.
\n"}],"name":"stride-threat-modeling","title":"STRIDE Threat Modeling","ring":2,"quadrant":"methods-and-patterns","body":"STRIDE is a model of threat groups that helps to identify security threats to any application, component or infrastructure.
\nThe acronym stands for:
\nAOE is applying the threat model in collaborative sessions using the Elevation of Privilege Card Game which helps to spark imagination and makes threats more tangible.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/stride-threat-modeling.md"},{"flag":"default","featured":false,"revisions":[{"name":"styleguide-driven-development","release":"2018-03-01","title":"Styleguide Driven Development","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/styleguide-driven-development.md","body":"Updated to "adopt".
\n"},{"name":"styleguide-driven-development","release":"2017-03-01","title":"Styleguide Driven Development","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/styleguide-driven-development.md","body":"The goal of Styleguide Driven Development is to develop your application user Interface independently and reusable in a Pattern Library.\\\nIn the old days, the frontend was developed based on page-centric Photoshop files which made it hard to change things afterwards. With styleguide driven development you build smaller elements, which are reusable in all of your frontends.
\nYou can start developing your UI components (HTML/CSS/JavaScript) very early in the production phase without having to wait for a ready-to-use development system.\\\nDesigners and Testers can give feedback early and you can share the documentation and code with external teams.
\nAt AOE, we use Hologram to build a living documentation right from the source files. Whenever a new UI Element is needed, a developer starts building it in the styleguide -- not in the actual application code. By writing the code for the new component, the documentation for it is created instantly. Any other developer can easily see which elements exist and how it can be used in the code.
\n"}],"name":"styleguide-driven-development","title":"styleguide-driven-development.md","quadrant":"methods-and-patterns","body":"Updated to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/styleguide-driven-development.md"},{"flag":"default","featured":false,"revisions":[{"name":"symfony-components","release":"2017-03-01","title":"Symfony Components","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/symfony-components.md","body":"Symfony Components are part of the Symfony Framework and they are designed as decoupled and reusable PHP components.
\nTheir use cases vary from simple little helpers such as a beautified var_dump to more complex ones such as access control, list-based security mechanisms and an easy-to-integrate console component to give your already existing applications some CLI capabilities. They are used by a lot of PHP-based projects such as Typo3, Magento, Composer, PHPUnit and Doctrine, with contributions continually taking place. If you are planning the next project with PHP components, you should have a look at the Symfony Components list, which includes a lot of well-designed, decoupled Open Source pieces of PHP code.
\n"}],"name":"symfony-components","title":"symfony-components.md","quadrant":"languages-and-frameworks","body":"Symfony Components are part of the Symfony Framework and they are designed as decoupled and reusable PHP components.
\nTheir use cases vary from simple little helpers such as a beautified var_dump to more complex ones such as access control, list-based security mechanisms and an easy-to-integrate console component to give your already existing applications some CLI capabilities. They are used by a lot of PHP-based projects such as Typo3, Magento, Composer, PHPUnit and Doctrine, with contributions continually taking place. If you are planning the next project with PHP components, you should have a look at the Symfony Components list, which includes a lot of well-designed, decoupled Open Source pieces of PHP code.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/symfony-components.md"},{"flag":"new","featured":true,"revisions":[{"name":"temporal-modeling","release":"2019-11-01","title":"Temporal Modeling","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/temporal-modeling.md","body":"Temporal Modeling is way of modeling software systems and components by putting events first.
\nThe usual way of modeling software is to find structures, things and relations.\nWe try to find the relevant aspects of a domain and put all properties into an object-oriented model.\nTrying to create a second model for a related business process, having the structural model already in place,\nmight result in a process representation that is tightly coupled with the assumptions built up from the structural\nmodel and too far away from reality.
\nBy focusing on the domain processes first, one can visualize all aspects of a process over time.\nHaving the process visualized, allows to see potential pitfalls or forgotten aspects.\nWith a temporal model at hand, it is easy to create a object-oriented or structural model that perfectly\nrepresents all required information.
\nWe tried this method when tackling big or complex domains.
\n"}],"name":"temporal-modeling","title":"Temporal Modeling","ring":3,"quadrant":"methods-and-patterns","body":"Temporal Modeling is way of modeling software systems and components by putting events first.
\nThe usual way of modeling software is to find structures, things and relations.\nWe try to find the relevant aspects of a domain and put all properties into an object-oriented model.\nTrying to create a second model for a related business process, having the structural model already in place,\nmight result in a process representation that is tightly coupled with the assumptions built up from the structural\nmodel and too far away from reality.
\nBy focusing on the domain processes first, one can visualize all aspects of a process over time.\nHaving the process visualized, allows to see potential pitfalls or forgotten aspects.\nWith a temporal model at hand, it is easy to create a object-oriented or structural model that perfectly\nrepresents all required information.
\nWe tried this method when tackling big or complex domains.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/temporal-modeling.md"},{"flag":"changed","featured":true,"revisions":[{"name":"terraform","release":"2019-11-01","title":"Terraform","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/terraform.md","body":"Terraform is a tool for building, changing and versioning infrastructure using the infrastructure as code pattern.\nTerraform supports popular service providers like AWS, Google Cloud Platform, Azure and many more.
\nInfrastructure is described in configuration files trough the HCL (HashiCorp Configuration Language), which brings a set of string interpolations and built-in functions, \nincluding conditionals and loops. Terraform validates configuration files before trying to run updates. It checks not only that all files use the correct syntax, \nbut also that all parameters are accessible and the configuration as a whole is valid. In Terraform, you can (and should) run a ‘plan’ step before applying any changes. \nThis step tells you precisely what is going to change and why.\nAnother feature of Terraform is that it makes it easy to reuse code by using modules. That gives a lot of leeway in structuring projects in the way it makes most sense.
\nHere at AOE we use terraform in multiple teams to provision infrastructure and manage their lifecycle on cloud platforms such as AWS and for platforms such as Kubernetes.
\n"},{"name":"terraform","release":"2018-03-01","title":"Terraform","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/terraform.md","body":"For the infrastructure of our OM3 projects we run multiple Kubernetes clusters, and to orchestrate the infrastructure provisioning we quickly decided to go with Terraform.\nTerraform allows us to easily manage our infrastructure, from AWS EC2 instances to RabbitMQ message queues.\nAlso, the Kops installer for Kubernetes on AWS uses Terraform as its main building brick, and we can trigger Kops via Terraform.
\nWe bring terraform together with Helm to manage similar parts of the infrastructure, for example a shared file with domainname to application mappings allows us to provision Route 53 DNS entries via Terraform and then roll out Kubernetes Ingress definitions with the appropriate hostname to service mapping via Helm.
\n"}],"name":"terraform","title":"Terraform","ring":1,"quadrant":"platforms-and-aoe-services","body":"Terraform is a tool for building, changing and versioning infrastructure using the infrastructure as code pattern.\nTerraform supports popular service providers like AWS, Google Cloud Platform, Azure and many more.
\nInfrastructure is described in configuration files trough the HCL (HashiCorp Configuration Language), which brings a set of string interpolations and built-in functions, \nincluding conditionals and loops. Terraform validates configuration files before trying to run updates. It checks not only that all files use the correct syntax, \nbut also that all parameters are accessible and the configuration as a whole is valid. In Terraform, you can (and should) run a ‘plan’ step before applying any changes. \nThis step tells you precisely what is going to change and why.\nAnother feature of Terraform is that it makes it easy to reuse code by using modules. That gives a lot of leeway in structuring projects in the way it makes most sense.
\nHere at AOE we use terraform in multiple teams to provision infrastructure and manage their lifecycle on cloud platforms such as AWS and for platforms such as Kubernetes.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/terraform.md"},{"flag":"changed","featured":true,"revisions":[{"name":"typescript","release":"2019-11-01","title":"Typescript","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/typescript.md","body":"As writing frontend applications becomes more complex, TypeScript allows us to scale client side code easily, even with large code bases. We use typescript successfully at production for many projects and we are only going to use it even more in the future. We highly recommend using typescript over javascript, therefore we have decided to move it to adopt.
\n"},{"name":"typescript","release":"2017-03-01","title":"Typescript","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/typescript.md","body":"TypeScript is a language that gets transpiled to native JavaScript code.
\nIt offers support for the latest EcmaScript features and has strict typing and support for interfaces built in.
\nJavaScript scoping, which led into recurring workarounds such as var self = this, myFunc.bind(this)_,_was eliminated in TypeScript.
\nIn TypeScript this stays this, which leads to more readable and understandable code from an OOP perspective.
\nTypeScript continues to be actively developed by Microsoft and is well-Integrated in today's IDEs.
\nThe excellent structure and the possibilities for extension make it a good choice to consider for larger JavaScript projects.
\nTypescript was the choice for Angular and one can assume that it will get more traction with the success of Angular in the future.
\nThere are also projects that support Typescript „code execution“ on the server such as ts-node.
\n"}],"name":"typescript","title":"Typescript","ring":1,"quadrant":"languages-and-frameworks","body":"As writing frontend applications becomes more complex, TypeScript allows us to scale client side code easily, even with large code bases. We use typescript successfully at production for many projects and we are only going to use it even more in the future. We highly recommend using typescript over javascript, therefore we have decided to move it to adopt.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/typescript.md"},{"flag":"default","featured":false,"revisions":[{"name":"typo3-as-a-framework","release":"2017-03-01","title":"TYPO3 as a Framework","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/typo3-as-a-framework.md","body":"We should avoid building new projects around TYPO3 by default. A lot of past projects started with CMS-only features in the beginning, and, for example, developed toward highly customized E-Commerce platforms. Instead of rearranging the architecture in a useful way, functionality was built on top of TYPO3's core and its extension framework Extbase. In the context of larger projects, this lead to deployment monoliths and the inability to integrate new technologies.
\nWhile in the past it was easy to kickstart a TYPO3 project with AOE's custom-tailored kickstarter, we now have a lot of knowledge and tools available to start projects with a much smarter architecture.\nThis does not mean you shouldn't use TYPO3 anymore, but use it as the tool it is: a content management system.
\n"}],"name":"typo3-as-a-framework","title":"typo3-as-a-framework.md","quadrant":"tools","body":"We should avoid building new projects around TYPO3 by default. A lot of past projects started with CMS-only features in the beginning, and, for example, developed toward highly customized E-Commerce platforms. Instead of rearranging the architecture in a useful way, functionality was built on top of TYPO3's core and its extension framework Extbase. In the context of larger projects, this lead to deployment monoliths and the inability to integrate new technologies.
\nWhile in the past it was easy to kickstart a TYPO3 project with AOE's custom-tailored kickstarter, we now have a lot of knowledge and tools available to start projects with a much smarter architecture.\nThis does not mean you shouldn't use TYPO3 anymore, but use it as the tool it is: a content management system.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/typo3-as-a-framework.md"},{"flag":"new","featured":true,"revisions":[{"name":"vault","release":"2019-11-01","title":"Vault","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vault.md","body":"Hashicorp Vault is a Go application with a Rest/Cli interface that you can use to securely access secrets.\nA secret can be any sensitive data, such as credentials, certificates, access tokens, encryption keys etc. \nVaults key features are a secure secret storage, dynamic secretes (create on-demand secrets), data encryption, secret leasing, renewal and revocation.
\n"}],"name":"vault","title":"Vault","ring":2,"quadrant":"tools","body":"Hashicorp Vault is a Go application with a Rest/Cli interface that you can use to securely access secrets.\nA secret can be any sensitive data, such as credentials, certificates, access tokens, encryption keys etc. \nVaults key features are a secure secret storage, dynamic secretes (create on-demand secrets), data encryption, secret leasing, renewal and revocation.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vault.md"},{"flag":"default","featured":true,"revisions":[{"name":"vue","release":"2018-03-01","title":"Vue.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/vue.md","body":"Updated to "trial".
\n"},{"name":"vue","release":"2017-03-01","title":"Vue.js","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/vue.md","body":"Vue is a progressive, incrementally adoptable framework for building user interfaces maintained by Evan You. Unlike other monolithic frameworks, the core library is focused on the view layer only and is very easy to pick up and integrate with other libraries or existing projects. Vue is also perfectly capable of powering sophisticated single-page applications when used in combination with modern tooling and supporting libraries such as vuex and vue-router.
\nVue uses an HTML-based template syntax that allows you to declaratively bind the rendered DOM to the underlying Vue instance’s data. Under the hood, Vue compiles the templates into Virtual DOM render functions. Combined with the reactivity system Vue is able to intelligently figure out the minimal amount of components to re-render and apply the minimal amount of DOM manipulations when the app state changes, which provides for very high performance.
\nApplications can be split into Single File Components - a single file containing the template (HTML), style (CSS) and functionality (JS) - which simplifies maintainability and testability of the code and promotes reusability across other projects.
\n"}],"name":"vue","title":"Vue.js","ring":2,"quadrant":"languages-and-frameworks","body":"Updated to "trial".
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/vue.md"},{"flag":"new","featured":true,"revisions":[{"name":"vuex","release":"2019-11-01","title":"Vuex","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vuex.md","body":"Vuex is a state management pattern + library for Vue.js applications.
\n"}],"name":"vuex","title":"Vuex","ring":3,"quadrant":"languages-and-frameworks","body":"Vuex is a state management pattern + library for Vue.js applications.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vuex.md"},{"flag":"default","featured":true,"revisions":[{"name":"webpack","release":"2018-03-01","title":"Webpack","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/webpack.md","body":"In the last few years, Webpack has grown to become the de-facto standard for Web\nbundling in the JavaScript-Ecosystem. With Version 3, Webpack is a more robust\nand better documented bundler with nice new features such as\nscope hoisting.\nBecause of this, and because of the continuously growing community, we have adopted Webpack for nearly\nevery single-page application we have.
\n"},{"name":"webpack","release":"2017-03-01","title":"Webpack","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/webpack.md","body":"Webpack is a web bundler for JavaScript applications. Instead of writing scripts to build and bundle your app like you would with Gulp, you just define what files you want to load into your bundle.
\nIn the following example, we define that JavaScript files should be handled by babel-loader, excluding the files from node_modules. The logic behind the process comes from the loader. You can find the right loader in npm.
\n{\n test: /\\.js$/,\n loader: 'babel-loader',\n exclude: /node_modules/,\n}\nOn top of that you can use plugins to optimize your bundle like uglifying your code or put your common libraries in a separate file.
\nUnder the hood, you've got nice features such as:
\nThe configuration is simple and there is excellent and extensive documentation.
\n"}],"name":"webpack","title":"Webpack","ring":1,"quadrant":"tools","body":"In the last few years, Webpack has grown to become the de-facto standard for Web\nbundling in the JavaScript-Ecosystem. With Version 3, Webpack is a more robust\nand better documented bundler with nice new features such as\nscope hoisting.\nBecause of this, and because of the continuously growing community, we have adopted Webpack for nearly\nevery single-page application we have.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/webpack.md"},{"flag":"default","featured":true,"revisions":[{"name":"wiremock","release":"2017-03-01","title":"Wiremock","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/wiremock.md","body":"WireMock is an HTTP mock server - it can be used to mock APIs for testing.
\nAt its core, it is a web server that can be prepared to serve canned responses to particular requests (stubbing), and that captures incoming requests so that they can be checked later (verification). It also has an assortment of other useful features including record/playback of interactions with other APIs, injection of faults and delays, simulation of stateful behavior.
\nIt can be used as a library by any JVM application, or run as a standalone process either on the same host as the system under test or a remote server. All of WireMock's features are accessible via its REST (JSON) interface and its Java API. Additionally, the mock server can be configured via JSON files.
\nAt AOE, we use WireMock as a standalone server to mock APIs that are outside our system context to get a stable environment for testing and rapid feedback. Besides the decoupled test and development advantages, the mocked APIs can also be used in contract-based tests. We also use embedded WireMock in functional tests to stub external services. The explicit test of faults are especially helpful in building and testing the resilience of your application.
\nBecause of the features such as flexible deployment, powerful request matching and record/payback interactions, as well as the fact that the server runs stable in our project environments, we classify WireMock as trial.
\n"}],"name":"wiremock","title":"Wiremock","ring":2,"quadrant":"tools","body":"WireMock is an HTTP mock server - it can be used to mock APIs for testing.
\nAt its core, it is a web server that can be prepared to serve canned responses to particular requests (stubbing), and that captures incoming requests so that they can be checked later (verification). It also has an assortment of other useful features including record/playback of interactions with other APIs, injection of faults and delays, simulation of stateful behavior.
\nIt can be used as a library by any JVM application, or run as a standalone process either on the same host as the system under test or a remote server. All of WireMock's features are accessible via its REST (JSON) interface and its Java API. Additionally, the mock server can be configured via JSON files.
\nAt AOE, we use WireMock as a standalone server to mock APIs that are outside our system context to get a stable environment for testing and rapid feedback. Besides the decoupled test and development advantages, the mocked APIs can also be used in contract-based tests. We also use embedded WireMock in functional tests to stub external services. The explicit test of faults are especially helpful in building and testing the resilience of your application.
\nBecause of the features such as flexible deployment, powerful request matching and record/payback interactions, as well as the fact that the server runs stable in our project environments, we classify WireMock as trial.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/wiremock.md"},{"flag":"default","featured":false,"revisions":[{"name":"xataface","release":"2017-03-01","title":"Xataface","ring":4,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/xataface.md","body":"In the past, we used a custom-developed toolset with Xataface,T3Deploy and a settings migration tool as an easy way to manage TYPO3- and Magento-related configurations and to automatically create environments on our shared integration/dev-servers.
\nToday, there is no advantage or need for Xataface. Don't use it anymore
\n"}],"name":"xataface","title":"xataface.md","quadrant":"platforms-and-aoe-services","body":"In the past, we used a custom-developed toolset with Xataface,T3Deploy and a settings migration tool as an easy way to manage TYPO3- and Magento-related configurations and to automatically create environments on our shared integration/dev-servers.
\nToday, there is no advantage or need for Xataface. Don't use it anymore
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/xataface.md"},{"flag":"default","featured":false,"revisions":[{"name":"xmlunit","release":"2017-03-01","title":"XMLUnit","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/xmlunit.md","body":"XMLUnit is a Java and .NET testing framework for XML documents. It is very useful for performing contract tests with SOAP interfaces or other XML-based message types.
\nComparing strings of XML can lead to instable tests because of the changing order of elements or changed values, etc. XMLUnit provides features to address these issues. It is possible to validate against an XML Schema, use XPath queries or compare against expected outcomes. It also comes with a nice diff-engine which makes it easy to check the parts of an XML document that are important.
\n"}],"name":"xmlunit","title":"xmlunit.md","quadrant":"tools","body":"XMLUnit is a Java and .NET testing framework for XML documents. It is very useful for performing contract tests with SOAP interfaces or other XML-based message types.
\nComparing strings of XML can lead to instable tests because of the changing order of elements or changed values, etc. XMLUnit provides features to address these issues. It is possible to validate against an XML Schema, use XPath queries or compare against expected outcomes. It also comes with a nice diff-engine which makes it easy to check the parts of an XML document that are important.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/xmlunit.md"},{"flag":"default","featured":false,"revisions":[{"name":"yarn","release":"2018-03-01","title":"Yarn","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/yarn.md","body":"Yarn is a dependency management tool for frontend (node) projects similar to npm. It also uses the npm registry and \ninfrastructure. According to Yarn, the benefits are that Yarn is much faster, automatically writes a .lock file and \nbuilds up a local cache to be even faster when installing packages again.
\nAt AOE, we started using Yarn in different projects to evaluate if we can switch to Yarn for all projects.
\n"}],"name":"yarn","title":"yarn.md","quadrant":"tools","body":"Yarn is a dependency management tool for frontend (node) projects similar to npm. It also uses the npm registry and \ninfrastructure. According to Yarn, the benefits are that Yarn is much faster, automatically writes a .lock file and \nbuilds up a local cache to be even faster when installing packages again.
\nAt AOE, we started using Yarn in different projects to evaluate if we can switch to Yarn for all projects.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/yarn.md"}],"releases":["2017-03-01","2018-03-01","2019-11-01"]} \ No newline at end of file +{"items":[{"flag":"default","featured":true,"revisions":[{"name":"adr","release":"2018-03-01","title":"ADR","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/adr.md","body":"Architecture Decision Records
\nADR is a lightweight documentation of important architecture decisions taken by the team.\nWithout documentation of the architecture and the architecture decisions, new team members can only do two things:
\nIt goes without saying that both options aren't right.
\nTherefore, we suggest documenting the important architecture decisions. We use a simple tool such as https://github.com/npryce/adr-tools and store them in version control.\nIn larger projects with many teams we also establish a regular "architecture board / COI" with regular meetings.\nOften, the architecture decisions are taken in such meetings.
\nThe main purpose of this documentation is to:
\nArchitecture Decision Records
\nADR is a lightweight documentation of important architecture decisions taken by the team.\nWithout documentation of the architecture and the architecture decisions, new team members can only do two things:
\nIt goes without saying that both options aren't right.
\nTherefore, we suggest documenting the important architecture decisions. We use a simple tool such as https://github.com/npryce/adr-tools and store them in version control.\nIn larger projects with many teams we also establish a regular "architecture board / COI" with regular meetings.\nOften, the architecture decisions are taken in such meetings.
\nThe main purpose of this documentation is to:
\nAkeneo is a Product Information Management system (also known as PIM, PCM or Product MDM) and helps centralize and harmonize all the technical and marketing information of products.
\nWe use Akeneo with success in our projects and products (For example in OM3), where it is responsible for:
\nThe system has a modern and friendly user interface and product managers find things such as completenesscheck, translation views and mass editing very helpful.
\nWith delta export and import capabilities and the usage of Mongo DB as persitence backend, the performance is acceptable. We miss a richer API - but the system is extendable and based on PHP/Symfony 2.
\n"}],"name":"akeneo","title":"akeneo.md","ring":3,"quadrant":"tools","body":"Akeneo is a Product Information Management system (also known as PIM, PCM or Product MDM) and helps centralize and harmonize all the technical and marketing information of products.
\nWe use Akeneo with success in our projects and products (For example in OM3), where it is responsible for:
\nThe system has a modern and friendly user interface and product managers find things such as completenesscheck, translation views and mass editing very helpful.
\nWith delta export and import capabilities and the usage of Mongo DB as persitence backend, the performance is acceptable. We miss a richer API - but the system is extendable and based on PHP/Symfony 2.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/akeneo.md"},{"flag":"default","featured":true,"revisions":[{"name":"akka","release":"2017-03-01","title":"Akka","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/akka.md","body":"With the growing adoption of microservice-based architecures, the interest in frameworks and tools that make building systems that follow the reactive manifesto possible has increased.
\nAkka provides you a toolkit and runtime based on the Actor model known from Erlang to reach this goal.
\nIt's one of the most-adopted toolkits in its space with its key contributors beeing heavily involved in the overall movement of the reactive community as well.\nAt AOE, we use Akka when we need high-performance, efficient data processing or where its finite state machine plays nicely with the domain of the application. It is worth mentioning that the actor model might come with extra complexity and therefore should be used in problem spaces where the advantages of this approach bring enough value and no accidental complexity.
\n"}],"name":"akka","title":"Akka","ring":2,"quadrant":"languages-and-frameworks","body":"With the growing adoption of microservice-based architecures, the interest in frameworks and tools that make building systems that follow the reactive manifesto possible has increased.
\nAkka provides you a toolkit and runtime based on the Actor model known from Erlang to reach this goal.
\nIt's one of the most-adopted toolkits in its space with its key contributors beeing heavily involved in the overall movement of the reactive community as well.\nAt AOE, we use Akka when we need high-performance, efficient data processing or where its finite state machine plays nicely with the domain of the application. It is worth mentioning that the actor model might come with extra complexity and therefore should be used in problem spaces where the advantages of this approach bring enough value and no accidental complexity.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/akka.md"},{"flag":"changed","featured":false,"revisions":[{"name":"akka-streams","release":"2019-11-01","title":"Akka Streams","ring":1,"quadrant":"languages-and-frameworks","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/akka-streams.md","body":"Updated to "adopt"
\n"},{"name":"akka-streams","release":"2018-03-01","title":"Akka Streams","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/akka-streams.md","body":"In our backend services, we frequently encounter the task to transform data\ncoming from and uploading to external sources and services.
\nBuilding more complex data transformation processes with Akka Actors has proven\nvery difficult for us in the past.
\nSeeing this data as a stream of elements could allow handling them piece by\npiece and only keeping as much of the data in-process as can currently be\nhandled.
\nAkka Streams is\na Reactive Streams implementation that\nprovides a very end-user friendly API for setting up streams for data\nprocessing that are bounded in resource usage and efficient. It uses the Akka\nActor Framework to execute these streams in an asynchronous and parallel\nfashion exploiting today's multi-core architectures without having the user to\ninteract with Actors directly. It handles things such as message resending in\nfailure cases and preventing message overflow. It is also interoperable with\nother Reactive Streams implementations.
\nOur first trials with Akka Streams were promising but we haven't yet implemented\ncomplex services with it.
\nWe will continue looking into it together with the\nAlpakka Connectors for integration\nwork.
\n"}],"name":"akka-streams","title":"Akka Streams","ring":1,"quadrant":"languages-and-frameworks","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/akka-streams.md"},{"flag":"changed","featured":true,"revisions":[{"name":"alpakka","release":"2019-11-01","title":"Alpakka","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/alpakka.md","body":"Updated to "adopt"
\n"},{"name":"alpakka","release":"2018-03-01","title":"Alpakka","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/alpakka.md","body":"When using Akka Streams to build\nreactive data transformation services you usually need to connect to several\ndifferent services such as FTP, S3 buckets, AMQP brokers or different databases.
\nAlpakka provides\nintegration building blocks for Akka Streams to access these services in a\nreactive fashion and contains transformations for working with XML, CSV or\nJSON structured data.
\nCombined, Akka Streams and Alpakka enable us to build small reactive\nintegration services with minimal resource consumption and good performance, and\nare a good alternative to larger ESB solutions or integration tools.
\n"}],"name":"alpakka","title":"Alpakka","ring":1,"quadrant":"languages-and-frameworks","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/alpakka.md"},{"flag":"default","featured":true,"revisions":[{"name":"angular","release":"2018-03-01","title":"Angular","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/angular.md","body":"In addition to numerous major upgrades from version 2 to 5, which often needed a "hands-on" approach, a lot has happened in the Angular \necosystem in 2017. Specifically, the improvements in the HTTP-Client, which now requires less coding effort. Or \nthe vast improvements on angular.cli such as aot (ahead of time compile) for faster rendering, fewer requests and \nmuch smaller builds, to just name the most important ones.
\nWe have achieved particularly good results using Angular in large and medium-size projects. Actually, \nit's our framework-of-choice in our telecommunication sector teams as a single-page application framework (SPA) for microservice front \nends.
\nThe convenient scaffolding of unit- and end-to-end-tests provides a quality-driven workflow.\nAlso, the module- and component architecture helps to keep the codebase understandable end maintainable.
\n"},{"name":"angular","release":"2017-03-01","title":"Angular","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/angular.md","body":"The latest version of the Angular Framework, which is used for large single-page applications.
\nAngular is a complete rewrite of Angular 1 — many things have changed compared to the first version. The latest best practices and toolings from the JavaScript community have found their way into Angular.
\nIt supports DI (dependency injection), it has a clean inheritance and a good separation of concerns. Angular follows the web component standards to avoid negative side effects between components.
\nWe think that Angular is well-structured on both a development and an application level.
\nWhen talking about Angular, we must consider the angular.cli as well, which provides a huge level of intelligent automation along the development process and project setup.
\n"}],"name":"angular","title":"Angular","ring":2,"quadrant":"languages-and-frameworks","body":"In addition to numerous major upgrades from version 2 to 5, which often needed a "hands-on" approach, a lot has happened in the Angular \necosystem in 2017. Specifically, the improvements in the HTTP-Client, which now requires less coding effort. Or \nthe vast improvements on angular.cli such as aot (ahead of time compile) for faster rendering, fewer requests and \nmuch smaller builds, to just name the most important ones.
\nWe have achieved particularly good results using Angular in large and medium-size projects. Actually, \nit's our framework-of-choice in our telecommunication sector teams as a single-page application framework (SPA) for microservice front \nends.
\nThe convenient scaffolding of unit- and end-to-end-tests provides a quality-driven workflow.\nAlso, the module- and component architecture helps to keep the codebase understandable end maintainable.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/angular.md"},{"flag":"default","featured":false,"revisions":[{"name":"ant","release":"2017-03-01","title":"Ant","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/ant.md","body":"Apache Ant was build in 1997 to have something like Make in the C/C++ world for Java. Ant uses xml files to describe steps required to produce executable artifacts from source code. The main concepts of tasks and targets are programmable in an imperative style.
\nApache Ant was and is widely used by large software projects. Our recommendation is to stop using Apache Ant for new projects. If you are free to choose, we recommend Gradle as an Apache Ant replacement.
\n"}],"name":"ant","title":"ant.md","ring":4,"quadrant":"tools","body":"Apache Ant was build in 1997 to have something like Make in the C/C++ world for Java. Ant uses xml files to describe steps required to produce executable artifacts from source code. The main concepts of tasks and targets are programmable in an imperative style.
\nApache Ant was and is widely used by large software projects. Our recommendation is to stop using Apache Ant for new projects. If you are free to choose, we recommend Gradle as an Apache Ant replacement.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/ant.md"},{"flag":"changed","featured":false,"revisions":[{"name":"anypoint-platform","release":"2019-11-01","featured":false,"title":"anypoint-platform.md","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/anypoint-platform.md","body":"Recently our teams migrated some project from anypoint to "Apache Camel" or use "Alpakka" for integration work.
\n"},{"name":"anypoint-platform","release":"2017-03-01","title":"Anypoint platform","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/anypoint-platform.md","body":"Anypoint platform (formally known as Mule or Mule ESB) is an Enterprise Integration Platform written in Java.
\nAnypoint provide tools to use Enterprise Integration Patterns (EAI) and has a high number of ready-to-use connectors to communicate with software tools such as SAP, Salesforce, etc.
\nAnypoint Community Version is Open Source and contribution is possible. The platform is pluggable with own connectors. Mulesoft is also driving the raml specification and related Open Source tools.
\nAOE is a Mulesoft Partner and we use both the Community and Enterprise Versions of Anypoint. We use Anypoint as an API Gateway to combine and transform data from multiple backends. We use it as ESB or Integration platform for loose coupling of software components. And we also use it as legacy modernization to provide modern APIs for legacy- or foreign software.
\n"}],"name":"anypoint-platform","title":"anypoint-platform.md","ring":2,"quadrant":"tools","body":"Recently our teams migrated some project from anypoint to "Apache Camel" or use "Alpakka" for integration work.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/anypoint-platform.md"},{"flag":"new","featured":true,"revisions":[{"name":"aoe-sso","release":"2019-11-01","title":"AOE SSO","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/aoe-sso.md","body":"To improve security and user experience we decided to install an organisation wide SSO and use OpenID Connect integrate with existing tools.\nWe use Keycloak as the SSO server, which is backed by our LDAP.\nThis also helps to implement new infrastructure security based on "BeyondCorp".
\n"}],"name":"aoe-sso","title":"AOE SSO","ring":1,"quadrant":"platforms-and-aoe-services","body":"To improve security and user experience we decided to install an organisation wide SSO and use OpenID Connect integrate with existing tools.\nWe use Keycloak as the SSO server, which is backed by our LDAP.\nThis also helps to implement new infrastructure security based on "BeyondCorp".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/aoe-sso.md"},{"flag":"new","featured":false,"revisions":[{"name":"apache-camel","release":"2019-11-01","title":"Apache Camel","ring":2,"quadrant":"languages-and-frameworks","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apache-camel.md","body":""Camel" is an open source integration framework that empowers you to quickly and easily integrate various systems consuming or producing data.
\nOur teams are using Apache Camel as API Gateway that offers APIs and takes care of Federation to various Backends as well as Authorisation tasks.
\n"}],"name":"apache-camel","title":"Apache Camel","ring":2,"quadrant":"languages-and-frameworks","body":""Camel" is an open source integration framework that empowers you to quickly and easily integrate various systems consuming or producing data.
\nOur teams are using Apache Camel as API Gateway that offers APIs and takes care of Federation to various Backends as well as Authorisation tasks.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apache-camel.md"},{"flag":"default","featured":true,"revisions":[{"name":"api-first-design-approach","release":"2017-03-01","title":"API-First Design Approach","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/api-first-design-approach.md","body":"The API-First Design Approach puts the API design at the beginning of the implementation without any constraints, for example, from the current IT infrastructure or the implementation itself. The idea is to design the API in a way that it serves its purpose best and the consumers are enabled to work efficiently.
\nThere are several advantages to this approach. For example, it can help to avoid reflecting the internal structure of the application or any internal constraints. Furthermore, as one of the most important design aspects is consistency, one can define features such as the behavior of security, URL schemes, and API keys upfront. It also helps speed up parallel implementation. A team that consumes the API can start working directly after the API design because it can easily be mocked.
\nThere are several tools for modelling an API, but here at AOE we mainly use RAML as it provides a rich set of tools for generating documentation, mocking and more. For mocking we use Wiremock, for example.
\nRelated to the "API-First" approach is the "Headless" approach where an existing application (with or without existing API) is used as a backend for a separate frontend. We used this with sucess for Magento-based E-Commerce platforms. This allows encapsulating the core features of that application, while integrating it into a larger landscape of components using its API as a unified way to interact between components. Decoupling the core logic from its presentation layer allows picking the best technology stack for the various parts independently.
\nFor further reading see:
\n\n"}],"name":"api-first-design-approach","title":"API-First Design Approach","ring":2,"quadrant":"methods-and-patterns","body":"The API-First Design Approach puts the API design at the beginning of the implementation without any constraints, for example, from the current IT infrastructure or the implementation itself. The idea is to design the API in a way that it serves its purpose best and the consumers are enabled to work efficiently.
\nThere are several advantages to this approach. For example, it can help to avoid reflecting the internal structure of the application or any internal constraints. Furthermore, as one of the most important design aspects is consistency, one can define features such as the behavior of security, URL schemes, and API keys upfront. It also helps speed up parallel implementation. A team that consumes the API can start working directly after the API design because it can easily be mocked.
\nThere are several tools for modelling an API, but here at AOE we mainly use RAML as it provides a rich set of tools for generating documentation, mocking and more. For mocking we use Wiremock, for example.
\nRelated to the "API-First" approach is the "Headless" approach where an existing application (with or without existing API) is used as a backend for a separate frontend. We used this with sucess for Magento-based E-Commerce platforms. This allows encapsulating the core features of that application, while integrating it into a larger landscape of components using its API as a unified way to interact between components. Decoupling the core logic from its presentation layer allows picking the best technology stack for the various parts independently.
\nFor further reading see:
\n\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/api-first-design-approach.md"},{"flag":"new","featured":true,"revisions":[{"name":"apollo-client","release":"2019-11-01","title":"Apollo Client","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apollo-client.md","body":"The Apollo Client is a tool to efficiently work together with an GraphQL server. \nIt makes it easy to run your queries and mutations, cache results, brings tooling to download schemas and generate types to name a few of the useful features.
\n"}],"name":"apollo-client","title":"Apollo Client","ring":2,"quadrant":"tools","body":"The Apollo Client is a tool to efficiently work together with an GraphQL server. \nIt makes it easy to run your queries and mutations, cache results, brings tooling to download schemas and generate types to name a few of the useful features.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/apollo-client.md"},{"flag":"default","featured":true,"revisions":[{"name":"artifactory","release":"2018-03-01","title":"Artifactory","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/artifactory.md","body":"Artifactory is now used in every newly started project at AOE and plays a\ncentral role as an artifact repository for libraries, applications and docker\nimages. While cleanup is still an issue, we recommend the adoption of an\nartifact repository in all our projects.
\n"},{"name":"artifactory","release":"2017-03-01","title":"Artifactory","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/artifactory.md","body":"JFrog Artifactory is a software tool, which, in the end, manages and stores (binary) artifacts.\nIn addition to storage, it provides a managing interface, which also allows to store build information, properties as well as dependencies per artifact which are organized within repositories. A fine grained security system enables easy management of which artifacts are available to whom.\nThe artifacts are exposed via an HTTP(S)-Url Artifactory, which can generate package-manager compatible manifests for the repositories. AOE utilizes Artifactory to serve Maven, Apt, Npm, Composer and Docker Repositories.
\nIn addition to storing own assets, Artifactory is able to proxy remote Repository for and cache resolved artifacts locally.\nThis results in an increased build performance and decouples builds from external service dependencies and ensures builds still work even if they utilize outdated dependencies that might not be publicly available anymore.
\nArtifactory provides a powerful REST-API for managing Artifacts including a powerful search AQL. It is utilized to provide complex release processes based on QA-Attributes on an artifact level.
\nArtifactory at AOE currently comes with some problems, too:
\nAOE is using the Professional version for a central instance that can be used by different teams. We encourage teams to use Artifactory instead of Jenkins to store and manage build artifacts - and to take care of cleaning up old artifacts automatically.
\n"}],"name":"artifactory","title":"Artifactory","ring":1,"quadrant":"platforms-and-aoe-services","body":"Artifactory is now used in every newly started project at AOE and plays a\ncentral role as an artifact repository for libraries, applications and docker\nimages. While cleanup is still an issue, we recommend the adoption of an\nartifact repository in all our projects.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/artifactory.md"},{"flag":"default","featured":false,"revisions":[{"name":"asciidoc","release":"2018-03-01","title":"AsciiDoc","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/asciidoc.md","body":"AsciiDoc is a lightweight markup language such as Markdown. \nWith a concise Syntax, it supports more features than Markdown without extensions such as Tables and Table of Contents.\nIt's easy to write complex documentation with AsciiDoc. And with Asciidoctor you can export your text to Pdf, HTML, etc.
\nAt AOE, we use AsciiDoc for Documentation in our Repositories.
\n"}],"name":"asciidoc","title":"asciidoc.md","ring":3,"quadrant":"tools","body":"AsciiDoc is a lightweight markup language such as Markdown. \nWith a concise Syntax, it supports more features than Markdown without extensions such as Tables and Table of Contents.\nIt's easy to write complex documentation with AsciiDoc. And with Asciidoctor you can export your text to Pdf, HTML, etc.
\nAt AOE, we use AsciiDoc for Documentation in our Repositories.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/asciidoc.md"},{"flag":"default","featured":false,"revisions":[{"name":"aws-lambda","release":"2017-03-01","title":"AWS Lambda","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/aws-lambda.md","body":"AWS Lambda is one of the exciting new "cloud-native" / serverless ways to run code without worrying about infrastructure. While it is possible to directly respond to web requests using the API Gateway, our teams are currently using AWS Lambda mostly for tasks outside the critical path. As a custom resource for CloudFormation, it allows us to manage all aspects of a deployment in an elegant way by simply deploying a new CloudFormation stack. Baking AMIs and doing green/blue switches are only two of the many use cases where AWS Lambda comes in very handy.
\nIn addition to deployment automation, we're using AWS Lambda to process incoming data. Being able to respond to events from various sources such as S3 Buckets, SNS topics, Kinesis streams and HTTP endpoints it's a perfect match to process, transform and forward incoming data in near-realtime at a fraction of the cost of running an ESB.
\n"}],"name":"aws-lambda","title":"aws-lambda.md","ring":2,"quadrant":"platforms-and-aoe-services","body":"AWS Lambda is one of the exciting new "cloud-native" / serverless ways to run code without worrying about infrastructure. While it is possible to directly respond to web requests using the API Gateway, our teams are currently using AWS Lambda mostly for tasks outside the critical path. As a custom resource for CloudFormation, it allows us to manage all aspects of a deployment in an elegant way by simply deploying a new CloudFormation stack. Baking AMIs and doing green/blue switches are only two of the many use cases where AWS Lambda comes in very handy.
\nIn addition to deployment automation, we're using AWS Lambda to process incoming data. Being able to respond to events from various sources such as S3 Buckets, SNS topics, Kinesis streams and HTTP endpoints it's a perfect match to process, transform and forward incoming data in near-realtime at a fraction of the cost of running an ESB.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/aws-lambda.md"},{"flag":"default","featured":true,"revisions":[{"name":"axure","release":"2018-03-01","title":"Axure","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/axure.md","body":"Axure is a tool that enables the creation of flowcharts, wireframes, mockups, user journeys and more.\nThrough features such as conditional logic, dynamic content and animations it is possible to create highly functional and rich UI prototypes, which convey a realistic look and feel as to how the application to be developed should behave and look.
\nWe at AOE have used Axure successfully in several projects and it helped us a lot, particularly:
\nIn conclusion, Axure is a great tool that provides all stakeholders with a common understanding and helped us a lot to specify requirements and find their implications.
\n"}],"name":"axure","title":"Axure","ring":2,"quadrant":"tools","body":"Axure is a tool that enables the creation of flowcharts, wireframes, mockups, user journeys and more.\nThrough features such as conditional logic, dynamic content and animations it is possible to create highly functional and rich UI prototypes, which convey a realistic look and feel as to how the application to be developed should behave and look.
\nWe at AOE have used Axure successfully in several projects and it helped us a lot, particularly:
\nIn conclusion, Axure is a great tool that provides all stakeholders with a common understanding and helped us a lot to specify requirements and find their implications.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/axure.md"},{"flag":"default","featured":true,"revisions":[{"name":"babel","release":"2018-03-01","title":"Babel","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/babel.md","body":"We have been using babel for some time now. Since we have started using it, we don't have to\nstruggle with unimplemented features of ECMAScript. In this regard, JavaScript is\nJavaScript, no matter what browser you are using. We we strongly recommend \nusing Babel or similar solutions (e.g. TypeScript).
\n"},{"name":"babel","release":"2017-03-01","title":"Babel","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/babel.md","body":"Babel gives you the possibility to use the latest features from JavaScript (ECMAScript) in the browser of your choice.
\nWithout Babel you had to use the feature set of your oldest browser or use feature detections such as modernizr or write polyfills on your own.
\nIn general, Babel is split in 2 ways to bring you the new goodies you want.
\nNew syntax will be compiled to old EcmaScript 5 code e.g.:
\n\nNew globals and functions are provided by babel-polyfill e.g.:
\nThe configuration is really simple due to the plugin system. You can choose which ECMAScript version and stage presets you want to use.
\nTo know what you need you can practice ECMAScript 6 by doing it with es6katas and ask caniuse.
\nIf you are using TypeScript, Babel is not necessary since you already get the new features with TypeScript.
\n"}],"name":"babel","title":"Babel","ring":1,"quadrant":"languages-and-frameworks","body":"We have been using babel for some time now. Since we have started using it, we don't have to\nstruggle with unimplemented features of ECMAScript. In this regard, JavaScript is\nJavaScript, no matter what browser you are using. We we strongly recommend \nusing Babel or similar solutions (e.g. TypeScript).
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/babel.md"},{"flag":"new","featured":true,"revisions":[{"name":"beyondcorp","release":"2019-11-01","title":"BeyondCorp","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/beyondcorp.md","body":"BeyondCorp is a Zero Trust framework that evolved at Google.\nWith the surge of cloud technologies and micro services the network perimeter is ever disappearing. \nThis provides challenges for authentication of subjects that used to heavily rely on network segments.\nWith Zero Trust no assumption is made about how far something can be trusted, everything is untrusted by default and authentication and authorisation happens all the time, not just once.\nWhile network segments and VPN connections may still have relevance in specific areas AOE is increasingly implementing BeyondCorp in all its components and services with implementing OAuth and OpenID Connect.
\n"}],"name":"beyondcorp","title":"BeyondCorp","ring":2,"quadrant":"methods-and-patterns","body":"BeyondCorp is a Zero Trust framework that evolved at Google.\nWith the surge of cloud technologies and micro services the network perimeter is ever disappearing. \nThis provides challenges for authentication of subjects that used to heavily rely on network segments.\nWith Zero Trust no assumption is made about how far something can be trusted, everything is untrusted by default and authentication and authorisation happens all the time, not just once.\nWhile network segments and VPN connections may still have relevance in specific areas AOE is increasingly implementing BeyondCorp in all its components and services with implementing OAuth and OpenID Connect.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/beyondcorp.md"},{"flag":"default","featured":false,"revisions":[{"name":"blameless-post-mortems","release":"2018-03-01","title":"Blameless Post Mortems","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/blameless-post-mortems.md","body":"\n\nFailure and invention are inseparable twins.
\n— Jeff Bezos
\n
Blameless Post Mortems provide a concept of dealing with failures that inevitably occur when developing and operating complex software solutions. After any major incident or outage, the team gets together to perform an in-depth analysis of what happened and what can be done to mitigate the risk of similar issues happening in the future.
\nBased on trust, and under the assumption that every person involved had good intentions to do the best-possible job given the information at hand, Blameless Post Mortems provide an opportunity to continuously improve the quality of software and infrastructure and the processes to deal with critical situations.
\nThe post mortem documentation usually consists of both a timeline of the events leading to an incident and the steps taken to its remediation, as well as future actions and learnings for increasing resilience and stability of our services.
\nAt AOE, we strive to conduct a Blameless Post Mortem meeting after every user-visible incident.
\n"}],"name":"blameless-post-mortems","title":"blameless-post-mortems.md","ring":2,"quadrant":"methods-and-patterns","body":"\n\nFailure and invention are inseparable twins.
\n— Jeff Bezos
\n
Blameless Post Mortems provide a concept of dealing with failures that inevitably occur when developing and operating complex software solutions. After any major incident or outage, the team gets together to perform an in-depth analysis of what happened and what can be done to mitigate the risk of similar issues happening in the future.
\nBased on trust, and under the assumption that every person involved had good intentions to do the best-possible job given the information at hand, Blameless Post Mortems provide an opportunity to continuously improve the quality of software and infrastructure and the processes to deal with critical situations.
\nThe post mortem documentation usually consists of both a timeline of the events leading to an incident and the steps taken to its remediation, as well as future actions and learnings for increasing resilience and stability of our services.
\nAt AOE, we strive to conduct a Blameless Post Mortem meeting after every user-visible incident.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/blameless-post-mortems.md"},{"flag":"default","featured":false,"revisions":[{"name":"bower","release":"2017-03-01","title":"Bower","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/bower.md","body":"Bower is a package manager for frontend resources such as JavaScript libraries and CSS frameworks. Compared to npm, it has a somewhat different approach to loading and resolving the packages, resulting in a smaller and cleaner folder structure.
\nIn small web projects, this approach is good and sufficient, but larger projects will need more dependencies such as task runners or testing frameworks, which are not available through Bower. As most of the frontend libraries are also available through npm, it's not surprising that we ask ourselves why Bower is still needed.
\nAt AOE, we decided to use npm as the only package manager to avoid having multiple tools doing similar things. Developers only need to deal with one solution, which makes the project easier to maintain.
\n"}],"name":"bower","title":"bower.md","ring":4,"quadrant":"tools","body":"Bower is a package manager for frontend resources such as JavaScript libraries and CSS frameworks. Compared to npm, it has a somewhat different approach to loading and resolving the packages, resulting in a smaller and cleaner folder structure.
\nIn small web projects, this approach is good and sufficient, but larger projects will need more dependencies such as task runners or testing frameworks, which are not available through Bower. As most of the frontend libraries are also available through npm, it's not surprising that we ask ourselves why Bower is still needed.
\nAt AOE, we decided to use npm as the only package manager to avoid having multiple tools doing similar things. Developers only need to deal with one solution, which makes the project easier to maintain.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/bower.md"},{"flag":"default","featured":true,"revisions":[{"name":"client-side-error-logging","release":"2017-03-01","title":"Client-side error logging","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/client-side-error-logging.md","body":"More and more business logic is done client-side with various web and app technologies. How do we know if everything works in production? We can easily track backend exceptions in the server logs, but what about client-side errors in the user's browser or mobile app?
\nWith client-side error logging, we send errors to a central server to see instantly what is going wrong. With this method errors can be found and resolved quickly before they affect even more users.
\nAt AOE, we use the Open Source solution Sentry.io. It can handle multiple projects and teams and integrates well with other services such as Mattermost/Slack and Issue Tracking Systems.
\n"}],"name":"client-side-error-logging","title":"Client-side error logging","ring":2,"quadrant":"methods-and-patterns","body":"More and more business logic is done client-side with various web and app technologies. How do we know if everything works in production? We can easily track backend exceptions in the server logs, but what about client-side errors in the user's browser or mobile app?
\nWith client-side error logging, we send errors to a central server to see instantly what is going wrong. With this method errors can be found and resolved quickly before they affect even more users.
\nAt AOE, we use the Open Source solution Sentry.io. It can handle multiple projects and teams and integrates well with other services such as Mattermost/Slack and Issue Tracking Systems.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/client-side-error-logging.md"},{"flag":"new","featured":true,"revisions":[{"name":"cockpit","release":"2019-11-01","title":"Cockpit","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/cockpit.md","body":"Cockpit is a self-hosted headless and api-driven content management system.
\n"}],"name":"cockpit","title":"Cockpit","ring":3,"quadrant":"tools","body":"Cockpit is a self-hosted headless and api-driven content management system.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/cockpit.md"},{"flag":"new","featured":true,"revisions":[{"name":"concourse-ci","release":"2019-11-01","title":"Concourse","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/concourse-ci.md","body":"Concourse is an open-source continuous "thing-doer". It represents a general approach for automation which can be used for Continuous Integration and Continuous Delivery. Concourse CI follows a strict concept that is based on idempotency, immutability, declarative config, stateless workers, and reproducible builds. Pipelines are built on the mechanics of resources, tasks and jobs, which are all configured in one or multiple YAML files. \nConcourse claims to be "simple" but has a steep learning curve in the beginning till it gets simple to use.\nConcourse is used in the Congstar Team to automate infrastructure deployments.
\n"}],"name":"concourse-ci","title":"Concourse","ring":2,"quadrant":"tools","body":"Concourse is an open-source continuous "thing-doer". It represents a general approach for automation which can be used for Continuous Integration and Continuous Delivery. Concourse CI follows a strict concept that is based on idempotency, immutability, declarative config, stateless workers, and reproducible builds. Pipelines are built on the mechanics of resources, tasks and jobs, which are all configured in one or multiple YAML files. \nConcourse claims to be "simple" but has a steep learning curve in the beginning till it gets simple to use.\nConcourse is used in the Congstar Team to automate infrastructure deployments.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/concourse-ci.md"},{"flag":"default","featured":false,"revisions":[{"name":"consul","release":"2017-03-01","title":"Consul","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/consul.md","body":"Consul is a lightweight service to provide a service discovery registry with failure detection (health checks) for circuit breakers. It also provides configuration management with key/value storage.\\\nThe typical way to use it is that a consul master cluster takes care of the update and write processes and consul clients run locally on the apps host - data is shared accross the complete Consul cluster. The data can be accessed by using DNS and HTTP APIs.
\nAt AOE, we use Consul for settings distribution with consul-template as a way to do Settings Injection during deployment. Consul is also used as service discovery between apps inside microservice environments.
\nWith Vault there is another tool that can be used to manage and share secrets.
\n"}],"name":"consul","title":"consul.md","ring":3,"quadrant":"tools","body":"Consul is a lightweight service to provide a service discovery registry with failure detection (health checks) for circuit breakers. It also provides configuration management with key/value storage.\\\nThe typical way to use it is that a consul master cluster takes care of the update and write processes and consul clients run locally on the apps host - data is shared accross the complete Consul cluster. The data can be accessed by using DNS and HTTP APIs.
\nAt AOE, we use Consul for settings distribution with consul-template as a way to do Settings Injection during deployment. Consul is also used as service discovery between apps inside microservice environments.
\nWith Vault there is another tool that can be used to manage and share secrets.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/consul.md"},{"flag":"changed","featured":true,"revisions":[{"name":"container-based-builds","release":"2019-11-01","title":"Container-based builds","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/container-based-builds.md","body":"Updated to "adopt". Container based builds has getting to the defacto standard for our pipelines in Gitlab or other CI Tools.
\n"},{"name":"container-based-builds","release":"2017-03-01","title":"Container-based builds","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/container-based-builds.md","body":"Running your builds in isolated containers keeps your build servers clean. It allows you to even run them with multiple versions of a framework or programming language. You don't need additional machines like you would for running builds with PHP5 or PHP7 at the same time or running some legacy builds.
\nNote that you need to think about some kind of caching mechanism for your dependencies to avoid downloading them in every build, which would cause long build times.
\nAt AOE, we are currently starting to use this approach for building services and it is especially useful if your build has special dependencies. Also, it's possible to use GitLab as a build tool or use Docker with the new Jenkinspipeline. For caching we are evaluating minio as a cache server. We noticed that our builds run quite rapidly and reliably with that. Also, the complexity of the builds decreased since we don't need any workarounds, which were caused by having everything installed on one build server.
\n"}],"name":"container-based-builds","title":"Container-based builds","ring":1,"quadrant":"methods-and-patterns","body":"Updated to "adopt". Container based builds has getting to the defacto standard for our pipelines in Gitlab or other CI Tools.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/container-based-builds.md"},{"flag":"default","featured":true,"revisions":[{"name":"crc","release":"2018-03-01","title":"CRC Games","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/crc.md","body":"Class Responsibility Collaboration Card Games are a method to discuss and align the software design - especially useful for object-oriented software.
\nA proper software design is one of the most important things to ensure the sucess and the maintainability of your software.\nEspecially for iterative development methods, where you work on a software task by task, it is important to have designs sessions that also look forward to the next iterations and the conceptional whole.
\nAnd for software design to be sucessfull, it is very important that everybody (in the team) has the same understanding of the design and stands behind it.
\nCRC sessions help to design and align the high-level object design and collaboration of your system with the whole team. During such sessions new team members can learn from the experience and explanations of tropers.
\nThis is how we often conduct a CRC Session:
\nClass Responsibility Collaboration Card Games are a method to discuss and align the software design - especially useful for object-oriented software.
\nA proper software design is one of the most important things to ensure the sucess and the maintainability of your software.\nEspecially for iterative development methods, where you work on a software task by task, it is important to have designs sessions that also look forward to the next iterations and the conceptional whole.
\nAnd for software design to be sucessfull, it is very important that everybody (in the team) has the same understanding of the design and stands behind it.
\nCRC sessions help to design and align the high-level object design and collaboration of your system with the whole team. During such sessions new team members can learn from the experience and explanations of tropers.
\nThis is how we often conduct a CRC Session:
\nCypress is a new front-end testing tool (end2end). It comes as a simple node package and is therefore easy to use and maintain for front-end developers and testers. Cypress has a different approach than selenium, it runs in the browser and in the same loop as the device under test.
\nGood:
\nNot so good:
\nExample of a test :
\ndescribe('My First Test', function() {\n it('Visits the Kitchen Sink', function() {\n cy.visit('https://example.cypress.io')\n\n cy.contains('type').click()\n\n cy.url().should('include', '/commands/actions')\n\n cy.get('.action-email')\n .type('fake@email.com')\n .should('have.value', 'fake@email.com')\n })\n})\n\n"}],"name":"cypress","title":"Cypress","ring":3,"quadrant":"tools","body":"Cypress is a new front-end testing tool (end2end). It comes as a simple node package and is therefore easy to use and maintain for front-end developers and testers. Cypress has a different approach than selenium, it runs in the browser and in the same loop as the device under test.
\nGood:
\nNot so good:
\nExample of a test :
\ndescribe('My First Test', function() {\n it('Visits the Kitchen Sink', function() {\n cy.visit('https://example.cypress.io')\n\n cy.contains('type').click()\n\n cy.url().should('include', '/commands/actions')\n\n cy.get('.action-email')\n .type('fake@email.com')\n .should('have.value', 'fake@email.com')\n })\n})\n\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/cypress.md"},{"flag":"default","featured":false,"revisions":[{"name":"dagger","release":"2017-03-01","title":"Dagger","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/dagger.md","body":"Dagger is a fully static, compile-time dependency injection framework for both Java and Android. Dagger doesn't use reflections at runtime, it saves resources. For us, it is a perfect match for Android development.
\nWe at AOE use it as a base framework for every Android project.
\n"}],"name":"dagger","title":"dagger.md","ring":1,"quadrant":"tools","body":"Dagger is a fully static, compile-time dependency injection framework for both Java and Android. Dagger doesn't use reflections at runtime, it saves resources. For us, it is a perfect match for Android development.
\nWe at AOE use it as a base framework for every Android project.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/dagger.md"},{"flag":"default","featured":false,"revisions":[{"name":"datadog","release":"2017-03-01","title":"Datadog","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/datadog.md","body":"After realizing that AWS CloudWatch isn't flexible enough, and running our own metrics aggregation, monitoring and altering isn't something we want to do ourselves, we decided to give Datadog a try. Datadog is very simple to set up and retrieves metrics from the AWS API (and many other integrations) and from an agent running on the EC2 instances. On top of that, it comes with many plugins for services such as Apache, NGINX and ElasticSearch, allowing us to track all important metrics without much effort. Creating dashboards, setting up alarms and integrating into other applications (such as ticket systems) is easy to do and works fine.
\n"}],"name":"datadog","title":"datadog.md","ring":3,"quadrant":"platforms-and-aoe-services","body":"After realizing that AWS CloudWatch isn't flexible enough, and running our own metrics aggregation, monitoring and altering isn't something we want to do ourselves, we decided to give Datadog a try. Datadog is very simple to set up and retrieves metrics from the AWS API (and many other integrations) and from an agent running on the EC2 instances. On top of that, it comes with many plugins for services such as Apache, NGINX and ElasticSearch, allowing us to track all important metrics without much effort. Creating dashboards, setting up alarms and integrating into other applications (such as ticket systems) is easy to do and works fine.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/datadog.md"},{"flag":"default","featured":false,"revisions":[{"name":"decoupling-infrastructure-via-messaging","release":"2017-03-01","title":"Decoupling Infrastructure via Messaging","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/decoupling-infrastructure-via-messaging.md","body":"In Microservices we have already covered the trend that modern architectures are moving away more and more from big monolithic applications to distributed software suites. The result of splitting our software and infrastructure in smaller parts, is the need to communicate with each other. This can be done by direct communication or by message-based asynchronouous communication. While synchronuous communication allows for more plannable "real-time" response times of the overall systems, asynchronouos communication increases the resilience and stability of the system significantly and allows one to use other integration and scaling patterns. However, it often comes with additional complexity.
\nMost of the IaaS Cloud providers offer messaging services such as AWS SQS which provide the possibility to decouple our infrastructure via Messaging. Also, we use RabbitMQ as a Messaging and Broker solution within our applications. The decision of using messaging and messaging patterns as an integration strategy can be made as part of strategic design considerations.
\n"}],"name":"decoupling-infrastructure-via-messaging","title":"decoupling-infrastructure-via-messaging.md","ring":2,"quadrant":"methods-and-patterns","body":"In Microservices we have already covered the trend that modern architectures are moving away more and more from big monolithic applications to distributed software suites. The result of splitting our software and infrastructure in smaller parts, is the need to communicate with each other. This can be done by direct communication or by message-based asynchronouous communication. While synchronuous communication allows for more plannable "real-time" response times of the overall systems, asynchronouos communication increases the resilience and stability of the system significantly and allows one to use other integration and scaling patterns. However, it often comes with additional complexity.
\nMost of the IaaS Cloud providers offer messaging services such as AWS SQS which provide the possibility to decouple our infrastructure via Messaging. Also, we use RabbitMQ as a Messaging and Broker solution within our applications. The decision of using messaging and messaging patterns as an integration strategy can be made as part of strategic design considerations.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/decoupling-infrastructure-via-messaging.md"},{"flag":"new","featured":true,"revisions":[{"name":"dependency-update-scan","release":"2019-11-01","title":"Dependency Update Scan","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/dependency-update-scan.md","body":"Automated dependency scans are useful to remove the manual task of regularly checking for version updates.\nOur teams are utilizing the Open Source bots Renovate and Scala Steward, both of which are running as a scheduled GitLab job in our internal infrastructure.\nThe bots are regularly creating merge requests with dependency version updates against our projects.
\nHaving this automated comes with a few advantages:
\nAutomated merge requests allow us to focus on reviewing, testing and prioritization of dependency version updates with considerably less effort.
\n"}],"name":"dependency-update-scan","title":"Dependency Update Scan","ring":3,"quadrant":"methods-and-patterns","body":"Automated dependency scans are useful to remove the manual task of regularly checking for version updates.\nOur teams are utilizing the Open Source bots Renovate and Scala Steward, both of which are running as a scheduled GitLab job in our internal infrastructure.\nThe bots are regularly creating merge requests with dependency version updates against our projects.
\nHaving this automated comes with a few advantages:
\nAutomated merge requests allow us to focus on reviewing, testing and prioritization of dependency version updates with considerably less effort.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/dependency-update-scan.md"},{"flag":"default","featured":true,"revisions":[{"name":"devops-practices","release":"2017-03-01","title":"Devops practices","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/devops-practices.md","body":"DevOps is a term that has been around for some years now. We understand DevOps as a philosophy and culture with related practices and tools - all with the aim of bringing (IT) Operations closer to Development.
\nJez Humble described the devops movement like this: "a cross-functional community of practice dedicated to the study of building, evolving and operating rapidly changing, secure, resilient systems at scale".
\nWith the size of software projects and the effects of agile development, the need to also deliver operation and infrastructure in an agile way increases more and more.
\nWe have been using the following practices with success:
\nCrossfunctional Teams "you build it, you run it"
\nIn the past year, we have moved from a more centralistic or standalone IT and operations service team to crossfunctional teams with Infrastructure experts working in and with the development team (admins joining the project team).
\nAnd, we changed to crossfunctional teams and a "you build it, you run it" approach for the bigger projects. We have seen that this leads to the following positive effects:
\nAs always, we are establishing "community of interests" to improve and promote the knowledge transfer between different teams.
\nIncrease of relevant tools
\nAnother important aspect and also enabler of DevOps practices is the increase of certain tool and methods - some of them are also represented in the Tech Radar. For example: Puppet Environments; Docker; Cloud Services, Terraform, Consul etc.
\nDevSetup = Prod Setup, Infrastructure as a Code
\nKeeping the development infrastructure setup close to production is also a commonly implemented practice and a direct result of the "Infrastructure as Code" method. Handling infrastructure and the required changes and innovations in ways similar to those used for applications is important; you can read more about this here: Infrastructure as Code
\nWe encourage all teams to adopt devops practices in the teams and to take care that there is a true collaboration between the different experts in a team and no invisible wall.
\n"}],"name":"devops-practices","title":"Devops practices","ring":1,"quadrant":"methods-and-patterns","body":"DevOps is a term that has been around for some years now. We understand DevOps as a philosophy and culture with related practices and tools - all with the aim of bringing (IT) Operations closer to Development.
\nJez Humble described the devops movement like this: "a cross-functional community of practice dedicated to the study of building, evolving and operating rapidly changing, secure, resilient systems at scale".
\nWith the size of software projects and the effects of agile development, the need to also deliver operation and infrastructure in an agile way increases more and more.
\nWe have been using the following practices with success:
\nCrossfunctional Teams "you build it, you run it"
\nIn the past year, we have moved from a more centralistic or standalone IT and operations service team to crossfunctional teams with Infrastructure experts working in and with the development team (admins joining the project team).
\nAnd, we changed to crossfunctional teams and a "you build it, you run it" approach for the bigger projects. We have seen that this leads to the following positive effects:
\nAs always, we are establishing "community of interests" to improve and promote the knowledge transfer between different teams.
\nIncrease of relevant tools
\nAnother important aspect and also enabler of DevOps practices is the increase of certain tool and methods - some of them are also represented in the Tech Radar. For example: Puppet Environments; Docker; Cloud Services, Terraform, Consul etc.
\nDevSetup = Prod Setup, Infrastructure as a Code
\nKeeping the development infrastructure setup close to production is also a commonly implemented practice and a direct result of the "Infrastructure as Code" method. Handling infrastructure and the required changes and innovations in ways similar to those used for applications is important; you can read more about this here: Infrastructure as Code
\nWe encourage all teams to adopt devops practices in the teams and to take care that there is a true collaboration between the different experts in a team and no invisible wall.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/devops-practices.md"},{"flag":"new","featured":true,"revisions":[{"name":"distributed-tracing","release":"2019-11-01","title":"Distributed Tracing","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/distributed-tracing.md","body":"Distributed Tracing creates visibility over processes spanning multiple applications.\nIn a microservice world where a request or operation involves multiple applications it is helpful to have an overview of what system is involved, at what point.\nAlso visibility of communicated data and errors helps to quickly identify issues in a microservice environment.\nOur tool of choice is Jaeger with B3 Propagation.
\n"}],"name":"distributed-tracing","title":"Distributed Tracing","ring":2,"quadrant":"platforms-and-aoe-services","body":"Distributed Tracing creates visibility over processes spanning multiple applications.\nIn a microservice world where a request or operation involves multiple applications it is helpful to have an overview of what system is involved, at what point.\nAlso visibility of communicated data and errors helps to quickly identify issues in a microservice environment.\nOur tool of choice is Jaeger with B3 Propagation.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/distributed-tracing.md"},{"flag":"default","featured":true,"revisions":[{"name":"docker","release":"2018-03-01","title":"Docker","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/docker.md","body":"Docker has pulled off very quickly and we updated it to "adopt".
\n"},{"name":"docker","release":"2017-03-01","title":"Docker","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/docker.md","body":"Docker is currently the most-used solution for creating and managing container-based infrastructures and deployments.
\nEssentially, Docker is a platform to build container images, distribute them and run them as an isolated process (using Linux kernel cgroups, network namespaces and custom mounts).
\nIn a DevOps environment, this helps a lot as we can run the exact same software and runtime (such as PHP) on both production and locally while developing. This enables us to debug our software much easier.
\nAlso, Docker allows us to keep our development setup much smaller and faster; instead of VirtualBox setups on a per-project base, we can compose our project development setup out of small containers. A CI environment building the containers allows us to package and test the whole environment instead of different software components on different runtimes in a much more stable way.
\nBacked by services such as Kubernetes, we can deploy Docker containers on a flexible infrastructure and enable our developers to test their software more easily in different environments.
\nHere at AOE, we assess Docker in different projects to become more flexible and faster, which increases our focus on development of even better and more stable software.
\n"}],"name":"docker","title":"Docker","ring":1,"quadrant":"platforms-and-aoe-services","body":"Docker has pulled off very quickly and we updated it to "adopt".
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/docker.md"},{"flag":"new","featured":true,"revisions":[{"name":"eks","release":"2019-11-01","title":"Amazon EKS","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/eks.md","body":"Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. \nAmazon EKS runs Kubernetes control plane instances across multiple Availability Zones to ensure high availability. \nIt also provides automated version upgrades and patching for them.
\nAmazon EKS is used as part of the infrastructure in the Congstar project. \nDifferent Amazon EKS Clusters are in use on a variety of environments like development, integration, testing and production.\nWe experienced that Kubernetes version updates are done without major efforts or impact to the running cluster.
\nAmazon EKS is fully supported by Terraform which brings the advantage that its configuration is written in code,\nwhich fulfils the infrastructure as code philosophy.
\n"}],"name":"eks","title":"Amazon EKS","ring":2,"quadrant":"platforms-and-aoe-services","body":"Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. \nAmazon EKS runs Kubernetes control plane instances across multiple Availability Zones to ensure high availability. \nIt also provides automated version upgrades and patching for them.
\nAmazon EKS is used as part of the infrastructure in the Congstar project. \nDifferent Amazon EKS Clusters are in use on a variety of environments like development, integration, testing and production.\nWe experienced that Kubernetes version updates are done without major efforts or impact to the running cluster.
\nAmazon EKS is fully supported by Terraform which brings the advantage that its configuration is written in code,\nwhich fulfils the infrastructure as code philosophy.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/eks.md"},{"flag":"default","featured":true,"revisions":[{"name":"elasticsearch","release":"2018-03-01","title":"Elasticsearch","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/elasticsearch.md","body":"We are continuing to use Elasticsearch successfully in [Searchperience®] and have benefited from the aggregation features for related use cases such as rendering category trees.\nWe are also using Elasticsearch for some microservices as our persistence solution.
\nThis is why we have updated its status to adopt.
\n"},{"name":"elasticsearch","release":"2017-03-01","title":"Elasticsearch","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/elasticsearch.md","body":"Elasticsearch is a REST-based search and analytics engine based on Lucene. Unlike its competitor Apache Solr, it was developed in the beginning with clustering and scaling in mind. It allows you to create complex queries while still delivering results very fast.
\nAt AOE, we use Elasticsearch for logging as well as our own search solution Searchperience®. We recently moved the Searchperience stack from Solr to Elasticsearch and think this was the right decision. Especially in terms of scaling, ease of use and performance, Elasticsearch really shines. Also, the API design took some of the learnings from Apache SOLR into account - for example, the queryDSL is a powerful way of describing different search use cases with highly flexible support of aggregations, etc.
\n"}],"name":"elasticsearch","title":"Elasticsearch","ring":1,"quadrant":"platforms-and-aoe-services","body":"We are continuing to use Elasticsearch successfully in [Searchperience®] and have benefited from the aggregation features for related use cases such as rendering category trees.\nWe are also using Elasticsearch for some microservices as our persistence solution.
\nThis is why we have updated its status to adopt.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/elasticsearch.md"},{"flag":"default","featured":true,"revisions":[{"name":"elk-stack","release":"2017-03-01","title":"ELK Stack","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/elk-stack.md","body":"The company behind Elasticsearch offers a very nice solution for logging and analysis of distributed data such as logfiles.
\nIn today's increasingly distributed IT systems, it's very helpful to have a central view of what is going on in your systems - and of course nobody can and wants to look in different logfiles on different servers. A central logging solution provides the option to detect potential relationships between different events more easily. Also, it can be used to extract useful KPIs or to visualize information on dashboards.
\nThe abbreviation "ELK Stack" stands for the Tools Elasticsearch, Logstash and Kibana: Together, they provide a solution for collecting data the ability to search, visualize and analyze data in real time.
\nLogstash is used to process and forward different data (or logfile) formats. Elasticsearch is used as a search index and together with the Kibana plugin you can configure highly individual dashboards. Recently, there are also the Beats Tools joining this toolstack to ship data to Elasticsearch.
\nWe have been using the ELK Stack for several years now in several projects and different infrastructure setups - we use it to visualize traffic, certain KPIs or just to analyze and search in application logs. We encourage all teams to use such a solution and take care to write useful logs in your applications.
\n"}],"name":"elk-stack","title":"ELK Stack","ring":1,"quadrant":"platforms-and-aoe-services","body":"The company behind Elasticsearch offers a very nice solution for logging and analysis of distributed data such as logfiles.
\nIn today's increasingly distributed IT systems, it's very helpful to have a central view of what is going on in your systems - and of course nobody can and wants to look in different logfiles on different servers. A central logging solution provides the option to detect potential relationships between different events more easily. Also, it can be used to extract useful KPIs or to visualize information on dashboards.
\nThe abbreviation "ELK Stack" stands for the Tools Elasticsearch, Logstash and Kibana: Together, they provide a solution for collecting data the ability to search, visualize and analyze data in real time.
\nLogstash is used to process and forward different data (or logfile) formats. Elasticsearch is used as a search index and together with the Kibana plugin you can configure highly individual dashboards. Recently, there are also the Beats Tools joining this toolstack to ship data to Elasticsearch.
\nWe have been using the ELK Stack for several years now in several projects and different infrastructure setups - we use it to visualize traffic, certain KPIs or just to analyze and search in application logs. We encourage all teams to use such a solution and take care to write useful logs in your applications.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/elk-stack.md"},{"flag":"new","featured":true,"revisions":[{"name":"event-storming","release":"2019-11-01","title":"Event Storming","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/event-storming.md","body":"Event Storming is a method of modeling business processes using domain events.
\nWith complex business processes, people usually know their part of the process very well.\nHaving people from different departments in one room, allows (and requires!) a conversation.\nKnowledge silos get opened up. All learnings can be directly visualized.
\nWe tried this method a couple of times with different sized scopes. We believe it can be of value and has potential.
\nIt's like brainstorming - with the goal to visualize a business line or process.
\nEvent Storming is done in a workshop format.
\nTo get a business process modeled quickly and completely, it's important to get domain experts, developers, UX and\neverybody else who is involved to some extent in the related business line into one room.\nWith virtually unlimited space for modeling using big paper rolls put onto the walls, equipped with colored stickies\nand markers, the modeling workshop can start.
\nDuring the workshop, the goal is to model the big picture, without limiting or focusing just on parts of a process.
\n"}],"name":"event-storming","title":"Event Storming","ring":3,"quadrant":"methods-and-patterns","body":"Event Storming is a method of modeling business processes using domain events.
\nWith complex business processes, people usually know their part of the process very well.\nHaving people from different departments in one room, allows (and requires!) a conversation.\nKnowledge silos get opened up. All learnings can be directly visualized.
\nWe tried this method a couple of times with different sized scopes. We believe it can be of value and has potential.
\nIt's like brainstorming - with the goal to visualize a business line or process.
\nEvent Storming is done in a workshop format.
\nTo get a business process modeled quickly and completely, it's important to get domain experts, developers, UX and\neverybody else who is involved to some extent in the related business line into one room.\nWith virtually unlimited space for modeling using big paper rolls put onto the walls, equipped with colored stickies\nand markers, the modeling workshop can start.
\nDuring the workshop, the goal is to model the big picture, without limiting or focusing just on parts of a process.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/event-storming.md"},{"flag":"default","featured":true,"revisions":[{"name":"evil-user-stories","release":"2017-03-01","title":"Evil User Stories","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/evil-user-stories.md","body":"With Evil User Stories, we aim to raise the project teams' (PO, Dev-Team, QA) and clients' awareness for security topics and introduce a security-by-design principle.
\nThe first step is to identify business use cases of potential vulnerabilities in our software product. The next step is to write an Evil User Story for this use case, from the perspective of an evil persona, e.g. "John Badboy who wants to hack our software". The idea behind this is to take a look at specific parts (business logic) of the software from a perspective that would otherwise not be considered when working on standard user stories.
\nSo how would this work? To illustrate this, let's consider the following user story: "As Emma Shopping I am able to pay for a product in my checkout using a credit card". To get that story done, we might have to persist some payment data somewhere. But within the context of an Evil user story we now also need to consider the security for the credit card and payment handling in our application. So, for that reason, we write an Evil User Story, which in this case could, for example, be "As John Badboy, I want to steal payment data" or more specifically "As John Badboy, I want to do a sql injection to get the payment token".
\nBefore implementation of this particular user story starts, developers should think about how they can secure potentially vulnerable parts of the software to prevent attacks such as sql injections. In this case, one approach should be the use of prepared statements for sql queries. When the development is finished, we should then be able to test the story using an automated testing approach with a penetration testing tool such as sqlmap to confirm that our database queries are not vulnerable to sql injections.
\nAdditionally, both solutions should be checked during the development process using code reviews to identify and correct potentially buggy code.
\n"}],"name":"evil-user-stories","title":"Evil User Stories","ring":3,"quadrant":"methods-and-patterns","body":"With Evil User Stories, we aim to raise the project teams' (PO, Dev-Team, QA) and clients' awareness for security topics and introduce a security-by-design principle.
\nThe first step is to identify business use cases of potential vulnerabilities in our software product. The next step is to write an Evil User Story for this use case, from the perspective of an evil persona, e.g. "John Badboy who wants to hack our software". The idea behind this is to take a look at specific parts (business logic) of the software from a perspective that would otherwise not be considered when working on standard user stories.
\nSo how would this work? To illustrate this, let's consider the following user story: "As Emma Shopping I am able to pay for a product in my checkout using a credit card". To get that story done, we might have to persist some payment data somewhere. But within the context of an Evil user story we now also need to consider the security for the credit card and payment handling in our application. So, for that reason, we write an Evil User Story, which in this case could, for example, be "As John Badboy, I want to steal payment data" or more specifically "As John Badboy, I want to do a sql injection to get the payment token".
\nBefore implementation of this particular user story starts, developers should think about how they can secure potentially vulnerable parts of the software to prevent attacks such as sql injections. In this case, one approach should be the use of prepared statements for sql queries. When the development is finished, we should then be able to test the story using an automated testing approach with a penetration testing tool such as sqlmap to confirm that our database queries are not vulnerable to sql injections.
\nAdditionally, both solutions should be checked during the development process using code reviews to identify and correct potentially buggy code.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/evil-user-stories.md"},{"flag":"default","featured":false,"revisions":[{"name":"explicit-test-strategy","release":"2017-03-01","title":"Explicit test strategy","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/explicit-test-strategy.md","body":"According to the ISTQB Glossar- a Test Strategy is an abstract specification that comprises the designated test levels (unit, integration, system and acceptance tests) and the implementation of each level for a whole organization or for an application. This test strategy can be applicable to one or more projects.
\nAt AOE, we established an explicit test strategy for many of our projects. The coordination of the test levels improves the effectivity of test runs and helps to avoid testing gaps, double inspection and overhead. Every test level has a different focus. Tests that are executed on one level don't have to be implemented on others.
\nThese are the test levels that we implement as a standard in the software deployment pipeline of our projects and that handle multiple integrated components and services:
\nAs a rule, we automate the execution of tests where it is feasible and sensible. Related to the test strategy are the test concept, test data management and the usage of a test case management tool that allows one to assess and categorize functional test cases.
\nDue to the practical usefulness of having a sound test strategy for a project, we classify the explicit test strategy for projects with assess.
\n"}],"name":"explicit-test-strategy","title":"explicit-test-strategy.md","ring":3,"quadrant":"methods-and-patterns","body":"According to the ISTQB Glossar- a Test Strategy is an abstract specification that comprises the designated test levels (unit, integration, system and acceptance tests) and the implementation of each level for a whole organization or for an application. This test strategy can be applicable to one or more projects.
\nAt AOE, we established an explicit test strategy for many of our projects. The coordination of the test levels improves the effectivity of test runs and helps to avoid testing gaps, double inspection and overhead. Every test level has a different focus. Tests that are executed on one level don't have to be implemented on others.
\nThese are the test levels that we implement as a standard in the software deployment pipeline of our projects and that handle multiple integrated components and services:
\nAs a rule, we automate the execution of tests where it is feasible and sensible. Related to the test strategy are the test concept, test data management and the usage of a test case management tool that allows one to assess and categorize functional test cases.
\nDue to the practical usefulness of having a sound test strategy for a project, we classify the explicit test strategy for projects with assess.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/explicit-test-strategy.md"},{"flag":"new","featured":true,"revisions":[{"name":"falco","release":"2019-11-01","title":"Falco","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/falco.md","body":"Falco is an open source project for intrusion and abnormality detection for Cloud Native platforms such as Kubernetes. \nIt detects abnormal application behavior and sends alerts via Slack, Fluentd, NATS, and more.
\nWe are assessing Falco to add another angle to host based intrusion detection and alerting.
\n"}],"name":"falco","title":"Falco","ring":3,"quadrant":"tools","body":"Falco is an open source project for intrusion and abnormality detection for Cloud Native platforms such as Kubernetes. \nIt detects abnormal application behavior and sends alerts via Slack, Fluentd, NATS, and more.
\nWe are assessing Falco to add another angle to host based intrusion detection and alerting.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/falco.md"},{"flag":"new","featured":true,"revisions":[{"name":"flamingo","release":"2019-11-01","title":"Flamingo","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flamingo.md","body":"Flamingo is a high productivity go based framework for rapidly building fast and pluggable web projects.\nIt is used to build scalable and maintainable (web)applications.
\nFlamingo is:
\nGo as a simple, powerful and typesafe language is great to implement and scale serverside logic.\nFlamingo has a clean architecture with clear dependencies in mind and offers typical features and support for nowadays web applications:
\nFlamingo itself does not contain ORM Mapper or libraries - instead it emphasizes "ports and adapters" architecture - so that you have a technology free (domain) model and any possible (and replaceable) persistence behind it.\nThat makes Flamingo useful to build microservices and applications - especially to build "frontends" or portals that require interaction with other (micro) services in a distributed architecture. \nWhen sticking to the architectural recommendation you can build modular applications with replaceable adapters that gives you independent testability.
\nWith "Flamingo Commerce" there is an additional active project that offers rich and flexible features to build modern e-commerce applications.
\n"}],"name":"flamingo","title":"Flamingo","ring":1,"quadrant":"languages-and-frameworks","body":"Flamingo is a high productivity go based framework for rapidly building fast and pluggable web projects.\nIt is used to build scalable and maintainable (web)applications.
\nFlamingo is:
\nGo as a simple, powerful and typesafe language is great to implement and scale serverside logic.\nFlamingo has a clean architecture with clear dependencies in mind and offers typical features and support for nowadays web applications:
\nFlamingo itself does not contain ORM Mapper or libraries - instead it emphasizes "ports and adapters" architecture - so that you have a technology free (domain) model and any possible (and replaceable) persistence behind it.\nThat makes Flamingo useful to build microservices and applications - especially to build "frontends" or portals that require interaction with other (micro) services in a distributed architecture. \nWhen sticking to the architectural recommendation you can build modular applications with replaceable adapters that gives you independent testability.
\nWith "Flamingo Commerce" there is an additional active project that offers rich and flexible features to build modern e-commerce applications.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flamingo.md"},{"flag":"default","featured":false,"revisions":[{"name":"flow","release":"2017-03-01","title":"Flow","ring":4,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/flow.md","body":"Flow is a PHP web application framework developed for the Neos project.
\nWe used Flow in a couple of projects and decided to put it on hold for the following reasons:
\nAlthough it could be that some of the above-mentioned aspects have improved in the past, we decided to use other PHP frameworks such as Symfony or other Languages (See Go; Play Framework; Spring Boot)
\n"}],"name":"flow","title":"flow.md","ring":4,"quadrant":"languages-and-frameworks","body":"Flow is a PHP web application framework developed for the Neos project.
\nWe used Flow in a couple of projects and decided to put it on hold for the following reasons:
\nAlthough it could be that some of the above-mentioned aspects have improved in the past, we decided to use other PHP frameworks such as Symfony or other Languages (See Go; Play Framework; Spring Boot)
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/flow.md"},{"flag":"new","featured":true,"revisions":[{"name":"flowtype","release":"2019-11-01","title":"Flow","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flowtype.md","body":"Flow is a static type checker for JavaScript code. Its goal is to make code faster, smarter, \nmore confidently, and to a bigger scale.
\n"}],"name":"flowtype","title":"Flow","ring":3,"quadrant":"tools","body":"Flow is a static type checker for JavaScript code. Its goal is to make code faster, smarter, \nmore confidently, and to a bigger scale.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flowtype.md"},{"flag":"new","featured":true,"revisions":[{"name":"flux","release":"2019-11-01","title":"Flux","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flux.md","body":"Flux is an application architecture for building client-side web applications,\nwhich is based on React's composable view components.
\n"}],"name":"flux","title":"Flux","ring":3,"quadrant":"methods-and-patterns","body":"Flux is an application architecture for building client-side web applications,\nwhich is based on React's composable view components.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/flux.md"},{"flag":"default","featured":false,"revisions":[{"name":"galen","release":"2017-03-01","title":"Galen","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/galen.md","body":"With Galen Framework, layout testing can be automated to save you a lot of manual work. With its own specification language (Galen Spec), you can write tests to verify the correct look of the web page as well as the location and alignment of specific elements on a page.
\nSo, you can write simple tests such as "The button should be green" as well as more complex behavior specifications such as "On mobile devices the button should be inside the viewport". Especially when testing a responsive website on multiple devices, browsers and resolutions, the manual testing effort gets expensive. To help with that, Galen runs its specifications fully automated with Selenium against the required browsers and devices.
\nWhenever a test fails Galen writes a test report with screenshots to show the mismatching areas on the page to help testers and developers become aware of the problem.
\nAt AOE, the Galen Framework helps us to continuously test the UI for potential regression bugs introduced by new features.
\n"}],"name":"galen","title":"galen.md","ring":3,"quadrant":"tools","body":"With Galen Framework, layout testing can be automated to save you a lot of manual work. With its own specification language (Galen Spec), you can write tests to verify the correct look of the web page as well as the location and alignment of specific elements on a page.
\nSo, you can write simple tests such as "The button should be green" as well as more complex behavior specifications such as "On mobile devices the button should be inside the viewport". Especially when testing a responsive website on multiple devices, browsers and resolutions, the manual testing effort gets expensive. To help with that, Galen runs its specifications fully automated with Selenium against the required browsers and devices.
\nWhenever a test fails Galen writes a test report with screenshots to show the mismatching areas on the page to help testers and developers become aware of the problem.
\nAt AOE, the Galen Framework helps us to continuously test the UI for potential regression bugs introduced by new features.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/galen.md"},{"flag":"default","featured":true,"revisions":[{"name":"gatlin","release":"2018-03-01","title":"Gatling","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gatlin.md","body":"Gatling is now the de-facto tool of choice for load testing in all of our\nprojects, having superseded JMeter completely. We therefore moved it to the\nAdopt level.
\n"},{"name":"gatlin","release":"2017-03-01","title":"Gatling","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gatlin.md","body":"Gatling is a highly capable load testing tool. It is designed for ease of use, maintainability and high performance.
\nOut of the box, Gatling comes with excellent support of the HTTP protocol that makes it a tool of choice for load testing any HTTP server. As the core engine is actually protocol agnostic, it is perfectly possible to implement support for other protocols. For example, Gatling currently also ships JMS support.
\nGatling is built with Scala Lang and Akka. By making good use of Scala's native language features (such as the extensive type system), it makes writing tests feel natural and expressive, instead of writing load tests based on a DSL encoded in some special syntax.
\nThis allows us to use all native Scala features to work with, with the focus on the ability to structure your tests as pure code, and actually unit test your load tests.
\nBesides the very good performance, we definitely like the pure code-based approach. Gatling creates HTML-based reports with nice graphs and metrics about how and what was tested.
\nWe use Gatling as an alternative to JMeter with success in some of our projects. We encourage teams to try Gatling for future load testing. There is an integrated test recorder similar to what other test frameworks have to get you started with a basic test case.
\n"}],"name":"gatlin","title":"Gatling","ring":1,"quadrant":"tools","body":"Gatling is now the de-facto tool of choice for load testing in all of our\nprojects, having superseded JMeter completely. We therefore moved it to the\nAdopt level.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gatlin.md"},{"flag":"new","featured":true,"revisions":[{"name":"gitflow","release":"2019-11-01","title":"GitFlow","ring":4,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitflow.md","body":"Ever since there are recurring discussions about the version control strategy that a team should use.
\nWe have also made the experience when new teams start off with using blocking or long lived feature branches (merge late once all review comments are done) it has a negative impact on team performance.
\nWe recommend to use trunk based development with short lived (<1day) feature branches, because this has shown to support continuous integration and team collaboration the best. However, we do accept teams' choices to use GitFlow, we just do not try to encourage them in the first place.
\nSee also:
\nEver since there are recurring discussions about the version control strategy that a team should use.
\nWe have also made the experience when new teams start off with using blocking or long lived feature branches (merge late once all review comments are done) it has a negative impact on team performance.
\nWe recommend to use trunk based development with short lived (<1day) feature branches, because this has shown to support continuous integration and team collaboration the best. However, we do accept teams' choices to use GitFlow, we just do not try to encourage them in the first place.
\nSee also:
\nMoved to "adopt": Gitlab has proven to be a very useful tool for code and the collaboration around it.\nWith Gitlab CI there is also a powerful tool to automate continuous integration and delivery.
\n"},{"name":"gitlab","release":"2018-03-01","title":"Gitlab","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gitlab.md","body":"Gitlab provides nearly the same feature set as Github, but at a lower price. It also provides the possibility of hosting internally, which is essential for us.
\nWe are migrating more and more repositories from gitolite, even from SVN to gitlab, as it provides a more stable and user friendly interface.
\nGitlab also makes user/permission handling easier than our old gitolite. We don't need the IT team every time a new repository needs to be set up.
\n"}],"name":"gitlab","title":"Gitlab","ring":1,"quadrant":"tools","body":"Moved to "adopt": Gitlab has proven to be a very useful tool for code and the collaboration around it.\nWith Gitlab CI there is also a powerful tool to automate continuous integration and delivery.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitlab.md"},{"flag":"changed","featured":true,"revisions":[{"name":"gitlab-ci","release":"2019-11-01","title":"Gitlab CI","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitlab-ci.md","body":"Moved to "adopt".
\n"},{"name":"gitlab-ci","release":"2018-03-01","title":"Gitlab CI","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gitlab-ci.md","body":"Until now, we have been using Jenkins for almost every single task that we have automated. With Gitlab CI on the market, we have a number of new possibilities.
\nSome of the highlights are:
\nMoved to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/gitlab-ci.md"},{"flag":"changed","featured":true,"revisions":[{"name":"go-lang","release":"2019-11-01","title":"Go / Golang","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/go-lang.md","body":"We have moved Go to "adopt".
\n"},{"name":"go-lang","release":"2018-03-01","title":"Go / Golang","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/go-lang.md","body":"We have moved Go to Trial because multiple teams have used Go with success for different services and tools.\nThe learning curve and productivity have proven to be immense and we are convinced that this language will find more adoption in other teams.
\n"},{"name":"go-lang","release":"2017-03-01","title":"Go / Golang","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/go-lang.md","body":"2016 was the year of Go, with a lot of Open Source projects gaining a lot of attention and many companies started to use it.
\nGo went from #54 to #13 on the TIOBE index in January 2017, and it became the TIOBE programming language of the year 2016.
\nHere at AOE, we use several services written in Go on a daily basis, such as Mattermost, Docker, Consul and Kubernetes. Also, more and more applications, such as Gitlab, incorporate Go-based services to "off load" heavy work.
\nGo, as a programming language, has some very interesting features such as native support for concurrency (go routines), static compiled binaries with a very small memory footprint, cross compiling and much more. A big advantage of Go is the very flat learning curve, which allows developers from more dynamic languages such as PHP to be proficient in a very short time.
\nIf you want to get a feeling for Go, you should start with the online tour, within a day you'll have a good understanding of the core concepts, syntax, etc. - that is also because the language often tries to provide only one simple way of doing things; an example for this is that code formatting and styling is defined (yet not enforced as in Python). Part of this is also that Go itself is very opinionated: So, for example, for object oriented programming in Go, composition is the prefered way of defining data structures, and some might miss advanced concepts such as inheritance.
\nWe currently use Go for projects and microservices where we need flexibility and performance.
\n"}],"name":"go-lang","title":"Go / Golang","ring":1,"quadrant":"languages-and-frameworks","body":"We have moved Go to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/go-lang.md"},{"flag":"default","featured":true,"revisions":[{"name":"gradle","release":"2017-03-01","title":"Gradle","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gradle.md","body":"Gradle is a build automation tool originating in the Java space, providing declarative dependency management (like Maven) and support for custom functionality (like Ant). It has superb multi-project support and is extremely extensible via third-party plugins and also via self-written extensions and plugins that make it outstanding in its area.
\nIt uses a Groovy-based DSL to declaratively model your problem domain (Build automation) and provides a rich object model with extension points to customize the build logic. Because it is extremely easy to extend this DSL, you can easily provide a declarative interface to your customizations and add-ons.
\nWhile providing plugins for building libs, apps and webapps in Java, Groovy and Scala out of the box it is not tied to the JVM as target platform, which is impressively shown by the native build support for C / C++.
\nAt AOE, it is used in various places already: to build Anypoint- and Spring Boot- based applications; to build Android Apps; to automate the creation of Jenkins Jobs; to create Docker images and Debian packages and also do some deployment scripting with it.
\n"}],"name":"gradle","title":"Gradle","ring":1,"quadrant":"tools","body":"Gradle is a build automation tool originating in the Java space, providing declarative dependency management (like Maven) and support for custom functionality (like Ant). It has superb multi-project support and is extremely extensible via third-party plugins and also via self-written extensions and plugins that make it outstanding in its area.
\nIt uses a Groovy-based DSL to declaratively model your problem domain (Build automation) and provides a rich object model with extension points to customize the build logic. Because it is extremely easy to extend this DSL, you can easily provide a declarative interface to your customizations and add-ons.
\nWhile providing plugins for building libs, apps and webapps in Java, Groovy and Scala out of the box it is not tied to the JVM as target platform, which is impressively shown by the native build support for C / C++.
\nAt AOE, it is used in various places already: to build Anypoint- and Spring Boot- based applications; to build Android Apps; to automate the creation of Jenkins Jobs; to create Docker images and Debian packages and also do some deployment scripting with it.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gradle.md"},{"flag":"changed","featured":true,"revisions":[{"name":"grafana","release":"2019-11-01","title":"Grafana","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grafana.md","body":"Updated to "adopt"
\n"},{"name":"grafana","release":"2018-03-01","title":"Grafana","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/grafana.md","body":"Grafana is an Open Source data visualization platform written in Go and NodeJS. It provides a vast choice of different graph types that can be easily combined into dashboards for displaying any kind of numerical or time-based data.
\nAt AOE, we usually use Grafana in conjunction with Prometheus or AWS CloudWatch for visualizing both application and infrastructure metrics.
\n"}],"name":"grafana","title":"Grafana","ring":1,"quadrant":"platforms-and-aoe-services","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grafana.md"},{"flag":"new","featured":true,"revisions":[{"name":"graphql","release":"2019-11-01","title":"GraphQL","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/graphql.md","body":"GraphQL is a query language for your API, and a server-side runtime for executing queries by using a type system you define for your data. GraphQL isn't tied to any specific database or storage engine and is instead backed by your existing code and data.
\nGraphQL was developed by Facebook around 2010 and released in 2015. \nThe main challenge it solves is to improve communication between browser and server in highly dynamic web apps.
\nThe advantages are:
\nWe are using it together with Apollo Client in our React.js based frontend.\nThis way the React components have their relevant GraphQL snippet, defining what data they request or mutate from the "backend for frontend", directly coupled. \nThat makes it transparent what data is available. Apollo takes care of sending an aggregated GraphQL query to the backend.
\nThe framework Flamingo offers support for GraphQL and also Flamingo Commerce offers a full featured GraphQL API for e-commerce features. (Example GraphQL Console for Commerce)
\n"}],"name":"graphql","title":"GraphQL","ring":1,"quadrant":"methods-and-patterns","body":"GraphQL is a query language for your API, and a server-side runtime for executing queries by using a type system you define for your data. GraphQL isn't tied to any specific database or storage engine and is instead backed by your existing code and data.
\nGraphQL was developed by Facebook around 2010 and released in 2015. \nThe main challenge it solves is to improve communication between browser and server in highly dynamic web apps.
\nThe advantages are:
\nWe are using it together with Apollo Client in our React.js based frontend.\nThis way the React components have their relevant GraphQL snippet, defining what data they request or mutate from the "backend for frontend", directly coupled. \nThat makes it transparent what data is available. Apollo takes care of sending an aggregated GraphQL query to the backend.
\nThe framework Flamingo offers support for GraphQL and also Flamingo Commerce offers a full featured GraphQL API for e-commerce features. (Example GraphQL Console for Commerce)
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/graphql.md"},{"flag":"changed","featured":true,"revisions":[{"name":"groovy","release":"2019-11-01","title":"Groovy","ring":4,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/groovy.md","body":"Since the rise of Kotlin, we see no reason to still use Groovy as an alternative to Java running on the JVM.
\n"},{"name":"groovy","release":"2017-03-01","title":"Groovy","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/groovy.md","body":"Groovy is a dynamically typed compiled language running on the JVM. It is easy to learn as it provides a familiar syntax for Java programmers, but also offers advanced features such as closures and makes some mandatory Java syntax requirements optional to enhance the conciseness of the code. These features make Groovy especially well-suited for scripting and domain-specific languages. This is used by popular tools such as Gradle or Spock.
\nAt AOE, Groovy is used in many projects and areas. We use Gradle as a build system, we carry out unit and integration testing with Spock and Geb, we generate Jenkins jobs with JobDSL and we implement complete services with Groovy and Spring Boot.
\n"}],"name":"groovy","title":"Groovy","ring":4,"quadrant":"languages-and-frameworks","body":"Since the rise of Kotlin, we seen no need why to still use Groovy as an alternative to Java running on the JVM.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/groovy.md"},{"flag":"changed","featured":true,"revisions":[{"name":"grpc","release":"2019-11-01","title":"GRPC","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grpc.md","body":"We adopted GRPC, because its used on multiple places within our microservice oriented architectures for internal communication.
\n"},{"name":"grpc","release":"2018-03-01","title":"GRPC","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/grpc.md","body":"gRPC, "A high-performance, Open Source, universal RPC framework," is a framework to easily connect clients and servers in an RPC setup.\ngRPC was initially built at Google, and uses protobuf service definitions for method and payload specification.\nEssentially, this makes it possible to define methods that a server exposes, with either a single payload or an incoming stream - either as a single response or a stream of responses.\nThe definition itself is carried out with the help of protobuf to define message types and method signatures, and then client and server interfaces are compiled for the language(s) you want. Currently there is support for languages such as C++, Java, Python, Go and many more.\nThe shared language-neutral protobuf definition allows you to create all code for all languages automatically and helps with the interoperability of different systems.
\nFrom a technical point of view, gRPC uses HTTP/2 as a transport, directly benefitting from the default TLS encryption.\nBesides gRPC, other frameworks also use protobuf RPC definitions. These frameworks include twirp from twitch, which makes it easy to change the transport/control layer with only very small changes to the application code.
\nWe at AOE plan to assess gRPC for microservice architectures which are more RPC style and less REST style.
\n"}],"name":"grpc","title":"GRPC","ring":1,"quadrant":"languages-and-frameworks","body":"We adopted GRPC, because its used on multiple places within our microservice oriented architectures for internal communication.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grpc.md"},{"flag":"default","featured":false,"revisions":[{"name":"grunt","release":"2017-03-01","title":"Grunt","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/grunt.md","body":"Grunt is a JavaScript task runner that automates repetitive tasks. While Grunt served us well for a good amount of projects,\nother alternatives such as Gulp emerged in the meantime and proved to be a better pick for the\nmajority of our teams.
\nWe have two main reasons for discarding Grunt in favor of other tools:
\nIf a decent amount of tasks is reached, Grunt is known to run slower than other tools, because it heavily relies on I/O operations and\nalways stores the result of one task as files on the disk.
\nOn large projects where a lot of automation is required, it can get very tedious to maintain complex and parallel running tasks.\nThe grunt configuration files sometimes simply don´t gave us the flexibility that we needed.
\nCurrently our preferred way to go is either simply use NPM scripts or rely on Webpack loaders for file preprocessing. For non-webpack projects we also utilize Gulp.
\n"}],"name":"grunt","title":"grunt.md","ring":4,"quadrant":"tools","body":"Grunt is a JavaScript task runner that automates repetitive tasks. While Grunt served us well for a good amount of projects,\nother alternatives such as Gulp emerged in the meantime and proved to be a better pick for the\nmajority of our teams.
\nWe have two main reasons for discarding Grunt in favor of other tools:
\nIf a decent amount of tasks is reached, Grunt is known to run slower than other tools, because it heavily relies on I/O operations and\nalways stores the result of one task as files on the disk.
\nOn large projects where a lot of automation is required, it can get very tedious to maintain complex and parallel running tasks.\nThe grunt configuration files sometimes simply didn't give us the flexibility that we needed.
\nCurrently our preferred way to go is either simply use NPM scripts or rely on Webpack loaders for file preprocessing. For non-webpack projects we also utilize Gulp.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/grunt.md"},{"flag":"default","featured":false,"revisions":[{"name":"gulp","release":"2017-03-01","title":"Gulp","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/gulp.md","body":"Gulp is a javascript task runner much like Grunt. The tasks are written in javascript code.
\nIt is a tool that helps you automate numerous tasks surrounding web development. A typical use is to configure preprocessors for Sass, to compile CSS or to optimize CSS, Javascript and Images.
\nWith Gulp and its many plugins you can also do stuff such as start a web server and reload the browser if changes happen.
\nTo get started you need to install Gulp on your machine via npm.
\nnpm install gulp -g\n\n\nYou also need it locally in your project, so you have to install it as a dependency in your project .
\nnpm install gulp --save-dev\n\n\nYou can split your tasks into various smaller sub-tasks and even split it up into smaller files.
\nA basic Gulp task can look like this:
\nconst gulp = require('gulp');\n// Requires the gulp-sass plugin\nconst sass = require('gulp-sass');\nconst autoprefixer = require('gulp-autoprefixer');\nconst cssnano = require('gulp-cssnano');\n\ngulp.task('sass', function(){\n return gulp.src('app/scss/**/*.scss') // tell gulp where your source files are\n .pipe(sass()) // Converts sass into css with the help of a gulp plugin called gulp-sass\n .pipe(autoprefixer({browsers: ['last 2 versions']})) // auto prefixes the css for the last 2 versions of browser, like ie9 specific css\n .pipe(cssnano()) // minify the css\n .pipe(gulp.dest('app/css')) // tell gulp where to put the converted file. this is the first time where a file is written\n});\n\n\nyou can now run this task simply by executing the following command in your terminal:
\ngulp sass\n\n"}],"name":"gulp","title":"gulp.md","ring":1,"quadrant":"tools","body":"Gulp is a javascript task runner much like Grunt. The tasks are written in javascript code.
\nIt is a tool that helps you automate numerous tasks surrounding web development. A typical use is to configure preprocessors for Sass, to compile CSS or to optimize CSS, Javascript and Images.
\nWith Gulp and its many plugins you can also do stuff such as start a web server and reload the browser if changes happen.
\nTo get started you need to install Gulp on your machine via npm.
\nnpm install gulp -g\n\n\nYou also need it locally in your project, so you have to install it as a dependency in your project .
\nnpm install gulp --save-dev\n\n\nYou can split your tasks into various smaller sub-tasks and even split it up into smaller files.
\nA basic Gulp task can look like this:
\nconst gulp = require('gulp');\n// Requires the gulp-sass plugin\nconst sass = require('gulp-sass');\nconst autoprefixer = require('gulp-autoprefixer');\nconst cssnano = require('gulp-cssnano');\n\ngulp.task('sass', function(){\n return gulp.src('app/scss/**/*.scss') // tell gulp where your source files are\n .pipe(sass()) // Converts sass into css with the help of a gulp plugin called gulp-sass\n .pipe(autoprefixer({browsers: ['last 2 versions']})) // auto prefixes the css for the last 2 versions of browser, like ie9 specific css\n .pipe(cssnano()) // minify the css\n .pipe(gulp.dest('app/css')) // tell gulp where to put the converted file. this is the first time where a file is written\n});\n\n\nyou can now run this task simply by executing the following command in your terminal:
\ngulp sass\n\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/gulp.md"},{"flag":"changed","featured":false,"revisions":[{"name":"hal-hateoas","release":"2019-11-01","featured":false,"title":"hal-hateoas.md","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hal-hateoas.md","body":"We use HAL in cases where we need to link ressources in payloads. HATEOAS has not proven to be very useful in our projects.
\n"},{"name":"hal-hateoas","release":"2018-03-01","title":"HAL / HATEOAS","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/hal-hateoas.md","body":"We still recommend the usage of HAL and HATEOAS.
\nBut, depending on the resource structure, there are some pitfalls to be aware of:
\nHypermedia As The Engine Of Application State or in short HATEOAS is a pattern that helps to organize dependencies and resources in a RESTful API. The basic idea of HATEOAS is that an API consumer do not have to know how dependencies of resources are connected and how to get them. A consumer must only be familiar with the basics of hypermedia.
\nLet's assume we have a bank account and an action to deposit money on that account. Everything you need to know is that the account resource has an action for a deposit. The URL of that action can then fetched from the link attribute with the corresponding relation.
\n<account>\n <account_number>12345</account_number>\n <balance currency="usd">-25.00</balance>\n <link rel="deposit" href="https://bank.example.com/account/12345/deposit" />\n</account>\nBesides from HATEOAS there is an alternative implementation called Hypertext Application Language, in short HAL, which has much more features than the basic HATEOAS.
\nWith HAL you are allowed to also define parametrized links, embedded resources and documentation relations (which are called curies). You can find the specification here.\nhttp://stateless.co/hal_specification.html
\nIf you want to link different api endpoints or ressource locations in your API responses you should use this standard.
\n"}],"name":"hal-hateoas","title":"hal-hateoas.md","ring":2,"quadrant":"methods-and-patterns","body":"We use HAL in cases where we need to link ressources in payloads. HATEOAS has not proven to be very useful in our projects.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hal-hateoas.md"},{"flag":"changed","featured":true,"revisions":[{"name":"helm","release":"2019-11-01","title":"Helm","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/helm.md","body":"Helm is a package manager for Kubernetes, which simplifies the deployment\nof applications into a Kubernetes cluster and provides additional features like e.g. versioning and rollbacks.
\n"},{"name":"helm","release":"2018-03-01","title":"Helm","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/helm.md","body":"For managing deployments within Kubernetes we use Helm, which makes templating Kubernetes configuration files super easy (also known as Helm charts).
\n"}],"name":"helm","title":"Helm","ring":2,"quadrant":"platforms-and-aoe-services","body":"Helm is a package manager for Kubernetes, which simplifies the deployment\nof applications into a Kubernetes cluster and provides additional features like e.g. versioning and rollbacks.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/helm.md"},{"flag":"changed","featured":false,"revisions":[{"name":"hystrix","release":"2019-11-01","featured":false,"title":"hystrix.md","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hystrix.md","body":"Hystrix is not actively mainatined anymore and some of its goals can now be handled with service meshs.
\n"},{"name":"hystrix","release":"2017-03-01","title":"Hystrix ","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/hystrix.md","body":"Hystrix is a very powerful library for handling failures, fallbacks and latency management within complex distributed environments. Netflix developed it and after years of experience, they are using it in almost each of their microservices. It evolved to a great library for handling resilience in complex architectures and covers solutions for the most common resilience patterns like:
\nBeside from that purposes Hystrix also offers some helpful features like parallel and asynchronous execution, In-Request-Caching and other useful features for working with distributed systems.
\nAnother useful component that you are able to use with Hystrix is his dashboard that give you the ability of real time monitoring of external dependencies and how they behave. Alerting is also able via the dashboard.
\n"}],"name":"hystrix","title":"hystrix.md","ring":3,"quadrant":"tools","body":"Hystrix is not actively mainatined anymore and some of its goals can now be handled with service meshs.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/hystrix.md"},{"flag":"default","featured":false,"revisions":[{"name":"imgix","release":"2017-03-01","title":"imgix","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/imgix.md","body":"Imgix is an SaaS solution for delivering and processing images. When developing responsive websites, you will quickly reach the point where you need various versions of your images to achieve a good responsive user interface. You want high quality versions for retina displays but small versions for mobile devices with a slow Internet connection.
\nEspecially when dealing with user-generated uploads, it is getting hard to create different versions for any supported device and breakpoint of your web page. Doing this manually is hardly an option.
\nAt AOE, we decided to use imgix as an image processing service for some projects to solve this problem. The benefits of imgix are the simple API to create responsive images in real-time as well as the fast delivery over their CDN.
\n"}],"name":"imgix","title":"imgix.md","ring":3,"quadrant":"platforms-and-aoe-services","body":"Imgix is an SaaS solution for delivering and processing images. When developing responsive websites, you will quickly reach the point where you need various versions of your images to achieve a good responsive user interface. You want high quality versions for retina displays but small versions for mobile devices with a slow Internet connection.
\nEspecially when dealing with user-generated uploads, it is getting hard to create different versions for any supported device and breakpoint of your web page. Doing this manually is hardly an option.
\nAt AOE, we decided to use imgix as an image processing service for some projects to solve this problem. The benefits of imgix are the simple API to create responsive images in real-time as well as the fast delivery over their CDN.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/imgix.md"},{"flag":"changed","featured":true,"revisions":[{"name":"infrastructure-as-code","release":"2019-11-01","title":"Infrastructure as Code","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/infrastructure-as-code.md","body":"Updated to "adopt"
\n"},{"name":"infrastructure-as-code","release":"2017-03-01","title":"Infrastructure as Code","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/infrastructure-as-code.md","body":"Infrastructure as Code (IaC) describes the process of managing all infrastructure resources via code. Treating infrastructure code the same way we treat application code, we can benefit from the same advantages of having a history in our version control system, doing code reviews and rolling out updates via a Continuous Delivery pipeline in a way that closely approaches how we handle application deployments.
\nInfrastructure code is often described in a declarative language und the target platforms figure out what to create, update or delete in order to get to the desired state, while doing this in a safe and efficient way. We've worked with AWS CloudFormation in the past, and while this is a great tool, you can only manage AWS resources with it and you need some more tooling around it in order to automate things nicely and embed it into other processes such as Jenkins Jobs. That's what we created StackFormation for. Another tool that is actively developed is Terraform. Terraform comes with a lot of concepts that make managing environments easier out of the box and nicely embeds into other related tools. Also, Terraform allows you to manage a variety of different infrastructure providers.
\nInfrastructure as code should cover everything from orchestration of your infrastructure resources, networking and provisioning as well as monitoring setup. The orchestration tools mentioned above are supplemented by other tools such as Puppet, Chef or simple Bash scripts that take over provisioning the instances after they are booted.
\n"}],"name":"infrastructure-as-code","title":"Infrastructure as Code","ring":1,"quadrant":"platforms-and-aoe-services","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/infrastructure-as-code.md"},{"flag":"default","featured":true,"revisions":[{"name":"invision","release":"2018-03-01","title":"Invision","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/invision.md","body":"Invision is an online tool used to work and collaborate on design and prototypes and to share them between clients and the team.
\nWe use it in many projects now to present prototypes and designs and it helps in understanding the planned user experience.\nAlso, we use this directly as a reference from the user stories to help the development teams in understanding and implementing the right frontend and backend functionalities.
\n"}],"name":"invision","title":"Invision","ring":2,"quadrant":"tools","body":"Invision is an online tool used to work and collaborate on design and prototypes and to share them between clients and the team.
\nWe use it in many projects now to present prototypes and designs and it helps in understanding the planned user experience.\nAlso, we use this directly as a reference from the user stories to help the development teams in understanding and implementing the right frontend and backend functionalities.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/invision.md"},{"flag":"new","featured":true,"revisions":[{"name":"jaeger","release":"2019-11-01","title":"Jaeger","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/jaeger.md","body":"Jaeger is a tool for Distributed Tracing. Developed at Uber and inspired by Dapper and OpenZipkin it grew into an Cloud Native Computing Foundation project.
\nJaeger is a great tool for troubleshooting distributed systems, such as microservice architectures. Developers and Operation can quickly see communicaiton between services, and what data is communicated where.\nErrors in services can be traced to the originating system. Global trace identifiers are communicated using B3 headers. Jaeger supports Zipkin, which allows easy migration von OpenZipkin & co.
\n"}],"name":"jaeger","title":"Jaeger","ring":2,"quadrant":"platforms-and-aoe-services","body":"Jaeger is a tool for Distributed Tracing. Developed at Uber and inspired by Dapper and OpenZipkin it grew into an Cloud Native Computing Foundation project.
\nJaeger is a great tool for troubleshooting distributed systems, such as microservice architectures. Developers and Operation can quickly see communicaiton between services, and what data is communicated where.\nErrors in services can be traced to the originating system. Global trace identifiers are communicated using B3 headers. Jaeger supports Zipkin, which allows easy migration von OpenZipkin & co.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/jaeger.md"},{"flag":"default","featured":true,"revisions":[{"name":"jest","release":"2018-03-01","title":"Jest","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/jest.md","body":"Updated to "adopt".
\n"},{"name":"jest","release":"2017-03-01","title":"Jest ","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/jest.md","body":"Jest is a javascript testing framework by facebook to test javascript code and react applications / components.
\nWe started using Jest (and watchmen) instead of Karma because it:
\nIt is easy to set up. And even if you have a running setup with karma/chai you can easily replace karma with jest. With a small workaround, chai and jest test matchers work fine together.
\n"}],"name":"jest","title":"Jest","ring":1,"quadrant":"tools","body":"Updated to "adopt".
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/jest.md"},{"flag":"default","featured":false,"revisions":[{"name":"job-dsl","release":"2017-03-01","title":"Job DSL (Jenkins)","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/job-dsl.md","body":"The Job DSL is a plugin for the Jenkins automation server. Jenkins jobs that automate parts of a software project are usually configured using the web interface of Jenkins. If Jenkins is the choice for your project and the number of build jobs tend to grow, the Job DSL plugin is your friend.
\nThe plugin allows Jenkins jobs to be described by code (Groovy DSL). This code is then used for generating Jenkins jobs. As a consequence, job configuration can be part of the project's source code. During the generation step, existing jobs are synchronized, overwritten or left alone, depending on the configuration. The same configuration manages deleting or ignoring jobs that are not described in code anymore. Jobs can easily be restored in case of data loss and changed without clicking buttons for hours. The automation also makes it easy to seed large numbers of homogeneous components and builds on different branches.
\nThe ability to treat Jenkins jobs as code is a big advantage. We highly suggest that every team automate the setup of their jobs and their pipelines. Another way of expressing build pipelines as code is the new Jenkins Pipeline feature - but still we see the need of Job DSL seeder jobs to seed the Jenkins pipeline jobs themselves and any additional jobs.
\n"}],"name":"job-dsl","title":"job-dsl.md","ring":2,"quadrant":"tools","body":"The Job DSL is a plugin for the Jenkins automation server. Jenkins jobs that automate parts of a software project are usually configured using the web interface of Jenkins. If Jenkins is the choice for your project and the number of build jobs tend to grow, the Job DSL plugin is your friend.
\nThe plugin allows Jenkins jobs to be described by code (Groovy DSL). This code is then used for generating Jenkins jobs. As a consequence, job configuration can be part of the project's source code. During the generation step, existing jobs are synchronized, overwritten or left alone, depending on the configuration. The same configuration manages deleting or ignoring jobs that are not described in code anymore. Jobs can easily be restored in case of data loss and changed without clicking buttons for hours. The automation also makes it easy to seed large numbers of homogeneous components and builds on different branches.
\nThe ability to treat Jenkins jobs as code is a big advantage. We highly suggest that every team automate the setup of their jobs and their pipelines. Another way of expressing build pipelines as code is the new Jenkins Pipeline feature - but still we see the need of Job DSL seeder jobs to seed the Jenkins pipeline jobs themselves and any additional jobs.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/job-dsl.md"},{"flag":"default","featured":true,"revisions":[{"name":"keycloak","release":"2018-03-01","title":"Keycloak","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/keycloak.md","body":"Most distributed systems still face a growing demand for user management, authentication, authorization and Single sign-on. In light of a growing security demand and specialization, the Open Source project JBoss Keycloak is a perfect match.
\nKeyloak has been a growing project from the outset and has a strong community. Keyloak is based on standards such as OAuth2, OIDC and SAML2. Securing a distributed system is supported by adapters, which are provided by Keycloak developers for different technology stacks. If there is no adapter for your technology stack, an integration on the protocol level with a library is simple. Many configurable features require no coding in the integrated projects. The required configuration is managed via code and promoted as usual.
\nWe use Keycloak in our OM3 suite for several authentication-related use cases – such as user management for system users and Single sign-on for customers. The OAuth access tokens can be used to secure APIs that access sensitive information. In addition, Keycloak is part of the AOE infrastructure and helps in securing the various services to support employees and customers.
\n"},{"name":"keycloak","release":"2017-03-01","title":"Keycloak","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/keycloak.md","body":"User management, authentication, authorization and Single Sign-On are part of most distributed systems nowadays. Building these sensitive and serious parts on your own might be a problem due to knowledge- and budget restrictions. Because of growing requirements in that field (social logins, single sign-on, federation, two-factor authentication, etc.), as well as growing security concerns, building these things on your own has become more challenging during the past decade.
\nAs a consequence, the recommendation is: use an existing solution and connect it with your project's codebase using provided standards. Our recommended solution is the Open Source project JBoss Keycloak. We use Keycloak in our OM3 suite for several authentication-related use cases - such as user management for system users and single sign-on for customers. The OAuth access tokens can be used to secure APIs that access sensitive information.
\nKeyloak is based on standards such as OAuth2, OIDC and SAML2. Securing a distributed system is supported by adapters, which are provided by the Keycloak developers for different technology stacks. If there is no adapter for your technology stack, an integration on protocol level with a library is simple. A lot of configurable features require no coding in the integrated projects.
\nBy design, the Keycloak project offers customizability and extensibility via so-called SPIs, e.g. a custom authenticator can be implemented to address project specific problems.
\nKeycloak normally runs standalone and can use various database products. A docker image is available to start in a containerized environment.
\nKeycloak might be overkill, depending on your project needs. For a simple integration with, for instance, a social login provider (Facebock, Twitter, etc.) Keycloak might be too much. For a JVM project, the pac4j library might be an alternative. If a cloud-based solution is preferred and data privacy concerns are not an issue, Auth0 might be the choice.
\n"}],"name":"keycloak","title":"Keycloak","ring":1,"quadrant":"tools","body":"Most distributed systems still face a growing demand for user management, authentication, authorization and Single sign-on. In light of a growing security demand and specialization, the Open Source project JBoss Keycloak is a perfect match.
\nKeyloak has been a growing project from the outset and has a strong community. Keyloak is based on standards such as OAuth2, OIDC and SAML2. Securing a distributed system is supported by adapters, which are provided by Keycloak developers for different technology stacks. If there is no adapter for your technology stack, an integration on the protocol level with a library is simple. Many configurable features require no coding in the integrated projects. The required configuration is managed via code and promoted as usual.
\nWe use Keycloak in our OM3 suite for several authentication-related use cases – such as user management for system users and Single sign-on for customers. The OAuth access tokens can be used to secure APIs that access sensitive information. In addition, Keycloak is part of the AOE infrastructure and helps in securing the various services to support employees and customers.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/keycloak.md"},{"flag":"new","featured":true,"revisions":[{"name":"kotlin","release":"2019-11-01","title":"Kotlin","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/kotlin.md","body":"Kotlin is used successfully in production by multiple teams.
\nKotlin is 100% interoperable with Java. It means the code can live side-by-side in one code base and interact.\nFrom the beginning it was designed with practical thought in mind. So the IDE Support in IntelliJ is really great.
\nThe Spring Framework Developer put a lot of effort that Springs play well together with Kotlin.
\nWith it's concise syntax, null safety, \nDue to its explicit type system, this language is also great replacement for Groovy usage with Gradle.
\n"}],"name":"kotlin","title":"Kotlin","ring":1,"quadrant":"languages-and-frameworks","body":"Kotlin is used successfully in production by multiple teams.
\nKotlin is 100% interoperable with Java. It means the code can live side-by-side in one code base and interact.\nFrom the beginning it was designed with practical thought in mind. So the IDE Support in IntelliJ is really great.
\nThe Spring Framework Developer put a lot of effort that Springs play well together with Kotlin.
\nWith it's concise syntax, null safety, \nDue to its explicit type system, this language is also great replacement for Groovy usage with Gradle.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/kotlin.md"},{"flag":"default","featured":true,"revisions":[{"name":"kubernetes","release":"2018-03-01","title":"Kubernetes","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/kubernetes.md","body":"Kubernetes has developed into the quasi-standard for container orchestration: Nearly every cloud provider provides managed Kubernetes, and even Docker Enterprise uses Kubernetes.\nWe are running several production systems with Kubernetes and we are using it in concepts such as:
\nKubernetes is a container orchestration platform, which supports many different infrastructure providers. It allows you to deploy containers and takes care of running, scaling or self-healing your applications based on configurations you provide. It's based on years of knowledge and experience Google gained by using containers.
\nAt AOE, we started Kubernetes in a test environment on bare metal to experiment with it. It's currently used for running AOE internal apps such as dashboards as well as running builds in containers. We also started to use it for upcoming projects to run and manage several services. There are Tools to automate the setup of kubernetes in AWS like Cops. Another helpful tool is Minikube, which allows to test and run kubernetes locally.
\n"}],"name":"kubernetes","title":"Kubernetes","ring":1,"quadrant":"platforms-and-aoe-services","body":"Kubernetes has developed into the quasi-standard for container orchestration: Nearly every cloud provider provides managed Kubernetes, and even Docker Enterprise uses Kubernetes.\nWe are running several production systems with Kubernetes and we are using it in concepts such as:
\nRebuilding and packaging software from "third parties" (e.g. PHP, MySQL, Redis, Nginx, Java,...) implies starting to maintain the packaging for the desired distribution.
\nEven with tool support and targeted for automation, we found that building those packages is very often unstable. The effort to keep up with the upstream changes (security changes, fixes, etc...) exceeds the benefit in most cases. We prefer to not create our own packages and rather use what's available in the distribution repository.
\n"}],"name":"maintain-third-party-packages","title":"maintain-third-party-packages.md","ring":4,"quadrant":"platforms-and-aoe-services","body":"Rebuilding and packaging software from "third parties" (e.g. PHP, MySQL, Redis, Nginx, Java,...) implies starting to maintain the packaging for the desired distribution.
\nEven with tool support and targeted for automation, we found that building those packages is very often unstable. The effort to keep up with the upstream changes (security changes, fixes, etc...) exceeds the benefit in most cases. We prefer to not create our own packages and rather use what's available in the distribution repository.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/maintain-third-party-packages.md"},{"flag":"changed","featured":true,"revisions":[{"name":"micro-frontends","release":"2019-11-01","title":"Micro Frontends","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/micro-frontends.md","body":"When deciding on a system architecture we are always striving for technology neutralism. This is to allow us to stay\nflexible with future decisions. Micro Frontends can be a tool to support us with this goal.\nWe favor protocols and methods, such as plain HTML and HTTP, over specific technologies when designing Micro Frontends.
\nSince Micro Frontends have proven to allow use move fast and agile, we moved this pattern to "trial".
\n"},{"name":"micro-frontends","release":"2018-03-01","title":"Micro Frontends","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/micro-frontends.md","body":"We see many benefits in Microservices – especially in large teams – but often this architecture \ndoes not involve the user interface. Instead, you might end up maintaining a frontend monolith. With Micro Frontends \nyou enable your frontend developers to gain the same benefits that we have grown accustomed to in a Microservice architecture: \nDecoupled components, which are developed and deployed by independent teams. But what sounds reasonable comes with \nchallenges. Integrating different Frontends on the client- or server-side can be tricky, as well as keeping the overall \nUser Experience consistent.
\nDespite the challenges, Micro Frontends help us to develop large applications across multiple teams. Developers can\nwork more independently without having too much trouble maintaining a large codebase. Being able to update oder \nreplace Frontend libraries in some parts of the application is yet another benefit in the fast-moving world of \nfrontend development.
\n"}],"name":"micro-frontends","title":"Micro Frontends","ring":2,"quadrant":"methods-and-patterns","body":"When deciding on a system architecture we are always striving for technology neutralism. This is to allow us to stay\nflexible with future decisions. Micro Frontends can be a tool to support us with this goal.\nWe favor protocols and methods, such as plain HTML and HTTP, over specific technologies when designing Micro Frontends.
\nSince Micro Frontends have proven to allow use move fast and agile, we moved this pattern to "trial".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/micro-frontends.md"},{"flag":"default","featured":true,"revisions":[{"name":"microservices","release":"2018-03-01","title":"Microservices","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/microservices.md","body":"We continue to belief in the microservices concept and its related patterns and best practices. However, it's worth mentioning that we we had to learn some lessons when it came to resilient thinking and deployment-related dependencies between microservices.
\nWe feel that our microservice-based applications are more robust than monolithic ones have been. Thanks to the \nsplit of the overall complexity into multiple services, new employees or team members are becoming productive within days or a few weeks.
\nIn order to get microservices right and to benefit from the advantages, there is a lot more required.\nThe following "pyramid of need" for microservices shows this:\n
Microservices as an architecture style is getting very popular recently. At AOE, more and more teams are adding microservices to their existing application architecture or designing applications with microservices.
\nWe also like the term "self-contained systems" instead of microservices.
\nThe benefits we see are:
\nRelated patterns are Strategic Domain Driven Design as an approach to wisely cut your architecture according to useful bounded contexts and decide on the relevant communication and "translation" between the services.\nIn case you are looking for a small visualisation tool for your microservice architecture you might find vistecture useful.
\nAlso Resilience thinking is especially important when designing an application as a suite of microservices.
\n"}],"name":"microservices","title":"Microservices","ring":1,"quadrant":"methods-and-patterns","body":"We continue to belief in the microservices concept and its related patterns and best practices. However, it's worth mentioning that we we had to learn some lessons when it came to resilient thinking and deployment-related dependencies between microservices.
\nWe feel that our microservice-based applications are more robust than monolithic ones have been. Thanks to the \nsplit of the overall complexity into multiple services, new employees or team members are becoming productive within days or a few weeks.
\nIn order to get microservices right and to benefit from the advantages, there is a lot more required.\nThe following "pyramid of need" for microservices shows this:\n
Neo4j is one of the oldest Open Source Graph Databases. It's one of the rare NoSQL databases that is fully ACID-compliant. We see two main advantages of graph databases:
\nNeo4j database is implemented in Java and can therefore be embedded in your application if you live on the JVM.
\nYou can also choose to run it in a classic server mode, which then provides you with the possibility to either use its REST API or connect to it via the BOLT Driver, which has native bindings for the most popular languages.
\nThe cypher query language which comes with Neo4j is a declarative graph query language that allows for expressive and efficient querying and updating of the graph.
\nAt AOE, we use Neo4j mostly for explorative, interactive work with weakly structured or highly connected data, also we are evaluating this for knowledge-based recommendations in our Searchperience product.
\n"}],"name":"neo4j","title":"neo4j.md","ring":3,"quadrant":"platforms-and-aoe-services","body":"Neo4j is one of the oldest Open Source Graph Databases. It's one of the rare NoSQL databases that is fully ACID-compliant. We see two main advantages of graph databases:
\nNeo4j database is implemented in Java and can therefore be embedded in your application if you live on the JVM.
\nYou can also choose to run it in a classic server mode, which then provides you with the possibility to either use its REST API or connect to it via the BOLT Driver, which has native bindings for the most popular languages.
\nThe cypher query language which comes with Neo4j is a declarative graph query language that allows for expressive and efficient querying and updating of the graph.
\nAt AOE, we use Neo4j mostly for explorative, interactive work with weakly structured or highly connected data, also we are evaluating this for knowledge-based recommendations in our Searchperience product.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/neo4j.md"},{"flag":"new","featured":true,"revisions":[{"name":"next-js","release":"2019-11-01","title":"Next.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/next-js.md","body":"Next.js is a JavaScript and React based framework which makes use of server side rendering.
\n"}],"name":"next-js","title":"Next.js","ring":2,"quadrant":"languages-and-frameworks","body":"Next.js is a JavaScript and React based framework which makes use of server side rendering.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/next-js.md"},{"flag":"default","featured":true,"revisions":[{"name":"node-js","release":"2017-03-01","title":"node.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/node-js.md","body":"Node.js is a no- browser JavaScript execution runtime. Its basis is Google's V8 engine. Node is event-driven and follows a non-blocking I/O model.
\nIt’s a good choice for restful APIs, realtime purposes or situations where many concurrent connections are expected, where each connection has a lightweight memory footprint.
\nNode allows separation of concerns by using its package manager npm, which is also the largest ecosystem of Open Source libraries (modules).
\nModules are added as dependencies and offer a wide range of functionalities in a range from simple helper functions to mature web frameworks such as express.js.
\nMany PaaS providers (AWS, Google Cloud Platform, Azure) support node, including deployment and monitoring services out of the box for scalable stateless applications.
\nAt AOE, we successfully use node.js-based applications for smaller services or internal tools such dashboards.
\n"}],"name":"node-js","title":"node.js","ring":2,"quadrant":"languages-and-frameworks","body":"Node.js is a no- browser JavaScript execution runtime. Its basis is Google's V8 engine. Node is event-driven and follows a non-blocking I/O model.
\nIt’s a good choice for restful APIs, realtime purposes or situations where many concurrent connections are expected, where each connection has a lightweight memory footprint.
\nNode allows separation of concerns by using its package manager npm, which is also the largest ecosystem of Open Source libraries (modules).
\nModules are added as dependencies and offer a wide range of functionalities in a range from simple helper functions to mature web frameworks such as express.js.
\nMany PaaS providers (AWS, Google Cloud Platform, Azure) support node, including deployment and monitoring services out of the box for scalable stateless applications.
\nAt AOE, we successfully use node.js-based applications for smaller services or internal tools such dashboards.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/node-js.md"},{"flag":"new","featured":false,"revisions":[{"name":"nosql","release":"2019-11-01","title":"NoSQL","ring":2,"quadrant":"methods-and-patterns","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/nosql.md","body":"NoSQL technologies are established solutions that allows for scaling and handling big datasets.\nWe use Technologies like Redis, Elasticsearch and Neo4J but there are many others that are powering the NoSQL space.
\n"}],"name":"nosql","title":"NoSQL","ring":2,"quadrant":"methods-and-patterns","body":"NoSQL technologies are established solutions that allows for scaling and handling big datasets.\nWe use Technologies like Redis, Elasticsearch and Neo4J but there are many others that are powering the NoSQL space.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/nosql.md"},{"flag":"default","featured":false,"revisions":[{"name":"npm","release":"2017-03-01","title":"NPM","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/npm.md","body":"NPM is one of, if not the most, popular package manager for JavaScript. Because of the big community, you can find nearly every dependency in npm.
\nInstead of other package managers such as bower, you have to write your packages as modules. This unifies the way you have to use, test and, of course, understand dependencies.
\nNPM creates a tree for your dependencies and their nesting dependencies. Because of this, you don't need to handle version conflicts, since every dependency uses there own version of e.g. webpack.
\nWith shrinkwrap you have a robust tool to lock down and manage the versions of your dependencies - following the Pin (external) dependencies approach.
\nFor each package you have to classify your dependencies:
\nWith scripts you get support for the most common build lifecycle steps, e.g. build, start, test ...
\nOther useful features:
\nNPM is one of, if not the most, popular package manager for JavaScript. Because of the big community, you can find nearly every dependency in npm.
\nInstead of other package managers such as bower, you have to write your packages as modules. This unifies the way you have to use, test and, of course, understand dependencies.
\nNPM creates a tree for your dependencies and their nesting dependencies. Because of this, you don't need to handle version conflicts, since every dependency uses there own version of e.g. webpack.
\nWith shrinkwrap you have a robust tool to lock down and manage the versions of your dependencies - following the Pin (external) dependencies approach.
\nFor each package you have to classify your dependencies:
\nWith scripts you get support for the most common build lifecycle steps, e.g. build, start, test ...
\nOther useful features:
\nThe OpenAPI Specification is becoming a broadly adopted industry standard for describing modern REST APIs. Other initiatives like RAML have joined the OpenAPI Initiative.
\nOpenAPI v2 version is basically the former Swagger - and Swagger provides useful tools for OpenAPI like the online editor and viewer http://editor.swagger.io/\nWe have also found that this version currently have a good tool support accross languages, so you will find API client and server generation tools for a lot of languages, which makes it quite easy to connect to an API that is described in OpenAPI standard.
\nOpenAPI v3
\nOpenAPI v3 adds more features to the specification - for example the ability to describe APIs supporting request/callback pattern.
\nThere is a very good api designer https://www.apicur.io/ and a good mock generator http://microcks.github.io/index.html
\nThe general tool support is excellent. See https://openapi.tools/
\n"}],"name":"open-api","title":"Open API","ring":1,"quadrant":"tools","body":"The OpenAPI Specification is becoming a broadly adopted industry standard for describing modern REST APIs. Other initiatives like RAML have joined the OpenAPI Initiative.
\nOpenAPI v2 version is basically the former Swagger - and Swagger provides useful tools for OpenAPI like the online editor and viewer http://editor.swagger.io/\nWe have also found that this version currently have a good tool support accross languages, so you will find API client and server generation tools for a lot of languages, which makes it quite easy to connect to an API that is described in OpenAPI standard.
\nOpenAPI v3
\nOpenAPI v3 adds more features to the specification - for example the ability to describe APIs supporting request/callback pattern.
\nThere is a very good api designer https://www.apicur.io/ and a good mock generator http://microcks.github.io/index.html
\nThe general tool support is excellent. See https://openapi.tools/
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/open-api.md"},{"flag":"default","featured":false,"revisions":[{"name":"oro-platform","release":"2017-03-01","title":"Oro Platform","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/oro-platform.md","body":"OroPlatform is a framework built on Symfony 2 with the purpose of providing the features you need in every business application that is not your core business logic. Hence, it serves you with a basic application, providing login and complex security, menus and menu management, history, audit trails, settings management, etc. It comes complete with a design and many widgets to be utilized in own entities. Other Features of OroPlatform are, for example, a WebSocket server-driven user interface, queue-based task runners, REST Interface, as well as messaging- and workflow systems.
\nOne of the central features is that entities, which are to be managed within the system, can be set up completely by configuring them using the UI. This in itself implies that it puts another abstraction layer upon doctrine and symfony defaults.
\nAs with every framework or application, the general-purpose goals and abstraction comes with drawbacks: In fact, OroPlatform modifies and extends the common way of doing things in Symfony in several places, which makes the developer's life hard at times. Also, the UI and package managing are set in such a way that they are hard to extend or replace. The many additional abstraction layers can result in decreased performance.
\nOn the other hand, OroPlatform gives you a good headstart for prototyping and frees you from rebuilding common requirements - which makes it a relevant choice for business applications with the need to manage several entities in a backend. Also, projects such Akeneo or OroCRM use OroPlatform with success.
\nSince the project is still young, the future development and improvements need to be watched. We classified the Framework as Assess.
\n"}],"name":"oro-platform","title":"oro-platform.md","ring":3,"quadrant":"tools","body":"OroPlatform is a framework built on Symfony 2 with the purpose of providing the features you need in every business application that is not your core business logic. Hence, it serves you with a basic application, providing login and complex security, menus and menu management, history, audit trails, settings management, etc. It comes complete with a design and many widgets to be utilized in own entities. Other Features of OroPlatform are, for example, a WebSocket server-driven user interface, queue-based task runners, REST Interface, as well as messaging- and workflow systems.
\nOne of the central features is that entities, which are to be managed within the system, can be set up completely by configuring them using the UI. This in itself implies that it puts another abstraction layer upon doctrine and symfony defaults.
\nAs with every framework or application, the general-purpose goals and abstraction comes with drawbacks: In fact, OroPlatform modifies and extends the common way of doing things in Symfony in several places, which makes the developer's life hard at times. Also, the UI and package managing are set in such a way that they are hard to extend or replace. The many additional abstraction layers can result in decreased performance.
\nOn the other hand, OroPlatform gives you a good headstart for prototyping and frees you from rebuilding common requirements - which makes it a relevant choice for business applications with the need to manage several entities in a backend. Also, projects such Akeneo or OroCRM use OroPlatform with success.
\nSince the project is still young, the future development and improvements need to be watched. We classified the Framework as Assess.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/oro-platform.md"},{"flag":"new","featured":true,"revisions":[{"name":"packer","release":"2019-11-01","title":"Packer","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/packer.md","body":"Hashicorp Packer is a lightweight tool which automates the creation of any type of machine images (Machine Image As A Code) for multiple platforms. \nPacker is not a replacement of configuration management tools like Ansible. Packer works with tools like ansible to install software while creating images. \nPacker uses a configuration file to create a machine image. It uses the concepts of builders to spin up an instance, run provisioners to configure applications or services. \nOnce setup is done, it shuts the instance down and save new baked machine instance with any needed post-processing. \nPacker only builds images. But once you have them you can deploy your infrastructure quickly and even scale by spawning any number of instances without doing extra configuration. \nAnother benefit is, that machine images can be tested to verify if they are working correctly.\nPacker supports multiple cloud providers like AWS, GCP, Digital Ocean etc.
\nMachine images are important for modern deployment pipelines and fast ramp up of new infrastructure. \nWe are using Packer to build so called "Golden images" that are used in our Infrastructure as Code based provisionings.
\n"}],"name":"packer","title":"Packer","ring":1,"quadrant":"platforms-and-aoe-services","body":"Hashicorp Packer is a lightweight tool which automates the creation of any type of machine images (Machine Image As A Code) for multiple platforms. \nPacker is not a replacement of configuration management tools like Ansible. Packer works with tools like ansible to install software while creating images. \nPacker uses a configuration file to create a machine image. It uses the concepts of builders to spin up an instance, run provisioners to configure applications or services. \nOnce setup is done, it shuts the instance down and save new baked machine instance with any needed post-processing. \nPacker only builds images. But once you have them you can deploy your infrastructure quickly and even scale by spawning any number of instances without doing extra configuration. \nAnother benefit is, that machine images can be tested to verify if they are working correctly.\nPacker supports multiple cloud providers like AWS, GCP, Digital Ocean etc.
\nMachine images are important for modern deployment pipelines and fast ramp up of new infrastructure. \nWe are using Packer to build so called "Golden images" that are used in our Infrastructure as Code based provisionings.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/packer.md"},{"flag":"default","featured":true,"revisions":[{"name":"pact","release":"2018-03-01","title":"PACT","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pact.md","body":"PACT (http://pact.io/) is a family of frameworks that provides support for Consumer Driven Contract testing across different languages and frameworks.
\nConsumer Driven Contract testing is a pattern for testing interfaces/boundaries between services.
\nIt allows "consumers" to run tests against a defined Mock and record the defined interactions (=PACT).\nIt puts "providers" in the position to run the PACT tests inside their Continuous Integration Pipelines, so that the provider knows if he might break any consumers.
\nThis approach makes sense in organisations where teams collaborate more closely (See Strategic Domain Driven Design ), e.g. to build Microservice oriented architectures
\nConsumer Driven Contract Testing and how it can be conducted with PACT is documented very nicely on the official PACT website: https://docs.pact.io/.
\n"}],"name":"pact","title":"PACT","ring":2,"quadrant":"tools","body":"PACT (http://pact.io/) is a family of frameworks that provides support for Consumer Driven Contract testing across different languages and frameworks.
\nConsumer Driven Contract testing is a pattern for testing interfaces/boundaries between services.
\nIt allows "consumers" to run tests against a defined Mock and record the defined interactions (=PACT).\nIt puts "providers" in the position to run the PACT tests inside their Continuous Integration Pipelines, so that the provider knows if he might break any consumers.
\nThis approach makes sense in organisations where teams collaborate more closely (See Strategic Domain Driven Design ), e.g. to build Microservice oriented architectures
\nConsumer Driven Contract Testing and how it can be conducted with PACT is documented very nicely on the official PACT website: https://docs.pact.io/.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pact.md"},{"flag":"default","featured":false,"revisions":[{"name":"pair-working","release":"2017-03-01","title":"Pair working","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pair-working.md","body":"We summarized the practices of pair programming and administrating as pair working.
\nDerived as a practice from eXtreme Programming (XP), pair programming is a method/pattern that aims for fine-scaled feedback within a team.
\nAt AOE, some developers and operators work in pairs, not constantly, but from time to time. Most teams have positive experiences using this method, but not all teams tried the by-the-book-approach (driver and navigator principle). Especially for non-trivial tasks, pair working results in rapid knowledge exchange and better results with fewer bugs. We encourage the teams to try this approach more often.
\n"}],"name":"pair-working","title":"pair-working.md","ring":2,"quadrant":"methods-and-patterns","body":"We summarized the practices of pair programming and administrating as pair working.
\nDerived as a practice from eXtreme Programming (XP), pair programming is a method/pattern that aims for fine-scaled feedback within a team.
\nAt AOE, some developers and operators work in pairs, not constantly, but from time to time. Most teams have positive experiences using this method, but not all teams tried the by-the-book-approach (driver and navigator principle). Especially for non-trivial tasks, pair working results in rapid knowledge exchange and better results with fewer bugs. We encourage the teams to try this approach more often.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pair-working.md"},{"flag":"default","featured":false,"revisions":[{"name":"phan","release":"2017-03-01","title":"phan","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/phan.md","body":"Phan is a static code analyzer for PHP7, which is very fast, since it uses the PHP 7 AST (abstract syntax tree). Phan basically offers some of the safety that otherwise only compiled type-safe languages have - such as checking function references and return types.
\nWe expect at least the following benefits:
\nWe think Phan can be used in the deployment pipeline or as commit hooks for PHP 7-based applications. For a full Feature list check here.
\n"}],"name":"phan","title":"phan.md","ring":3,"quadrant":"tools","body":"Phan is a static code analyzer for PHP7, which is very fast, since it uses the PHP 7 AST (abstract syntax tree). Phan basically offers some of the safety that otherwise only compiled type-safe languages have - such as checking function references and return types.
\nWe expect at least the following benefits:
\nWe think Phan can be used in the deployment pipeline or as commit hooks for PHP 7-based applications. For a full Feature list check here.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/phan.md"},{"flag":"default","featured":false,"revisions":[{"name":"php7-over-php5","release":"2017-03-01","title":"PHP7 over PHP5","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/php7-over-php5.md","body":"PHP 5 has been around for a very long time, and can be considered as the PHP version that defined where PHP wants to go in the future.\nWith proper OOP, support for clojures and a steadily improving type system, it has become a very mature language.\nHowever, in the past 3 years, Facebook introduced HHVM, which became a major influence on PHP 7 and eventually brought a lot of improvements not only for the execution speed, but also with proper type hints and other features.
\nHere at AOE, we have numerous PHP projects, and we often kept it backwards-compatible to make sure that it will run on older systems. This is comparable to the procedure most frameworks (Magento, OroPlatform and derived projects) use.
\nNow, PHP 5 has reached its end-of-life, and it is time to discontinue the backwards-compatibility in favor of better and more stable applications.\nEven though we can use the PHP 7 runtime while being PHP 5-compatible, it is not considered good practice anymore, as we can now rely on the PHP 7 features and use all of its advantages.
\nOne of the major points PHP 7 supports is proper typehinting and return types (apart from PhpDocs), which makes static analysis much easier and can improve the overall code quality significantly.
\n"}],"name":"php7-over-php5","title":"php7-over-php5.md","ring":1,"quadrant":"languages-and-frameworks","body":"PHP 5 has been around for a very long time, and can be considered as the PHP version that defined where PHP wants to go in the future.\nWith proper OOP, support for clojures and a steadily improving type system, it has become a very mature language.\nHowever, in the past 3 years, Facebook introduced HHVM, which became a major influence on PHP 7 and eventually brought a lot of improvements not only for the execution speed, but also with proper type hints and other features.
\nHere at AOE, we have numerous PHP projects, and we often kept it backwards-compatible to make sure that it will run on older systems. This is comparable to the procedure most frameworks (Magento, OroPlatform and derived projects) use.
\nNow, PHP 5 has reached its end-of-life, and it is time to discontinue the backwards-compatibility in favor of better and more stable applications.\nEven though we can use the PHP 7 runtime while being PHP 5-compatible, it is not considered good practice anymore, as we can now rely on the PHP 7 features and use all of its advantages.
\nOne of the major points PHP 7 supports is proper typehinting and return types (apart from PhpDocs), which makes static analysis much easier and can improve the overall code quality significantly.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/php7-over-php5.md"},{"flag":"default","featured":true,"revisions":[{"name":"pin-external-dependencies","release":"2017-03-01","title":"Pin external dependencies","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pin-external-dependencies.md","body":"A lot of applications have dependencies on other modules or components. We have\nused different approaches regarding how and when these dependencies are resolved\nand have agreed on using a method we call "Pin (External) dependencies".
\nThis is especially relevant for script languages, where the dependency\nmanagement references the code and not immutable prebuild binaries - and\ntherefore resolves the complete transient dependencies on the fly.
\nMost of these package- or dependency management solutions support two artefacts:
\nWe suggest the following:
\nFor updating of dependencies define a process in the team. This can either be\ndone on the dev-system or in a separate automated CI job - both resulting in\nupdated dependency definitions in the application's VCS.
\n"}],"name":"pin-external-dependencies","title":"Pin external dependencies","ring":1,"quadrant":"methods-and-patterns","body":"A lot of applications have dependencies on other modules or components. We have\nused different approaches regarding how and when these dependencies are resolved\nand have agreed on using a method we call "Pin (External) dependencies".
\nThis is especially relevant for script languages, where the dependency\nmanagement references the code and not immutable prebuild binaries - and\ntherefore resolves the complete transient dependencies on the fly.
\nMost of these package- or dependency management solutions support two artefacts:
\nWe suggest the following:
\nFor updating of dependencies define a process in the team. This can either be\ndone on the dev-system or in a separate automated CI job - both resulting in\nupdated dependency definitions in the application's VCS.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pin-external-dependencies.md"},{"flag":"default","featured":true,"revisions":[{"name":"pipeline-as-code","release":"2018-03-01","title":"Pipeline as Code","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pipeline-as-code.md","body":"We moved this pattern to adopt, because it is used by nearly every team and project now and is an important part of our automation.
\nFor Jenkins, we often use a mix of Job DSL and Jenkins Pipelines and recently also used Gitlab Pipelines.
\n"},{"name":"pipeline-as-code","release":"2017-03-01","title":"Pipeline as Code","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/pipeline-as-code.md","body":"Continuous Integration and Delivery is a critical part of our development and deployment process at AOE. Using Jenkins for many years the "instructions" how to build, test and deploy applications were scattered between many custom scripts and the pipeline was often maintained by manual maintenance of Jenkins jobs. Soon, we realized that we need a more native way to express the full CI/CD pipeline process in code and manage it in version control.
\nBeing an important part of each project, the pipeline configuration should be managed as code and rolled out automatically - this also allows us to manage the pipeline itself applying the same standards that apply to application code.
\nWhile some teams started using Jenkins' JobDSL plugin, others explored the new Jenkins Pipeline - in both ways, the build artifacts should be published to an artifact repository such as Artifactory.
\n"}],"name":"pipeline-as-code","title":"Pipeline as Code","ring":1,"quadrant":"methods-and-patterns","body":"We moved this pattern to adopt, because it is used by nearly every team and project now and is an important part of our automation.
\nFor Jenkins, we often use a mix of Job DSL and Jenkins Pipelines and recently also used Gitlab Pipelines.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/pipeline-as-code.md"},{"flag":"new","featured":true,"revisions":[{"name":"plant-uml","release":"2019-11-01","title":"Plant UML","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/plant-uml.md","body":"PlantUML is an open source project that allows to create UML diagrams in a text-based and declarative way.
\nSince it is integrated in tools like Confluence, IntelliJ and Gitlab we use it a lot to quickly document results of software design sessions.
\nAnother similar tool that uses just plain JavaScript to render the diagrams is mermaid
\n"}],"name":"plant-uml","title":"Plant UML","ring":2,"quadrant":"tools","body":"PlantUML is an open source project that allows to create UML diagrams in a text-based and declarative way.
\nSince it is integrated in tools like Confluence, IntelliJ and Gitlab we use it a lot to quickly document results of software design sessions.
\nAnother similar tool that uses just plain JavaScript to render the diagrams is mermaid
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/plant-uml.md"},{"flag":"default","featured":false,"revisions":[{"name":"play-framework","release":"2017-03-01","title":"Play Framework","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/play-framework.md","body":"The Play Framework is a lightweight (web)application framework for Java and Scala programmers.
\nA developer can choose from different modules to include necessary functionality such s accessing http resources, databases, and so on. As a consequence, the developer can choose, and is not distracted by or clobbered with irrelevant things. This approach is considered as minimalistic, but it is easy to include necessary functionality.
\nRegarding the architecture, Play is stateless and built on Akka. As a consequence, Play applications have much lower resource consumption regarding CPU und memory and can scale easily. Play manages concurrency without binding a request to a thread until the response is ready.
\nWith the use of "Futures" in your code you can turn synchronous tasks (such as IO or API call to another service) into asynchronous and you can build non-blocking applications. It is recommended to understand the principles Play uses to achieve performance and scalability.
\nPlay can act as backend service delivering JSON, for esample. For building web applications. the Twirl template engine enables server-side rendering of html pages. These html pages can include css and java script parts of your own choice.
\n"}],"name":"play-framework","title":"play-framework.md","ring":1,"quadrant":"languages-and-frameworks","body":"The Play Framework is a lightweight (web)application framework for Java and Scala programmers.
\nA developer can choose from different modules to include necessary functionality such s accessing http resources, databases, and so on. As a consequence, the developer can choose, and is not distracted by or clobbered with irrelevant things. This approach is considered as minimalistic, but it is easy to include necessary functionality.
\nRegarding the architecture, Play is stateless and built on Akka. As a consequence, Play applications have much lower resource consumption regarding CPU und memory and can scale easily. Play manages concurrency without binding a request to a thread until the response is ready.
\nWith the use of "Futures" in your code you can turn synchronous tasks (such as IO or API call to another service) into asynchronous and you can build non-blocking applications. It is recommended to understand the principles Play uses to achieve performance and scalability.
\nPlay can act as backend service delivering JSON, for esample. For building web applications. the Twirl template engine enables server-side rendering of html pages. These html pages can include css and java script parts of your own choice.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/play-framework.md"},{"flag":"changed","featured":true,"revisions":[{"name":"ports-and-adapters","release":"2019-11-01","title":"Ports and Adapters","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/ports-and-adapters.md","body":"Updated to "adopt"
\n"},{"name":"ports-and-adapters","release":"2018-03-01","title":"Ports and Adapters","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/ports-and-adapters.md","body":"Ports and Adapters is an architecture or layering approach for software design. As with other layering approaches, it seperates different concerns in different layers, where dependencies are only allowed from the outside to the inside.
\nWe use "ports and adapters" with success for (larger) applications, which contain certain business logic and/or provide several ways to access the services.\nWe often use the approach hand-in-hand with Domain Driven Design. In comparison with other layering patterns (e.g. layered architecture) it allows you to have a true technology-free core (domain) model. Why? Because, with the concept of "secondary ports" (=interfaces), it inverts the control and allows outer layers to provide adapters (=implementations of the defined interface).\nIt also defines clear boundaries regarding where to put what logic of your application.
\nYou can find out more about the details and its origins in well-known blog posts such as The Clean Architecture or Hexagonal architecture
\nIn short, here is how we often layer such applications:
\nThese layers belong to every bounded context (modules) inside the application.
\nAre you searching for a potential timeless architecture for your critical application? Try implementing a potent technology-free domain model in the core layer and use ports and adapters to layer your application.
\n"}],"name":"ports-and-adapters","title":"Ports and Adapters","ring":1,"quadrant":"methods-and-patterns","body":"Updated to "adopt"
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/ports-and-adapters.md"},{"flag":"default","featured":true,"revisions":[{"name":"postcss","release":"2017-03-01","title":"PostCSS","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/postcss.md","body":"PostCSS is a tool for transforming stylesheets with JavaScript plugins. It comes with a parser that reads your CSS file into an AST, pipes it through the loaded plugins and finally\nstringifies it back into a (transformed) CSS output file.
\nWe at AOE love PostCSS because it gives us the power to use CSS Modules, which finally ends the curse of global CSS.
\nIt also has a huge list of more than 350 other available plugins.\nSure, not all of them are useful, but the sheer number of plugins shows how easy it is to write your own plugin for it.\nIn fact, it´s just a matter of writing a single JS function.
\nFinally, PostCSS is very fast and easy to setup because it runs 100% in JavaScript.\nCompared to SASS as a preprocessor, it feels much more powerful but at the same time less bloated with superfluous functionality because everything comes in its own little plugin
\n"}],"name":"postcss","title":"PostCSS","ring":1,"quadrant":"languages-and-frameworks","body":"PostCSS is a tool for transforming stylesheets with JavaScript plugins. It comes with a parser that reads your CSS file into an AST, pipes it through the loaded plugins and finally\nstringifies it back into a (transformed) CSS output file.
\nWe at AOE love PostCSS because it gives us the power to use CSS Modules, which finally ends the curse of global CSS.
\nIt also has a huge list of more than 350 other available plugins.\nSure, not all of them are useful, but the sheer number of plugins shows how easy it is to write your own plugin for it.\nIn fact, it´s just a matter of writing a single JS function.
\nFinally, PostCSS is very fast and easy to setup because it runs 100% in JavaScript.\nCompared to SASS as a preprocessor, it feels much more powerful but at the same time less bloated with superfluous functionality because everything comes in its own little plugin
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/postcss.md"},{"flag":"new","featured":false,"revisions":[{"name":"postgres","release":"2019-11-01","title":"PostgreSQL","ring":1,"quadrant":"tools","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postgres.md","body":"PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance.
\n"}],"name":"postgres","title":"PostgreSQL","ring":1,"quadrant":"tools","body":"PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postgres.md"},{"flag":"new","featured":false,"revisions":[{"name":"postman","release":"2019-11-01","title":"Postman","ring":2,"quadrant":"tools","featured":false,"fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postman.md","body":"Postman is an API testing and documentation tool. Requests can be bundled into folders \nand easily be configured to be executed against multiple environments. Responses can be evaluated using the "test" feature.
\nEven automated testing is possible using Newman as an addition to Postman.
\n"}],"name":"postman","title":"Postman","ring":2,"quadrant":"tools","body":"Postman is an API testing and documentation tool. Requests can be bundled into folders \nand easily be configured to be executed against multiple environments. Responses can be evaluated using the "test" feature.
\nEven automated testing is possible using Newman as an addition to Postman.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/postman.md"},{"flag":"default","featured":false,"revisions":[{"name":"protobuf","release":"2017-03-01","title":"Protobuf","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/protobuf.md","body":"In an increasingly microservice-oriented environment, it is crucial that all parties agree on a common language and wire format for data exchange.
\nJSON and XML are two well-known formats for serialization of data; however, they come with a few drawbacks. JSON is completely dynamic without any validation (though there is json-schema) and XML uses an extremely heavyweight syntax, which carries a huge overhead, so parsing and transport becomes quite slow.
\nProtobuf, amongst others, is an approach to solving this problem by using well-defined schemas to create language-specific code, which serializes/marshals and deserializes/unmarshals data. One of the key features is the built-in support for evolving schemas; it is easily possible to incrementally extend the definition while staying backwards-compatible and compose messages consisting of several sub-messages.
\nIf you are looking for a way to have different systems agree on a common protocol on top of a transport layer (such as AMQP or HTTP), Protobuf is definitely worth examining more closely and should be assessed.
\n"}],"name":"protobuf","title":"protobuf.md","ring":3,"quadrant":"languages-and-frameworks","body":"In an increasingly microservice-oriented environment, it is crucial that all parties agree on a common language and wire format for data exchange.
\nJSON and XML are two well-known formats for serialization of data; however, they come with a few drawbacks. JSON is completely dynamic without any validation (though there is json-schema) and XML uses an extremely heavyweight syntax, which carries a huge overhead, so parsing and transport becomes quite slow.
\nProtobuf, amongst others, is an approach to solving this problem by using well-defined schemas to create language-specific code, which serializes/marshals and deserializes/unmarshals data. One of the key features is the built-in support for evolving schemas; it is easily possible to incrementally extend the definition while staying backwards-compatible and compose messages consisting of several sub-messages.
\nIf you are looking for a way to have different systems agree on a common protocol on top of a transport layer (such as AMQP or HTTP), Protobuf is definitely worth examining more closely and should be assessed.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/protobuf.md"},{"flag":"default","featured":false,"revisions":[{"name":"puppet-environments","release":"2018-03-01","title":"Puppet Environments","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/puppet-environments.md","body":"Puppet Environments has proven to work well for our projects using Puppet.
\n"},{"name":"puppet-environments","release":"2017-03-01","title":"Puppet Environments","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/puppet-environments.md","body":"Puppet is an Open Source configuration management tool. It is used by a wide range of different companies world-wide, e.g. the Wikimedia Foundation, Mozilla, Reddit, CERN, Dell, Rackspace, Twitter, the New York Stock Exchange, PayPal, Disney, Citrix Systems, Spotify, Oracle, the University of California Los Angeles, the University of North Texas, QVC, Intel, Google and others.
\nPuppet has been the basic tool to address Continuous Configuration Automation (CCA) in AOE's Infrastructure as Code strategy (IaC) for more than 4 years.
\nIntended to give projects the means to develop and maintain their own infrastructure, separated and not influenced by other projects, Puppet environments, together with Puppet module versioning and ENC, have been introduced.\\\nPuppet Environments are rated "Trial". It supports our strategy of Infrastructure as Code (IaC) and links it to our DevOps approach, enabling project teams to set up and customize their own infrastructure.
\nTeams that want to use the Puppet Environments service from the AOE IT Team will find detailed information about the implemented CI/CD process for this.
\n"}],"name":"puppet-environments","title":"puppet-environments.md","ring":2,"quadrant":"platforms-and-aoe-services","body":"Puppet Environments has proven to work well for our projects using Puppet.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/puppet-environments.md"},{"flag":"changed","featured":true,"revisions":[{"name":"rabbitmq","release":"2019-11-01","title":"RabbitMQ","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/rabbitmq.md","body":"RabbitMQ has proven to work very well for messaging in our projects, thats why we updated it to "adopt".
\n"},{"name":"rabbitmq","release":"2017-03-01","title":"RabbitMQ","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rabbitmq.md","body":"RabbitMQ is an Open Source message broker - implementing the Advanced Message Queuing Protocol (AMQP) protocol. It provides a reliable and scalable way to transport data between loosely coupled applications, using different EAI patterns such as the Publish & Subscriber pattern. AMQP supports direct and fan-out exchanges (broadcasts) as well as topics. Queuing mechanisms allow for robust architectures, mitigating the risks of application downtimes. Typically, a RabbitMQ server can easily buffer millions of messages. RabbitMQ supports JMS in addition to AMQP. It is not intended to use JMS for new systems, but it makes RabbitMQ useful for integrating legacy systems.
\nThere are several alternative solutions to RabbitMQ, e. g. the free Apache ActiveMQ, which is integrated in Anypoint platform. ActiveMQ implements a somewhat simpler routing concept than RabbitMQ, but offers more protocols. Commercial products in this area are offered by IBM (Websphere MQ), Fiorano and almost every vendor of ESB products.
\nWe use RabbitMQ internally for transferring messages safely in our logging ecosystem between Logstash proxies and servers using direct and fan-out exchanges for delivering messages to appropriate destinations. RabbitMQ is also used to asynchronously trigger Jenkins jobs from our SCMs to mitigate heavy load on the SCMs, usually caused by Jenkins polls for SCM changes. Additionally, some critical events for monitoring are using RabbitMQ for guaranteed notification.
\nRabbitMQ is rated "Trial". It fits into our approach to build robust, resilient systems and use asynchronous messages for loosely coupled communications between components. In practice, RabbitMQ proved to be stable and dealt well with service interruptions from failures and maintenance slots. A common pain point is RabbitMQ as a single point of failure disrupting the data flow in a system. This issue is currently approached by setting up an HA cluster for RabbitMQ. The outcome of this approach will clarify the extent of future usage of RabbitMQ in our systems.
\n 
RabbitMQ has proven to work very well for messaging in our projects, that's why we updated it to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/rabbitmq.md"},{"flag":"changed","featured":true,"revisions":[{"name":"raml","release":"2019-11-01","title":"RAML","ring":4,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/raml.md","body":"Since the RAML project has decided to join the OpenAPI initiative and the RAML ecosystem lacks further development and additional tools, we decided to use and recommend using "OpenAPI specififcation (OAS)" as description standard instead.
\nRAML still provides advantages in modeling an API through its more expressive modeling language and can produce OAS
\n"},{"name":"raml","release":"2017-03-01","title":"RAML","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/raml.md","body":"RAML (the RESTful API Modelling Language) is a YAML-based API specification language. It's now available in version 1.0. The philosophy behind it is to specify the API before implementation.
\nIf you follow this philosophy, you can design your API and discuss it with your clients and team before implementing a single line of code. API consumers are able to implement against the API before it's really up and running. The api-console provides a beautiful online documentation with "try it" features for your raml definition.
\nThe RAML ecosystem provides a rich toolset for code generation (e.g. online editor; api-workbench), automatically generated documentation, code generation (e.g. go-raml), mocking, testing and much more. We prefer RAML over Swagger because of this.
\n&quot;}],&quot;name&quot;:&quot;raml&quot;,&quot;title&quot;:&quot;RAML&quot;,&quot;ring&quot;:4,&quot;quadrant&quot;:&quot;languages-and-frameworks&quot;,&quot;body&quot;:&quot;Since the RAML project has decided to join the OpenAPI initiative and the RAML ecosystem lacks further development and additional tools, we decided to use and recommend using &quot;OpenAPI specification (OAS)&quot; as description standard instead.
\nRAML still provides advantages in modeling an API through its more expressive modeling language and can produce OAS
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/raml.md"},{"flag":"default","featured":true,"revisions":[{"name":"react","release":"2018-03-01","title":"React.js","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/react.md","body":"The past months have shown that React is still a great fit for us for frontend-heavy\napplications. With its rewritten core in version 16, Facebook shows how\nimportant this framework is for them. Therefore, Facebook is investing a lot of effort into React and a\nhealthy community. In addition, we REALLY enjoy writing React\ncomponents – so much so, that we have to move this library into adopt!
\n"},{"name":"react","release":"2017-03-01","title":"React.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/react.md","body":"React claims to be "the V in MVC". But for us it is much more than that. React\nimproved the way we approach frontend applications as we build them. Its\nfunctional way of writing components and its declarative JSX syntax help us to\nbuild interactive UIs very efficiently. React's one-way data flow keeps\neverything modular and fast and makes even large applications more readable.
\nComponents are the central point of React - once we fully started\nthinking in react,\nour components became smaller, more reusable and better testable.
\nAfter some 1.5 years of experience with React and the steady growth of the\ncommunity and ecosystem around it, we can confidently say that we still see\ngreat potential to build upcoming projects with React.
\n"}],"name":"react","title":"React.js","ring":1,"quadrant":"languages-and-frameworks","body":"The past months have shown that React is still a great fit for us for frontend-heavy\napplications. With its rewritten core in version 16, Facebook shows how\nimportant this framework is for them. Therefore, Facebook is investing a lot of effort into React and a\nhealthy community. In addition, we REALLY enjoy writing React\ncomponents – so much so, that we have to move this library into adopt!
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/react.md"},{"flag":"changed","featured":true,"revisions":[{"name":"reactive-programming","release":"2019-11-01","title":"Reactive Programming","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/reactive-programming.md","body":"The reactive style of programming promotes event-based thinking and modeling -- \nand by that assists in creating more decoupled solutions.
\nSynergies arise, when people understand the concepts of this pattern: by using marble diagrams, \nwhich are a de-facto standard in visualizing algorithms in a reactive style, a common ground for communication \nis available regardless of the programming language used.
\nWhen appropriate, we choose more explicitly the Reactive Programming pattern and therefore moved this to "adopt".
\n"},{"name":"reactive-programming","release":"2018-03-01","title":"Reactive Programming","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/reactive-programming.md","body":"Classic (web-) applications typically consist of transactions that submit\nlarge forms to the server side, which then processes these and, in response, returns HTML\nfor the browser to render. Today's applications have more and more\nfine-grained 'real-time'-like aspects: A simple modification of a form field\ncould trigger a complete roundtrip to the server including other services and\npersistence. Naturally, all of these transactions should respect the\nexpectations of a user who wants a highly interactive application.
\n&quot;Reactive Programming&quot; tries to provide an answer to the challenges mentioned above\nby raising the level of abstraction. This allows you to focus on the stream of\nevents that make up your business logic in a responsive, asynchronous fashion.
\nThere are various descriptions of what Reactive Programming actually is - at\nthe most general level it is programming with asynchronous data streams and\ncontains tools to create, manipulate, combine and filter these streams. Under the term\n"Reactive Programming", we summarize the principles and implementations that\nunderlie ReactiveX and the Reactive\nManifesto.
\n&quot;Reactive Programming&quot; is employed in many of our services – frontend and\nbackend – but not always as an explicitly chosen pattern. As different\nplatforms have different means to tackle this style of programming, we choose\nto include &quot;Reactive Programming&quot; as a general Method and Patterns Item in\naddition to concrete libraries and APIs such as\nRx.JS or Akka\nStreams to highlight the\nimportance of the approach in general.
\n"}],"name":"reactive-programming","title":"Reactive Programming","ring":1,"quadrant":"methods-and-patterns","body":"The reactive style of programming promotes event-based thinking and modeling -- \nand by that assists in creating more decoupled solutions.
\nSynergies arise, when people understand the concepts of this pattern: by using marble diagrams, \nwhich are a de-facto standard in visualizing algorithms in a reactive style, a common ground for communication \nis available regardless of the programming language used.
\nWhen appropriate, we choose more explicitly the Reactive Programming pattern and therefore moved this to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/reactive-programming.md"},{"flag":"default","featured":true,"revisions":[{"name":"redux","release":"2017-03-01","title":"Redux","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/redux.md","body":"Redux helps us to maintain state in our frontend applications in a more predictable and clearer way. It is extendable though middleware, it has a great documentation and some awesome devtools that are especially helpful when you are new to Redux.
\nThe functional concepts for updating the state, combined with immutable data, lead to extremely easy and enjoyable unit tests - this is maybe the biggest plus for us developers.
\nThe official react-redux bindings also made it straightforward to weave Redux into our React applications. For asynchronous actions we use redux-sagas which has proven itself as a better alternative for redux-thunk.
\nCurrently, we use Redux only in our React projects, but we are evaluating it together with other frameworks such as Angular or Vue.js, as well.
\n&quot;}],&quot;name&quot;:&quot;redux&quot;,&quot;title&quot;:&quot;Redux&quot;,&quot;ring&quot;:2,&quot;quadrant&quot;:&quot;languages-and-frameworks&quot;,&quot;body&quot;:&quot;Redux helps us to maintain state in our frontend applications in a more predictable and clearer way. It is extendable through middleware, it has a great documentation and some awesome devtools that are especially helpful when you are new to Redux.
\nThe functional concepts for updating the state, combined with immutable data, lead to extremely easy and enjoyable unit tests - this is maybe the biggest plus for us developers.
\nThe official react-redux bindings also made it straightforward to weave Redux into our React applications. For asynchronous actions we use redux-sagas which has proven itself as a better alternative for redux-thunk.
\nCurrently, we use Redux only in our React projects, but we are evaluating it together with other frameworks such as Angular or Vue.js, as well.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/redux.md"},{"flag":"default","featured":true,"revisions":[{"name":"resilience-thinking","release":"2017-03-01","title":"Resilience thinking","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/resilience-thinking.md","body":"Resilience is the cabability of an application or service to resist different error scenarios. Especially for distributed systems - where a lot of communication between different services happen - it's very important to explicitly think of implementing resilience.
\nThere are a lot of different resilience patterns and it is also a matter of the overall software design. Typical patterns and methods used are:
\n&quot;Embrace Errors&quot; should be the mindset - because it's not a question if errors appear - it's just a question of when.
\n&quot;}],&quot;name&quot;:&quot;resilience-thinking&quot;,&quot;title&quot;:&quot;Resilience thinking&quot;,&quot;ring&quot;:2,&quot;quadrant&quot;:&quot;methods-and-patterns&quot;,&quot;body&quot;:&quot;Resilience is the capability of an application or service to resist different error scenarios. Especially for distributed systems - where a lot of communication between different services happen - it's very important to explicitly think of implementing resilience.
\nThere are a lot of different resilience patterns and it is also a matter of the overall software design. Typical patterns and methods used are:
\n&quot;Embrace Errors&quot; should be the mindset - because it's not a question if errors appear - it's just a question of when.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/resilience-thinking.md"},{"flag":"default","featured":true,"revisions":[{"name":"rest-assured","release":"2017-03-01","title":"Rest Assured (Testing)","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rest-assured.md","body":"REST-assured is a Java DSL for simplifying testing of REST-based services built on top of HTTP Builder. It supports the most important http request methods and can be used to validate and verify the response of these requests.
\nAt AOE, we use REST-assured with Spock to automate our API testing. We appreciate the easy-to-use DSL, which uses the Given-When-Then template (also known as Gherkin language). This template helps other project members to understand the code/test easily.
\nBecause of the seamless integration with Spock and our positive experience in one of our major projects, we classify REST-assured as assess.
\n"}],"name":"rest-assured","title":"Rest Assured (Testing)","ring":3,"quadrant":"tools","body":"REST-assured is a Java DSL for simplifying testing of REST-based services built on top of HTTP Builder. It supports the most important http request methods and can be used to validate and verify the response of these requests.
\nAt AOE, we use REST-assured with Spock to automate our API testing. We appreciate the easy-to-use DSL, which uses the Given-When-Then template (also known as Gherkin language). This template helps other project members to understand the code/test easily.
\nBecause of the seamless integration with Spock and our positive experience in one of our major projects, we classify REST-assured as assess.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rest-assured.md"},{"flag":"default","featured":true,"revisions":[{"name":"rxjava","release":"2017-03-01","title":"RxJava","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjava.md","body":"RxJava is the Open Source Java implementation of ReactiveX. The main concept heavily relies on the Observer- (and Subscriber)-Pattern. An Observer emits a stream of data, which can be consumed by Subscribers. The Subscriber reacts (That's where the 'Rx' comes from) asynchronously to those data events. Reactive Extensions were originally developed by Mircosoft's Erik Meijer and his team and have been ported to all major programming languages after being released to the public as Open Source software. We use RxJava (but actually RxAndroid to be precise) in the Congstar Android App to let the UI layer react to changes in the underlaying data layer.
\n"}],"name":"rxjava","title":"RxJava","ring":2,"quadrant":"tools","body":"RxJava is the Open Source Java implementation of ReactiveX. The main concept heavily relies on the Observer- (and Subscriber)-Pattern. An Observer emits a stream of data, which can be consumed by Subscribers. The Subscriber reacts (That's where the 'Rx' comes from) asynchronously to those data events. Reactive Extensions were originally developed by Mircosoft's Erik Meijer and his team and have been ported to all major programming languages after being released to the public as Open Source software. We use RxJava (but actually RxAndroid to be precise) in the Congstar Android App to let the UI layer react to changes in the underlaying data layer.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjava.md"},{"flag":"default","featured":true,"revisions":[{"name":"rxjs","release":"2017-03-01","title":"RxJs","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjs.md","body":"RX/JS aka reactive streams
\nRxJS is an implementation for the reactive programming paradigm which implements mostly the observer and iterator\npattern and follows the functional programming ideas. The pattern actually got a renaissance because it's not completely\nnew but has new implementations in many frameworks and languages like Angular, Akka, Spring and many more. Reason for \nthat attention actually is (in the javascript world), that observables can be cancelled (by rules too) and observables\ncan pass (stream) data on multiple events. Both aspects are not well realizable using promises e.g. and both were also\ndetected as a huge limitation in the JavaScript community — and so it's worth to get an understanding for reactive\nprogramming in general.
\nWe at AOE actually use RxJS in combination with Angular and think that it's worth to dive deeper into this paradigm.
\n"}],"name":"rxjs","title":"RxJs","ring":2,"quadrant":"languages-and-frameworks","body":"RX/JS aka reactive streams
\nRxJS is an implementation for the reactive programming paradigm which implements mostly the observer and iterator\npattern and follows the functional programming ideas. The pattern actually got a renaissance because it's not completely\nnew but has new implementations in many frameworks and languages like Angular, Akka, Spring and many more. Reason for \nthat attention actually is (in the javascript world), that observables can be cancelled (by rules too) and observables\ncan pass (stream) data on multiple events. Both aspects are not well realizable using promises e.g. and both were also\ndetected as a huge limitation in the JavaScript community — and so it's worth to get an understanding for reactive\nprogramming in general.
\nWe at AOE actually use RxJS in combination with Angular and think that it's worth to dive deeper into this paradigm.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/rxjs.md"},{"flag":"default","featured":true,"revisions":[{"name":"sass","release":"2017-03-01","title":"SASS","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/sass.md","body":"SASS (Syntactically Awesome Style-Sheets) is an extension to native CSS, which, as a preprocessor, simplifies the generation of CSS by offering features that enable developers to more efficiently write robust, better readable and maintainable CSS.
\nCore features of SASS are:
\nSASS has been widely adopted for many years and has evolved to an industry-standard backed by an active community since 2006.
\nThe learning curve is very smooth as SASS is fully compatible to CSS, meaning that all features are optional: Starting with SASS is as easy as renaming .css-files to .scss in a first step and then refactoring it step-by-step with the use of SASS features.
\nAt AOE, SASS has been recommended by the frontend COI and is used in nearly every current project.
\nMore information:
\n\n"}],"name":"sass","title":"SASS","ring":1,"quadrant":"languages-and-frameworks","body":"SASS (Syntactically Awesome Style-Sheets) is an extension to native CSS, which, as a preprocessor, simplifies the generation of CSS by offering features that enable developers to more efficiently write robust, better readable and maintainable CSS.
\nCore features of SASS are:
\nSASS has been widely adopted for many years and has evolved to an industry-standard backed by an active community since 2006.
\nThe learning curve is very smooth as SASS is fully compatible to CSS, meaning that all features are optional: Starting with SASS is as easy as renaming .css-files to .scss in a first step and then refactoring it step-by-step with the use of SASS features.
\nAt AOE, SASS has been recommended by the frontend COI and is used in nearly every current project.
\nMore information:
\n\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/sass.md"},{"flag":"default","featured":true,"revisions":[{"name":"scala-lang","release":"2018-03-01","title":"Scala Lang","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/scala-lang.md","body":"Scala is used in many projects at AOE. We have therefore moved it to the adopt level.
\n"},{"name":"scala-lang","release":"2017-03-01","title":"Scala Lang","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/scala-lang.md","body":"Besides Java, Scala is the most mature language on the Java Virtual Machine. Its unique blend of object-oriented and functional language features and rich type system with advanced type inference enables one to write concise code.
\nIt is fully interoperable with Java but has a big ecosystem of tools and frameworks on its own.
\nScala provides one of the best high-level concurrency- and async features on the language level as well as on the framework level, making it the default choice of twitter and the like.
\nAt AOE, we already use Scala in various projects to create scalable backend systems (Play, Akka) or for batch processing (Spark).
\n"}],"name":"scala-lang","title":"Scala Lang","ring":1,"quadrant":"languages-and-frameworks","body":"Scala is used in many projects at AOE. We have therefore moved it to the adopt level.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/scala-lang.md"},{"flag":"changed","featured":true,"revisions":[{"name":"self-service-infrastructure","release":"2019-11-01","title":"Self-service infrastructure","ring":2,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/self-service-infrastructure.md","body":"Moved to "trial".
\n"},{"name":"self-service-infrastructure","release":"2018-03-01","title":"Self-service infrastructure","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/self-service-infrastructure.md","body":"With growing teams, growing projects and growing infrastructures, we decided to follow the "You build it, you run it" approach, and when we started to run Kubernetes, where we have a great abstraction layer between infrastructure and applications, we decided to make the developer teams write their own Helm charts.\nBy agreeing on just a couple of patters, this allows us to easily manage a microservice architecture with more than 60 Applications, without too much hassle managing infrastructure/runtimes for (among others) JVM, Go and PHP applications.\nMost of the hosting/provisioning decisions are better kept within the team, as the teams know how their applications work. By providing a clear interface, this became the cornerstone for running our microservice architecture, and keeping the amount of actual servers much lower than in projects with a centralized operations/IT team.
\nEventually, self-service infrastructure, and "You build it, you run it", allowed us to give both our application developers as well as our infrastructure engineers more flexibility than one team explaining to another team what to do, resulting in a better collaboration than before.
\n"}],"name":"self-service-infrastructure","title":"Self-service infrastructure","ring":2,"quadrant":"platforms-and-aoe-services","body":"Moved to "trial".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/self-service-infrastructure.md"},{"flag":"default","featured":true,"revisions":[{"name":"settings-injection","release":"2017-03-01","title":"Settings Injection","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/settings-injection.md","body":"While deploying applications to an environment, the application typically needs to be configured for that specific environment. Typical settings include domain names, database credentials and the location of other dependent services such as cache backends, queues or session storages.
\nThese settings should not be shipped with the build package. Instead, it's the environment - this build is being deployed to - that should expose these values to application. A common way to "inject" these values is by making them available as environment variables or dynamically creating configuration files for the application. You can achieve this pattern without special tools - but this concept of settings injection also works with tools such as Consul, kubernetes (with configMaps and secrets) or YAD.
\nIn this manner, the build package can be independent from the environment it's being deployed to - making it easier to follow the "Build once, deploy often" CI/CD principle.
\n"}],"name":"settings-injection","title":"Settings Injection","ring":1,"quadrant":"methods-and-patterns","body":"While deploying applications to an environment, the application typically needs to be configured for that specific environment. Typical settings include domain names, database credentials and the location of other dependent services such as cache backends, queues or session storages.
\nThese settings should not be shipped with the build package. Instead, it's the environment - this build is being deployed to - that should expose these values to application. A common way to "inject" these values is by making them available as environment variables or dynamically creating configuration files for the application. You can achieve this pattern without special tools - but this concept of settings injection also works with tools such as Consul, kubernetes (with configMaps and secrets) or YAD.
\nIn this manner, the build package can be independent from the environment it's being deployed to - making it easier to follow the "Build once, deploy often" CI/CD principle.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/settings-injection.md"},{"flag":"changed","featured":true,"revisions":[{"name":"sonarqube","release":"2019-11-01","title":"SonarQube","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/sonarqube.md","body":"At AOE, we are using SonarQube to get a historical overview of the code quality in our Projects. With SonarQube, you can get a quick insight into the condition of your code. It analyzes many languages and provides numerous static analysis rules.\nSonarQube is also being used for Static Application Security Testing (SAST) which scans our code for potential security vulnerabilities and is an essential element of our Secure Software Development Lifecycle.
\n"},{"name":"sonarqube","release":"2018-03-01","title":"SonarQube","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/sonarqube.md","body":"At AOE, we're evaluating SonarQube to get an historical overview of the code quality of our Projects. With SonarQube, you can get a quick hint about the condition of your code. It analyzes many languages and provides numerous static analysis rules.
\n"}],"name":"sonarqube","title":"SonarQube","ring":2,"quadrant":"tools","body":"At AOE, we are using SonarQube to get a historical overview of the code quality in our Projects. With SonarQube, you can get a quick insight into the condition of your code. It analyzes many languages and provides numerous static analysis rules.\nSonarQube is also being used for Static Application Security Testing (SAST) which scans our code for potential security vulnerabilities and is an essential element of our Secure Software Development Lifecycle.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/sonarqube.md"},{"flag":"default","featured":false,"revisions":[{"name":"sparkpost","release":"2017-03-01","title":"SparkPost","ring":3,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/sparkpost.md","body":"Sparkpost is an SaaS service for E-Mail delivery and E-Mail templating that can be used to send E-Mails by calling an API.
\nIn a lot of projects, it is a typical requirement that different E-Mails need to be sent and that the project stakeholders want to adjust E-Mail templates and content on a relatively regular basis.
\nAlso, (mass) sending E-Mails and avoiding that they are classified as Spam is not an easy topic. That's why we decided to use E-Mail delivery services in our projects and evaluated different providers.
\nWe decided to start using SparkPost because of pricing, feature set and the available reviews on the Internet. There are also other possible solutions such as SendGrid or Postmark.
\n&quot;}],&quot;name&quot;:&quot;sparkpost&quot;,&quot;title&quot;:&quot;SparkPost&quot;,&quot;ring&quot;:3,&quot;quadrant&quot;:&quot;platforms-and-aoe-services&quot;,&quot;body&quot;:&quot;Sparkpost is an SaaS service for E-Mail delivery and E-Mail templating that can be used to send E-Mails by calling an API.
\nIn a lot of projects, it is a typical requirement that different E-Mails need to be sent and that the project stakeholders want to adjust E-Mail templates and content on a relatively regular basis.
\nAlso, (mass) sending E-Mails and avoiding that they are classified as Spam is not an easy topic. That's why we decided to use E-Mail delivery services in our projects and evaluated different providers.
\nWe decided to start using SparkPost because of pricing, feature set and the available reviews on the Internet. There are also other possible solutions such as SendGrid or Postmark.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/sparkpost.md"},{"flag":"default","featured":true,"revisions":[{"name":"spock_geb","release":"2017-03-01","title":"Spock + Geb","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spock_geb.md","body":"spockframework.org - Spock is a testing and specification framework for Java and Groovy applications. What makes it stand out from the crowd is its beautiful and highly expressive specification language. Thanks to its JUnit runner, Spock is compatible with most IDEs, build tools and continuous integration servers. Spock is inspired from JUnit, jMock, RSpec, Groovy, Scala, Vulcans, and other fascinating life forms.
\ngebish.org - Geb is a browser automation solution. It brings together the power of WebDriver, the elegance of jQuery content selection, the robustness of Page Object modelling and the expressiveness of the Groovy language. It can be used for scripting, scraping and general automation or equally as a functional/web/acceptance testing solution via integration with testing frameworks such as Spock, JUnit & TestNG.
\nAt AOE, we use Spock in combination with Geb in various projects for black-box testing. Mainly, we implement our functional integration and acceptance testing automation with these frameworks, which work together seamlessly. And, we also like the convenience of extending the tests with Groovy built-ins or custom extensions.
\nBecause of the successful use in two of our large projects and the wide range of opportunities within the testing domain with Spock and Geb, we classify this combo with adopt.
\n\n"}],"name":"spock_geb","title":"Spock + Geb","ring":1,"quadrant":"languages-and-frameworks","body":"spockframework.org - Spock is a testing and specification framework for Java and Groovy applications. What makes it stand out from the crowd is its beautiful and highly expressive specification language. Thanks to its JUnit runner, Spock is compatible with most IDEs, build tools and continuous integration servers. Spock is inspired from JUnit, jMock, RSpec, Groovy, Scala, Vulcans, and other fascinating life forms.
\ngebish.org - Geb is a browser automation solution. It brings together the power of WebDriver, the elegance of jQuery content selection, the robustness of Page Object modelling and the expressiveness of the Groovy language. It can be used for scripting, scraping and general automation or equally as a functional/web/acceptance testing solution via integration with testing frameworks such as Spock, JUnit & TestNG.
\nAt AOE, we use Spock in combination with Geb in various projects for black-box testing. Mainly, we implement our functional integration and acceptance testing automation with these frameworks, which work together seamlessly. And, we also like the convenience of extending the tests with Groovy built-ins or custom extensions.
\nBecause of the successful use in two of our large projects and the wide range of opportunities within the testing domain with Spock and Geb, we classify this combo with adopt.
\n\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spock_geb.md"},{"flag":"changed","featured":true,"revisions":[{"name":"spring-boot","release":"2019-11-01","title":"Spring Boot","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/spring-boot.md","body":"We now have several years of experiences with Spring Boot, \nand a big projects Microservice Environment runs completely on Spring Boot, \nso it's time to update it to "adopt".
\n"},{"name":"spring-boot","release":"2018-03-01","title":"Spring Boot","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/spring-boot.md","body":"We have had good experiences with Spring Boot, and already have several Spring Boot-based services running in \nproduction. We like the ease of kickstarting new services and the variety of tools in the Spring ecosystem.
\n"},{"name":"spring-boot","release":"2017-03-01","title":"Spring Boot","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spring-boot.md","body":"With Spring Boot you create standalone Spring Applications with minimum configuration. Spring Boot rapidly gets you up and running for production.
\nWith an embedded Tomcat, Jetty and Undertow you have everything you need to deploy your application out-of-the-box.
\nThe Spring Cloud ecosystem also gives you a lot of extension points for developing, deploying and running cloud applications.
\nIt's based on the rock-solid Spring framework and provides excellent documentation.
\nAt AOE, we use Spring Boot in a microservice architecture. Together with Groovy as the implementation Language, and some other Tools (Spring Security, Cloud, HATEOAS, Data, Session) from the Spring environment, we are able to create complex and powerful applications in no time.
\n"}],"name":"spring-boot","title":"Spring Boot","ring":1,"quadrant":"languages-and-frameworks","body":"We now have several years of experiences with Spring Boot, \nand a big projects Microservice Environment runs completely on Spring Boot, \nso it's time to update it to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/spring-boot.md"},{"flag":"default","featured":true,"revisions":[{"name":"spring-rest-docs","release":"2017-03-01","title":"Spring REST Docs","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spring-rest-docs.md","body":"Spring REST Docs auto generates Asciidoctor snippets with the help of Spring MVC Test or RestAssured. So you can be sure that your tests are inline with the documentation.
\nAt AOE, we use Spring REST Docs to document our Rest Services and Hal Resources. We also use it to auto generate Wiremock Stubs, so the consumer of the service can test against the exact API of the service.
\n"}],"name":"spring-rest-docs","title":"Spring REST Docs","ring":3,"quadrant":"tools","body":"Spring REST Docs auto generates Asciidoctor snippets with the help of Spring MVC Test or RestAssured. So you can be sure that your tests are inline with the documentation.
\nAt AOE, we use Spring REST Docs to document our Rest Services and Hal Resources. We also use it to auto generate Wiremock Stubs, so the consumer of the service can test against the exact API of the service.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/spring-rest-docs.md"},{"flag":"new","featured":true,"revisions":[{"name":"storybook","release":"2019-11-01","title":"Storybook","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/storybook.md","body":"Storybook is a user interface development environment and playground for UI components. The tool enables developers to create components independently and showcase components interactively in an isolated development environment.\nStorybook runs outside of the main app so users can develop UI components in isolation without worrying about app specific dependencies and requirements.
\n"}],"name":"storybook","title":"Storybook","ring":3,"quadrant":"tools","body":"Storybook is a user interface development environment and playground for UI components. The tool enables developers to create components independently and showcase components interactively in an isolated development environment.\nStorybook runs outside of the main app so users can develop UI components in isolation without worrying about app specific dependencies and requirements.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/storybook.md"},{"flag":"default","featured":true,"revisions":[{"name":"strategic-domain-driven-design","release":"2017-03-01","title":"Strategic Domain Driven Design","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/strategic-domain-driven-design.md","body":"Design of distributed applications need to be done wisely. Strategic Domain Driven Design is an approach for modelling large-scale applications and systems and is introduced in the last part of Eric Evans' book Domain Driven Design.
\nDomain driven design is a well-known pattern family and has been established at AOE for quite some time now. Unlike Domain Driven Design, which focuses on the tactical design in an application, strategic domain driven design is an approach that is very helpful for the high-level strategic design of an application and distributed software architecture.
\nIt is a pattern familiy focused on using and defining Bounded Context and thinking explicitly of the different relationship patterns and the required "translation" of similar "concepts" between the bounded contexts. It is helpful to argue and find a good strategic architecture in alignment with the requirements, the domain and by considering Conway's Law.\nA context map and a common conceptional core help to understand and improve the overall strategic picture. Especially with the Microservice approach, it is important to define and connect services following the low coupling - high cohesion principles by idendifying fitting bounded contexts.
\nThe following chart gives an overview of possible relationships between bounded contexts:\n
While we have found that this approach is especially useful in designing distributed systems and applications with microservices, we have also extended this approach to provide guidlines for general enterprise architectures.
\n"}],"name":"strategic-domain-driven-design","title":"Strategic Domain Driven Design","ring":1,"quadrant":"methods-and-patterns","body":"Design of distributed applications need to be done wisely. Strategic Domain Driven Design is an approach for modelling large-scale applications and systems and is introduced in the last part of Eric Evans' book Domain Driven Design.
\nDomain driven design is a well-known pattern family and has been established at AOE for quite some time now. Unlike Domain Driven Design, which focuses on the tactical design in an application, strategic domain driven design is an approach that is very helpful for the high-level strategic design of an application and distributed software architecture.
\nIt is a pattern familiy focused on using and defining Bounded Context and thinking explicitly of the different relationship patterns and the required "translation" of similar "concepts" between the bounded contexts. It is helpful to argue and find a good strategic architecture in alignment with the requirements, the domain and by considering Conway's Law.\nA context map and a common conceptional core help to understand and improve the overall strategic picture. Especially with the Microservice approach, it is important to define and connect services following the low coupling - high cohesion principles by idendifying fitting bounded contexts.
\nThe following chart gives an overview of possible relationships between bounded contexts:\n
While we have found that this approach is especially useful in designing distributed systems and applications with microservices, we have also extended this approach to provide guidlines for general enterprise architectures.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/strategic-domain-driven-design.md"},{"flag":"new","featured":true,"revisions":[{"name":"stride-threat-modeling","release":"2019-11-01","title":"STRIDE Threat Modeling","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/stride-threat-modeling.md","body":"STRIDE is a model of threat groups that helps to identify security threats to any application, component or infrastructure.
\nThe acronym stands for:
\nAOE is applying the threat model in collaborative sessions using the Elevation of Privilege Card Game which helps to spark imagination and makes threats more tangible.
\n"}],"name":"stride-threat-modeling","title":"STRIDE Threat Modeling","ring":2,"quadrant":"methods-and-patterns","body":"STRIDE is a model of threat groups that helps to identify security threats to any application, component or infrastructure.
\nThe acronym stands for:
\nAOE is applying the threat model in collaborative sessions using the Elevation of Privilege Card Game which helps to spark imagination and makes threats more tangible.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/stride-threat-modeling.md"},{"flag":"default","featured":false,"revisions":[{"name":"styleguide-driven-development","release":"2018-03-01","title":"Styleguide Driven Development","ring":1,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/styleguide-driven-development.md","body":"Updated to "adopt".
\n"},{"name":"styleguide-driven-development","release":"2017-03-01","title":"Styleguide Driven Development","ring":2,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/styleguide-driven-development.md","body":"The goal of Styleguide Driven Development is to develop your application user Interface independently and reusable in a Pattern Library.\\\nIn the old days, the frontend was developed based on page-centric Photoshop files which made it hard to change things afterwards. With styleguide driven development you build smaller elements, which are reusable in all of your frontends.
\nYou can start developing your UI components (HTML/CSS/JavaScript) very early in the production phase without having to wait for a ready-to-use development system.\\\nDesigners and Testers can give feedback early and you can share the documentation and code with external teams.
\nAt AOE, we use Hologram to build a living documentation right from the source files. Whenever a new UI Element is needed, a developer starts building it in the styleguide -- not in the actual application code. By writing the code for the new component, the documentation for it is created instantly. Any other developer can easily see which elements exist and how it can be used in the code.
\n"}],"name":"styleguide-driven-development","title":"styleguide-driven-development.md","ring":1,"quadrant":"methods-and-patterns","body":"Updated to "adopt".
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/styleguide-driven-development.md"},{"flag":"default","featured":false,"revisions":[{"name":"symfony-components","release":"2017-03-01","title":"Symfony Components","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/symfony-components.md","body":"Symfony Components are part of the Symfony Framework and they are designed as decoupled and reusable PHP components.
\nTheir use cases vary from simple little helpers such as a beautified var_dump to more complex ones such as access control, list-based security mechanisms and an easy-to-integrate console component to give your already existing applications some CLI capabilities. They are used by a lot of PHP-based projects such as Typo3, Magento, Composer, PHPUnit and Doctrine, with contributions continually taking place. If you are planning the next project with PHP components, you should have a look at the Symfony Components list, which includes a lot of well-designed, decoupled Open Source pieces of PHP code.
\n"}],"name":"symfony-components","title":"symfony-components.md","ring":2,"quadrant":"languages-and-frameworks","body":"Symfony Components are part of the Symfony Framework and they are designed as decoupled and reusable PHP components.
\nTheir use cases vary from simple little helpers such as a beautified var_dump to more complex ones such as access control, list-based security mechanisms and an easy-to-integrate console component to give your already existing applications some CLI capabilities. They are used by a lot of PHP-based projects such as Typo3, Magento, Composer, PHPUnit and Doctrine, with contributions continually taking place. If you are planning the next project with PHP components, you should have a look at the Symfony Components list, which includes a lot of well-designed, decoupled Open Source pieces of PHP code.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/symfony-components.md"},{"flag":"new","featured":true,"revisions":[{"name":"temporal-modeling","release":"2019-11-01","title":"Temporal Modeling","ring":3,"quadrant":"methods-and-patterns","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/temporal-modeling.md","body":"Temporal Modeling is way of modeling software systems and components by putting events first.
\nThe usual way of modeling software is to find structures, things and relations.\nWe try to find the relevant aspects of a domain and put all properties into an object-oriented model.\nTrying to create a second model for a related business process, having the structural model already in place,\nmight result in a process representation that is tightly coupled with the assumptions built up from the structural\nmodel and too far away from reality.
\nBy focusing on the domain processes first, one can visualize all aspects of a process over time.\nHaving the process visualized, allows to see potential pitfalls or forgotten aspects.\nWith a temporal model at hand, it is easy to create a object-oriented or structural model that perfectly\nrepresents all required information.
\nWe tried this method when tackling big or complex domains.
\n"}],"name":"temporal-modeling","title":"Temporal Modeling","ring":3,"quadrant":"methods-and-patterns","body":"Temporal Modeling is way of modeling software systems and components by putting events first.
\nThe usual way of modeling software is to find structures, things and relations.\nWe try to find the relevant aspects of a domain and put all properties into an object-oriented model.\nTrying to create a second model for a related business process, having the structural model already in place,\nmight result in a process representation that is tightly coupled with the assumptions built up from the structural\nmodel and too far away from reality.
\nBy focusing on the domain processes first, one can visualize all aspects of a process over time.\nHaving the process visualized, allows to see potential pitfalls or forgotten aspects.\nWith a temporal model at hand, it is easy to create a object-oriented or structural model that perfectly\nrepresents all required information.
\nWe tried this method when tackling big or complex domains.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/temporal-modeling.md"},{"flag":"changed","featured":true,"revisions":[{"name":"terraform","release":"2019-11-01","title":"Terraform","ring":1,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/terraform.md","body":"Terraform is a tool for building, changing and versioning infrastructure using the infrastructure as code pattern.\nTerraform supports popular service providers like AWS, Google Cloud Platform, Azure and many more.
\nInfrastructure is described in configuration files trough the HCL (HashiCorp Configuration Language), which brings a set of string interpolations and built-in functions, \nincluding conditionals and loops. Terraform validates configuration files before trying to run updates. It checks not only that all files use the correct syntax, \nbut also that all parameters are accessible and the configuration as a whole is valid. In Terraform, you can (and should) run a ‘plan’ step before applying any changes. \nThis step tells you precisely what is going to change and why.\nAnother feature of Terraform is that it makes it easy to reuse code by using modules. That gives a lot of leeway in structuring projects in the way it makes most sense.
\nHere at AOE we use terraform in multiple teams to provision infrastructure and manage their lifecycle on cloud platforms such as AWS and for platforms such as Kubernetes.
\n"},{"name":"terraform","release":"2018-03-01","title":"Terraform","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/terraform.md","body":"For the infrastructure of our OM3 projects we run multiple Kubernetes clusters, and to orchestrate the infrastructure provisioning we quickly decided to go with Terraform.\nTerraform allows us to easily manage our infrastructure, from AWS EC2 instances to RabbitMQ message queues.\nAlso, the Kops installer for Kubernetes on AWS uses Terraform as its main building brick, and we can trigger Kops via Terraform.
\nWe bring terraform together with Helm to manage similar parts of the infrastructure, for example a shared file with domainname to application mappings allows us to provision Route 53 DNS entries via Terraform and then roll out Kubernetes Ingress definitions with the appropriate hostname to service mapping via Helm.
\n"}],"name":"terraform","title":"Terraform","ring":1,"quadrant":"platforms-and-aoe-services","body":"Terraform is a tool for building, changing and versioning infrastructure using the infrastructure as code pattern.\nTerraform supports popular service providers like AWS, Google Cloud Platform, Azure and many more.
\nInfrastructure is described in configuration files trough the HCL (HashiCorp Configuration Language), which brings a set of string interpolations and built-in functions, \nincluding conditionals and loops. Terraform validates configuration files before trying to run updates. It checks not only that all files use the correct syntax, \nbut also that all parameters are accessible and the configuration as a whole is valid. In Terraform, you can (and should) run a ‘plan’ step before applying any changes. \nThis step tells you precisely what is going to change and why.\nAnother feature of Terraform is that it makes it easy to reuse code by using modules. That gives a lot of leeway in structuring projects in the way it makes most sense.
\nHere at AOE we use terraform in multiple teams to provision infrastructure and manage their lifecycle on cloud platforms such as AWS and for platforms such as Kubernetes.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/terraform.md"},{"flag":"changed","featured":true,"revisions":[{"name":"typescript","release":"2019-11-01","title":"Typescript","ring":1,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/typescript.md","body":"As writing frontend applications becomes more complex, TypeScript allows us to scale client side code easily, even with large code bases. We use typescript successfully at production for many projects and we are only going to use it even more in the future. We highly recommend using typescript over javascript, therefore we have decided to move it to adopt.
\n"},{"name":"typescript","release":"2017-03-01","title":"Typescript","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/typescript.md","body":"TypeScript is a language that gets transpiled to native JavaScript code.
\nIt offers support for the latest EcmaScript features and has strict typing and support for interfaces built in.
\nJavaScript scoping, which led into recurring workarounds such as var self = this, myFunc.bind(this)_,_was eliminated in TypeScript.
\nIn TypeScript this stays this, which leads to more readable and understandable code from an OOP perspective.
\nTypeScript continues to be actively developed by Microsoft and is well-Integrated in today's IDEs.
\nThe excellent structure and the possibilities for extension make it a good choice to consider for larger JavaScript projects.
\nTypescript was the choice for Angular and one can assume that it will get more traction with the success of Angular in the future.
\nThere are also projects that support Typescript „code execution“ on the server such as ts-node.
\n"}],"name":"typescript","title":"Typescript","ring":1,"quadrant":"languages-and-frameworks","body":"As writing frontend applications becomes more complex, TypeScript allows us to scale client side code easily, even with large code bases. We use typescript successfully at production for many projects and we are only going to use it even more in the future. We highly recommend using typescript over javascript, therefore we have decided to move it to adopt.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/typescript.md"},{"flag":"default","featured":false,"revisions":[{"name":"typo3-as-a-framework","release":"2017-03-01","title":"TYPO3 as a Framework","ring":4,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/typo3-as-a-framework.md","body":"We should avoid building new projects around TYPO3 by default. A lot of past projects started with CMS-only features in the beginning, and, for example, developed toward highly customized E-Commerce platforms. Instead of rearranging the architecture in a useful way, functionality was built on top of TYPO3's core and its extension framework Extbase. In the context of larger projects, this lead to deployment monoliths and the inability to integrate new technologies.
\nWhile in the past it was easy to kickstart a TYPO3 project with AOE's custom-tailored kickstarter, we now have a lot of knowledge and tools available to start projects with a much smarter architecture.\nThis does not mean you shouldn't use TYPO3 anymore, but use it as the tool it is: a content management system.
\n"}],"name":"typo3-as-a-framework","title":"typo3-as-a-framework.md","ring":4,"quadrant":"tools","body":"We should avoid building new projects around TYPO3 by default. A lot of past projects started with CMS-only features in the beginning, and, for example, developed toward highly customized E-Commerce platforms. Instead of rearranging the architecture in a useful way, functionality was built on top of TYPO3's core and its extension framework Extbase. In the context of larger projects, this lead to deployment monoliths and the inability to integrate new technologies.
\nWhile in the past it was easy to kickstart a TYPO3 project with AOE's custom-tailored kickstarter, we now have a lot of knowledge and tools available to start projects with a much smarter architecture.\nThis does not mean you shouldn't use TYPO3 anymore, but use it as the tool it is: a content management system.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/typo3-as-a-framework.md"},{"flag":"new","featured":true,"revisions":[{"name":"vault","release":"2019-11-01","title":"Vault","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vault.md","body":"Hashicorp Vault is a Go application with a Rest/Cli interface that you can use to securely access secrets.\nA secret can be any sensitive data, such as credentials, certificates, access tokens, encryption keys etc. \nVaults key features are a secure secret storage, dynamic secretes (create on-demand secrets), data encryption, secret leasing, renewal and revocation.
\n"}],"name":"vault","title":"Vault","ring":2,"quadrant":"tools","body":"Hashicorp Vault is a Go application with a Rest/Cli interface that you can use to securely access secrets.\nA secret can be any sensitive data, such as credentials, certificates, access tokens, encryption keys etc. \nVaults key features are a secure secret storage, dynamic secretes (create on-demand secrets), data encryption, secret leasing, renewal and revocation.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vault.md"},{"flag":"default","featured":true,"revisions":[{"name":"vue","release":"2018-03-01","title":"Vue.js","ring":2,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/vue.md","body":"Updated to "trial".
\n"},{"name":"vue","release":"2017-03-01","title":"Vue.js","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/vue.md","body":"Vue is a progressive, incrementally adoptable framework for building user interfaces maintained by Evan You. Unlike other monolithic frameworks, the core library is focused on the view layer only and is very easy to pick up and integrate with other libraries or existing projects. Vue is also perfectly capable of powering sophisticated single-page applications when used in combination with modern tooling and supporting libraries such as vuex and vue-router.
\nVue uses an HTML-based template syntax that allows you to declaratively bind the rendered DOM to the underlying Vue instance’s data. Under the hood, Vue compiles the templates into Virtual DOM render functions. Combined with the reactivity system Vue is able to intelligently figure out the minimal amount of components to re-render and apply the minimal amount of DOM manipulations when the app state changes, which provides for very high performance.
\nApplications can be split into Single File Components - a single file containing the template (HTML), style (CSS) and functionality (JS) - which simplifies maintainability and testability of the code and promotes reusability across other projects.
\n"}],"name":"vue","title":"Vue.js","ring":2,"quadrant":"languages-and-frameworks","body":"Updated to "trial".
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/vue.md"},{"flag":"new","featured":true,"revisions":[{"name":"vuex","release":"2019-11-01","title":"Vuex","ring":3,"quadrant":"languages-and-frameworks","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vuex.md","body":"Vuex is a state management pattern + library for Vue.js applications.
\n"}],"name":"vuex","title":"Vuex","ring":3,"quadrant":"languages-and-frameworks","body":"Vuex is a state management pattern + library for Vue.js applications.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/vuex.md"},{"flag":"default","featured":true,"revisions":[{"name":"webpack","release":"2018-03-01","title":"Webpack","ring":1,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/webpack.md","body":"In the last few years, Webpack has grown to become the de-facto standard for Web\nbundling in the JavaScript-Ecosystem. With Version 3, Webpack is a more robust\nand better documented bundler with nice new features such as\nscope hoisting.\nBecause of this, and because of the continuously growing community, we have adopted Webpack for nearly\nevery single-page application we have.
\n"},{"name":"webpack","release":"2017-03-01","title":"Webpack","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/webpack.md","body":"Webpack is a web bundler for JavaScript applications. Instead of writing scripts to build and bundle your app like you would with Gulp, you just define what files you want to load into your bundle.
\nIn the following example, we define that JavaScript files should be handled by babel-loader, excluding the files from node_modules. The logic behind the process comes from the loader. You can find the right loader in npm.
\n{\n test: /\\.js$/,\n loader: 'babel-loader',\n exclude: /node_modules/,\n}\nOn top of that you can use plugins to optimize your bundle like uglifying your code or put your common libraries in a separate file.
\nUnder the hood, you've got nice features such as:
\nThe configuration is simple and there is excellent and extensive documentation.
\n"}],"name":"webpack","title":"Webpack","ring":1,"quadrant":"tools","body":"In the last few years, Webpack has grown to become the de-facto standard for Web\nbundling in the JavaScript-Ecosystem. With Version 3, Webpack is a more robust\nand better documented bundler with nice new features such as\nscope hoisting.\nBecause of this, and because of the continuously growing community, we have adopted Webpack for nearly\nevery single-page application we have.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/webpack.md"},{"flag":"default","featured":true,"revisions":[{"name":"wiremock","release":"2017-03-01","title":"Wiremock","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/wiremock.md","body":"WireMock is an HTTP mock server - it can be used to mock APIs for testing.
\nAt its core, it is a web server that can be prepared to serve canned responses to particular requests (stubbing), and that captures incoming requests so that they can be checked later (verification). It also has an assortment of other useful features including record/playback of interactions with other APIs, injection of faults and delays, simulation of stateful behavior.
\nIt can be used as a library by any JVM application, or run as a standalone process either on the same host as the system under test or a remote server. All of WireMock's features are accessible via its REST (JSON) interface and its Java API. Additionally, the mock server can be configured via JSON files.
\nAt AOE, we use WireMock as a standalone server to mock APIs that are outside our system context to get a stable environment for testing and rapid feedback. Besides the decoupled test and development advantages, the mocked APIs can also be used in contract-based tests. We also use embedded WireMock in functional tests to stub external services. The explicit test of faults are especially helpful in building and testing the resilience of your application.
\nBecause of the features such as flexible deployment, powerful request matching and record/payback interactions, as well as the fact that the server runs stable in our project environments, we classify WireMock as trial.
\n"}],"name":"wiremock","title":"Wiremock","ring":2,"quadrant":"tools","body":"WireMock is an HTTP mock server - it can be used to mock APIs for testing.
\nAt its core, it is a web server that can be prepared to serve canned responses to particular requests (stubbing), and that captures incoming requests so that they can be checked later (verification). It also has an assortment of other useful features including record/playback of interactions with other APIs, injection of faults and delays, simulation of stateful behavior.
\nIt can be used as a library by any JVM application, or run as a standalone process either on the same host as the system under test or a remote server. All of WireMock's features are accessible via its REST (JSON) interface and its Java API. Additionally, the mock server can be configured via JSON files.
\nAt AOE, we use WireMock as a standalone server to mock APIs that are outside our system context to get a stable environment for testing and rapid feedback. Besides the decoupled test and development advantages, the mocked APIs can also be used in contract-based tests. We also use embedded WireMock in functional tests to stub external services. The explicit test of faults are especially helpful in building and testing the resilience of your application.
\nBecause of the features such as flexible deployment, powerful request matching and record/payback interactions, as well as the fact that the server runs stable in our project environments, we classify WireMock as trial.
\n","info":"","release":"2017-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/wiremock.md"},{"flag":"default","featured":false,"revisions":[{"name":"xataface","release":"2017-03-01","title":"Xataface","ring":4,"quadrant":"platforms-and-aoe-services","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/xataface.md","body":"In the past, we used a custom-developed toolset with Xataface,T3Deploy and a settings migration tool as an easy way to manage TYPO3- and Magento-related configurations and to automatically create environments on our shared integration/dev-servers.
\nToday, there is no advantage or need for Xataface. Don't use it anymore.
\n"}],"name":"xataface","title":"xataface.md","ring":4,"quadrant":"platforms-and-aoe-services","body":"In the past, we used a custom-developed toolset with Xataface,T3Deploy and a settings migration tool as an easy way to manage TYPO3- and Magento-related configurations and to automatically create environments on our shared integration/dev-servers.
\nToday, there is no advantage or need for Xataface. Don't use it anymore.
\n","info":"","release":"2018-03-01","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/xataface.md"},{"flag":"default","featured":false,"revisions":[{"name":"xmlunit","release":"2017-03-01","title":"XMLUnit","ring":3,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2017-03-01/xmlunit.md","body":"XMLUnit is a Java and .NET testing framework for XML documents. It is very useful for performing contract tests with SOAP interfaces or other XML-based message types.
\nComparing strings of XML can lead to unstable tests because of the changing order of elements or changed values, etc. XMLUnit provides features to address these issues. It is possible to validate against an XML Schema, use XPath queries or compare against expected outcomes. It also comes with a nice diff-engine which makes it easy to check the parts of an XML document that are important.
\n"}],"name":"xmlunit","title":"xmlunit.md","ring":3,"quadrant":"tools","body":"XMLUnit is a Java and .NET testing framework for XML documents. It is very useful for performing contract tests with SOAP interfaces or other XML-based message types.
\nComparing strings of XML can lead to unstable tests because of the changing order of elements or changed values, etc. XMLUnit provides features to address these issues. It is possible to validate against an XML Schema, use XPath queries or compare against expected outcomes. It also comes with a nice diff-engine which makes it easy to check the parts of an XML document that are important.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/xmlunit.md"},{"flag":"default","featured":false,"revisions":[{"name":"yarn","release":"2018-03-01","title":"Yarn","ring":2,"quadrant":"tools","fileName":"/home/jarek/work/techradar-builder/radar/2018-03-01/yarn.md","body":"Yarn is a dependency management tool for frontend (node) projects similar to npm. It also uses the npm registry and \ninfrastructure. According to Yarn, the benefits are that Yarn is much faster, automatically writes a .lock file and \nbuilds up a local cache to be even faster when installing packages again.
\nAt AOE, we started using Yarn in different projects to evaluate if we can switch to Yarn for all projects.
\n"}],"name":"yarn","title":"yarn.md","ring":2,"quadrant":"tools","body":"Yarn is a dependency management tool for frontend (node) projects similar to npm. It also uses the npm registry and \ninfrastructure. According to Yarn, the benefits are that Yarn is much faster, automatically writes a .lock file and \nbuilds up a local cache to be even faster when installing packages again.
\nAt AOE, we started using Yarn in different projects to evaluate if we can switch to Yarn for all projects.
\n","info":"","release":"2019-11-01","fileName":"/home/jarek/work/techradar-builder/radar/2019-11-01/yarn.md"}],"releases":["2017-03-01","2018-03-01","2019-11-01"]} \ No newline at end of file diff --git a/tasks/radar.ts b/tasks/radar.ts index f1726c2..9cf84b2 100644 --- a/tasks/radar.ts +++ b/tasks/radar.ts @@ -141,6 +141,7 @@ const addRevisionToItem = ( let newItem: Item = { ...item, ...revision, + ring: revision.ring ? revision.ring : item.ring, // prevent empty revision ring overriding the one from the item. This one field is special as it's an enum. body: ignoreEmptyRevisionBody(revision, item), };