added sentry

Zimovskii Anatolii 2023-03-18 16:43:59 +03:00
commit a1f37f25b4
Signed by: stam
GPG Key ID: 9911D9EF664EEE14
53 changed files with 2435 additions and 2 deletions

View File

@ -29,6 +29,9 @@ jobs:
- name: "Build and Deploy Advanced Teamcity Server Image:" - name: "Build and Deploy Advanced Teamcity Server Image:"
run: cd linux/advanced/teamcity/server && pwd && make build && make deploy run: cd linux/advanced/teamcity/server && pwd && make build && make deploy
- name: "Build and Deploy Advanced Sentry Image:"
run: cd linux/advanced/sentry/latest && pwd && make build && make deploy
# - name: "Build and Deploy Advanced Redash Images:" # - name: "Build and Deploy Advanced Redash Images:"
# run: cd linux/advanced/redash && pwd && make sync && make patch && make build && make deploy # run: cd linux/advanced/redash && pwd && make sync && make patch && make build && make deploy

View File

@ -1,9 +1,12 @@
# Changelog
## 2023
* `feb-mar`
  * added new `sentry` advanced image
* `jan`
  * added new `jira` releases
  * added new `bitbucket` releases
  * added new `confluence` releases
  * added *first* `crowd` release (version `5.1.1`)
  * fixed `git` binary sources
-------------------------------------------------------------------
## 2022

View File

@ -78,6 +78,9 @@ advanced-teamcity-server-images:
advanced-redash-images:
	cd `pwd`/linux/advanced/redash && pwd && make sync && make patch && make build && make deploy
advanced-sentry-images:
	cd `pwd`/linux/advanced/sentry/latest && pwd && make sync && make patch && make build && make deploy
advanced-zabbix-images:
	cd `pwd`/linux/advanced/zabbix/latest/agent && pwd && make build && make deploy
	cd `pwd`/linux/advanced/zabbix/latest/agent2 && pwd && make build && make deploy
@ -536,6 +539,7 @@ bundle-atlassian:
@echo "=======================================" @echo "======================================="
cd `pwd`/linux/ecosystem/atlassian/bitbucket/latest && pwd && make build && make deploy cd `pwd`/linux/ecosystem/atlassian/bitbucket/latest && pwd && make build && make deploy
cd `pwd`/linux/ecosystem/atlassian/confluence/latest && pwd && make build && make deploy cd `pwd`/linux/ecosystem/atlassian/confluence/latest && pwd && make build && make deploy
cd `pwd`/linux/ecosystem/atlassian/crowd/latest && pwd && make build && make deploy
# cd `pwd`/linux/ecosystem/atlassian/crucible/latest && pwd && make build && make deploy # cd `pwd`/linux/ecosystem/atlassian/crucible/latest && pwd && make build && make deploy
# cd `pwd`/linux/ecosystem/atlassian/fisheye/latest && pwd && make build && make deploy # cd `pwd`/linux/ecosystem/atlassian/fisheye/latest && pwd && make build && make deploy
cd `pwd`/linux/ecosystem/atlassian/fisheye-crucible/latest && pwd && make build && make deploy cd `pwd`/linux/ecosystem/atlassian/fisheye-crucible/latest && pwd && make build && make deploy
@ -563,6 +567,7 @@ bundle-atlassian:
	cd `pwd`/linux/ecosystem/atlassian/confluence/8/8.0.0 && pwd && make build && make deploy
	cd `pwd`/linux/ecosystem/atlassian/confluence/8/8.0.1 && pwd && make build && make deploy
	cd `pwd`/linux/ecosystem/atlassian/confluence/8/8.0.2 && pwd && make build && make deploy
	cd `pwd`/linux/ecosystem/atlassian/crowd/5.1.1 && pwd && make build && make deploy
	cd `pwd`/linux/ecosystem/atlassian/jira/9/9.2.1 && pwd && make build && make deploy
	cd `pwd`/linux/ecosystem/atlassian/jira/9/9.3.0 && pwd && make build && make deploy
	cd `pwd`/linux/ecosystem/atlassian/jira/9/9.3.1 && pwd && make build && make deploy

View File

@ -0,0 +1,9 @@
FROM sentry
# Build dependencies for python-ldap, required by the sentry-ldap-auth plugin
RUN apt-get update && \
    apt-get install -y --no-install-recommends sudo gcc libsasl2-dev libldap2-dev libssl-dev
# Install the LDAP auth backend system-wide and under the `sentry` user
RUN pip install sentry-ldap-auth
RUN sudo -i -u sentry pip install sentry-ldap-auth

View File

@ -0,0 +1,19 @@
all: app

app:
	make build
	make deploy
	make clean

build:
	docker-compose build --compress --parallel --progress plain

deploy:
	docker-compose push

clean:
	docker container prune -f
	docker image prune -f
	docker network prune -f
	docker volume prune -f
	docker system prune -af

View File

@ -0,0 +1,6 @@
version: '3.9'
services:
  app:
    image: "epicmorg/sentry:latest"
    build:
      context: .

View File

@ -0,0 +1,60 @@
#############
# LDAP auth #
#############
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfUniqueNamesType
AUTH_LDAP_SERVER_URI = 'ldap://freeipa.example.com:389'
AUTH_LDAP_BIND_DN = 'krbprincipalname=sentry/freeipa.example.com@EXAMPLE.COM,cn=services,cn=accounts,dc=example,dc=com'
AUTH_LDAP_BIND_PASSWORD = 'qwerty123'
AUTH_LDAP_USER_SEARCH = LDAPSearch(
'cn=users,cn=accounts,dc=example,dc=com',
ldap.SCOPE_SUBTREE, '(uid=%(user)s)',
)
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
"cn=groups,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(objectClass=groupOfNames)"
)
AUTH_LDAP_GROUP_TYPE = GroupOfUniqueNamesType()
AUTH_LDAP_REQUIRE_GROUP = None
AUTH_LDAP_DENY_GROUP = None
AUTH_LDAP_USER_ATTR_MAP = {
"first_name": "givenname",
"last_name": "sn",
"email": "mail"
}
AUTH_LDAP_FIND_GROUP_PERMS = False
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600
AUTH_LDAP_DEFAULT_SENTRY_ORGANIZATION = 'Sentry'
AUTH_LDAP_SENTRY_ORGANIZATION_ROLE_TYPE = 'member'
AUTH_LDAP_SENTRY_ORGANIZATION_GLOBAL_ACCESS = True
AUTH_LDAP_SENTRY_SUBSCRIBE_BY_DEFAULT = False
AUTH_LDAP_SENTRY_USERNAME_FIELD = 'cn'
SENTRY_MANAGED_USER_FIELDS = ('email', 'first_name', 'last_name', 'password', )
AUTHENTICATION_BACKENDS = AUTHENTICATION_BACKENDS + (
'sentry_ldap_auth.backend.SentryLdapBackend',
)
# optional, for debugging
import logging
logger = logging.getLogger('django_auth_ldap')
logger.addHandler(logging.StreamHandler())
logger.addHandler(logging.FileHandler('/var/log/sentry_ldap.log'))
logger.setLevel('DEBUG')
LOGGING['overridable'] = ['sentry', 'django_auth_ldap']
LOGGING['loggers']['django_auth_ldap'] = {
'handlers': ['console'],
'level': 'DEBUG'
}
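# Usage sketch (assumptions: this snippet is saved as sentry.conf.py, and the stock `sentry`
# base image used above reads its configuration from /etc/sentry/sentry.conf.py):
#
#   docker run -d --name sentry-ldap \
#     -v $(pwd)/sentry.conf.py:/etc/sentry/sentry.conf.py:ro \
#     epicmorg/sentry:latest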

View File

@ -0,0 +1,58 @@
FROM epicmorg/debian:bullseye-jdk8
LABEL maintainer="Atlassian Crowd Server Team; EpicMorg DevTeam, developer@epicm.org"
ARG DEBIAN_FRONTEND=noninteractive
##################################################################
# ARGuments
##################################################################
#configured by dockerfile / .ENV
ARG RELEASE
ARG DOWNLOAD_URL
##################################################################
# Setup
##################################################################
ENV RUN_USER daemon
ENV RUN_GROUP daemon
ENV APP_NAME crowd
# https://confluence.atlassian.com/crowd/important-directories-and-files-78676537.html
ENV CROWD_HOME /var/atlassian/application-data/crowd
ENV CROWD_INSTALL_DIR /opt/atlassian/crowd
ENV CROWD_DB ${CROWD_INSTALL_DIR}/database
# Expose HTTP port
EXPOSE 8095
##################################################################
# Installing
##################################################################
RUN apt-get update \
&& apt-get upgrade -y \
&& apt-get install -y --no-install-recommends fontconfig python3 python3-jinja2 tini \
&& apt-get clean autoclean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
RUN mkdir -p ${CROWD_DB} && \
curl -L ${DOWNLOAD_URL} | tar -xz --strip-components=1 -C "${CROWD_INSTALL_DIR}" && \
chown -R $RUN_USER:$RUN_GROUP ${CROWD_INSTALL_DIR} && \
sed -i -e 's/-Xms\([0-9]\+[kmg]\) -Xmx\([0-9]\+[kmg]\)/-Xms\${JVM_MINIMUM_MEMORY:=\1} -Xmx\${JVM_MAXIMUM_MEMORY:=\2} \${JVM_SUPPORT_RECOMMENDED_ARGS} -Dcrowd.home=\${CROWD_HOME}/g' ${CROWD_INSTALL_DIR}/apache-tomcat/bin/setenv.sh && \
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \
apt clean -y && \
apt autoclean -y && \
rm -rfv /var/lib/apt/lists/* && \
rm -rfv /var/cache/apt/archives/*.deb
VOLUME ["${CROWD_HOME}"] # Must be declared after setting perms
VOLUME ["${CROWD_DB}"] # Must be declared after setting perms
WORKDIR $CROWD_HOME
CMD ["/entrypoint.py"]
ENTRYPOINT ["/usr/bin/tini", "--"]
COPY entrypoint.py \
shutdown-wait.sh \
shared-components/image/entrypoint_helpers.py /
COPY shared-components/support /opt/atlassian/support
COPY config/* /opt/atlassian/etc/
COPY . /tmp

View File

@ -0,0 +1,58 @@
FROM epicmorg/debian:bullseye-jdk11
LABEL maintainer="Atlassian Crowd Server Team; EpicMorg DevTeam, developer@epicm.org"
ARG DEBIAN_FRONTEND=noninteractive
##################################################################
# ARGuments
##################################################################
#configured by dockerfile / .ENV
ARG RELEASE
ARG DOWNLOAD_URL
##################################################################
# Setup
##################################################################
ENV RUN_USER daemon
ENV RUN_GROUP daemon
ENV APP_NAME crowd
# https://confluence.atlassian.com/crowd/important-directories-and-files-78676537.html
ENV CROWD_HOME /var/atlassian/application-data/crowd
ENV CROWD_INSTALL_DIR /opt/atlassian/crowd
ENV CROWD_DB ${CROWD_INSTALL_DIR}/database
# Expose HTTP port
EXPOSE 8095
##################################################################
# Installing
##################################################################
RUN apt-get update \
&& apt-get upgrade -y \
&& apt-get install -y --no-install-recommends fontconfig python3 python3-jinja2 tini \
&& apt-get clean autoclean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
RUN mkdir -p ${CROWD_DB} && \
curl -L ${DOWNLOAD_URL} | tar -xz --strip-components=1 -C "${CROWD_INSTALL_DIR}" && \
chown -R $RUN_USER:$RUN_GROUP ${CROWD_INSTALL_DIR} && \
sed -i -e 's/-Xms\([0-9]\+[kmg]\) -Xmx\([0-9]\+[kmg]\)/-Xms\${JVM_MINIMUM_MEMORY:=\1} -Xmx\${JVM_MAXIMUM_MEMORY:=\2} \${JVM_SUPPORT_RECOMMENDED_ARGS} -Dcrowd.home=\${CROWD_HOME}/g' ${CROWD_INSTALL_DIR}/apache-tomcat/bin/setenv.sh && \
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \
apt clean -y && \
apt autoclean -y && \
rm -rfv /var/lib/apt/lists/* && \
rm -rfv /var/cache/apt/archives/*.deb
VOLUME ["${CROWD_HOME}"] # Must be declared after setting perms
VOLUME ["${CROWD_DB}"] # Must be declared after setting perms
WORKDIR $CROWD_HOME
CMD ["/entrypoint.py"]
ENTRYPOINT ["/usr/bin/tini", "--"]
COPY entrypoint.py \
shutdown-wait.sh \
shared-components/image/entrypoint_helpers.py /
COPY shared-components/support /opt/atlassian/support
COPY config/* /opt/atlassian/etc/
COPY . /tmp

View File

@ -0,0 +1,19 @@
all: app

app:
	make build
	make deploy
	make clean

build:
	docker-compose build --compress --parallel --progress plain

deploy:
	docker-compose push

clean:
	docker container prune -f
	docker image prune -f
	docker network prune -f
	docker volume prune -f
	docker system prune -af

View File

@ -0,0 +1,311 @@
![Atlassian Crowd](https://wac-cdn.atlassian.com/dam/jcr:d2a1da52-ae52-4b06-9ab1-da8647a89653/crowd-icon-gradient-blue.svg?cdnVersion=696)
Crowd provides single sign-on and user identity that's easy to use.
Learn more about Crowd: [https://www.atlassian.com/software/crowd][1]
# Contents
[TOC]
# Overview
This Docker container makes it easy to get an instance of Crowd up and running.
Note: **Use Docker version >= 20.10.10**
# Quick Start
For the `CROWD_HOME` directory that is used to store application data (amongst other things) we recommend mounting a host directory as a [data volume](https://docs.docker.com/engine/tutorials/dockervolumes/#/data-volumes), or via a named volume.
To get started you can use a data volume, or named volumes. In this example we'll use named volumes.
docker volume create --name crowdVolume
docker run -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd
**Success**. Crowd is now available on [http://localhost:8095](http://localhost:8095)*
Please ensure your container has the necessary resources allocated to it. See [Supported Platforms][2] for further information.
_* Note: If you are using `docker-machine` on Mac OS X, please use `open http://$(docker-machine ip default):8095` instead._
## Memory / Heap Size
If you need to override Crowd's default memory allocation, you can control the minimum heap (Xms) and maximum heap (Xmx) via the below environment variables.
* `JVM_MINIMUM_MEMORY` (default: 384m)
The minimum heap size of the JVM
* `JVM_MAXIMUM_MEMORY` (default: 768m)
The maximum heap size of the JVM
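For example, to run with a larger heap (the sizes here are only illustrative):

docker run -e JVM_MINIMUM_MEMORY=1024m -e JVM_MAXIMUM_MEMORY=2048m -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd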
## Reverse Proxy Settings
If Crowd is run behind a reverse proxy server as [described here][3], then you need to specify extra options to make Crowd aware of the setup. They can be controlled via the below environment variables.
* `ATL_PROXY_NAME` (default: NONE)
The reverse proxy's fully qualified hostname. `CATALINA_CONNECTOR_PROXYNAME`
is also supported for backwards compatibility.
* `ATL_PROXY_PORT` (default: NONE)
The reverse proxy's port number via which Crowd is
accessed. `CATALINA_CONNECTOR_PROXYPORT` is also supported for backwards
compatibility.
* `ATL_TOMCAT_PORT` (default: 8095)
The port for Tomcat/Crowd to listen on. Depending on your container
deployment method this port may need to be
[exposed and published][docker-expose].
* `ATL_TOMCAT_SCHEME` (default: http)
The protocol via which Crowd is accessed. `CATALINA_CONNECTOR_SCHEME` is also
supported for backwards compatibility.
* `ATL_TOMCAT_SECURE` (default: false)
Set 'true' if `ATL_TOMCAT_SCHEME` is 'https'. `CATALINA_CONNECTOR_SECURE` is
also supported for backwards compatibility.
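For example, when Crowd is served at https://crowd.example.com through a reverse proxy (hostname and port are only illustrative):

docker run -e ATL_PROXY_NAME=crowd.example.com -e ATL_PROXY_PORT=443 -e ATL_TOMCAT_SCHEME=https -e ATL_TOMCAT_SECURE=true -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd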
The following Tomcat/Catalina options are also supported. For more information,
see https://tomcat.apache.org/tomcat-8.5-doc/config/index.html.
* `ATL_TOMCAT_MGMT_PORT` (default: 8000)
* `ATL_TOMCAT_MAXTHREADS` (default: 100)
* `ATL_TOMCAT_MINSPARETHREADS` (default: 10)
* `ATL_TOMCAT_CONNECTIONTIMEOUT` (default: 20000)
* `ATL_TOMCAT_ENABLELOOKUPS` (default: false)
* `ATL_TOMCAT_PROTOCOL` (default: HTTP/1.1)
* `ATL_TOMCAT_ACCEPTCOUNT` (default: 10)
* `ATL_TOMCAT_MAXHTTPHEADERSIZE` (default: 8192)
## JVM Configuration
If you need to pass additional JVM arguments to Crowd, such as specifying a custom trust store, you can add them via the below environment variable
* `JVM_SUPPORT_RECOMMENDED_ARGS`
Additional JVM arguments for Crowd
Example:
docker run -e JVM_SUPPORT_RECOMMENDED_ARGS=-Djavax.net.ssl.trustStore=/var/atlassian/application-data/crowd/cacerts -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd
## Data Center configuration
This docker image can be run as part of a [Data Center][4] cluster. You can
specify the following properties to start Crowd as a Data Center node,
instead of manually configuring a cluster. See [Installing Crowd Data
Center][5] for more information.
## Container Configuration
* `SET_PERMISSIONS` (default: true)
Define whether to set home directory permissions on startup. Set to `false` to disable
this behaviour.
## Advanced Configuration
As mentioned at the top of this section, the settings from the environment are
used to populate the application configuration on the container startup. However
in some cases you may wish to customise the settings in ways that are not
supported by the environment variables above. In this case, it is possible to
modify the base templates to add your own configuration. There are three main
ways of doing this; modify our repository to your own image, build a new image
from the existing one, or provide new templates at startup. We will briefly
outline these methods here, but in practice how you do this will depend on your
needs.
#### Building your own image
* Clone the Atlassian repository at https://bitbucket.org/atlassian-docker/docker-atlassian-crowd/
* Modify or replace the [Jinja](https://jinja.palletsprojects.com/) templates
under `config`; _NOTE_: The files must have the `.j2` extensions. However you
don't have to use template variables if you don't wish.
* Build the new image with e.g: `docker build --tag my-crowd-image --build-arg CROWD_VERSION=3.x.x .`
* Optionally push to a registry, and deploy.
#### Build a new image from the existing one
* Create a new `Dockerfile`, which starts with the Atlassian Crowd base image e.g: `FROM atlassian/crowd:latest`.
* Use a `COPY` line to overwrite the provided templates.
* Build, push and deploy the new image as above.
#### Overwrite the templates at runtime
There are two main ways of doing this:
* If your container is going to be long-lived, you can create it, modify the
installed templates under `/opt/atlassian/etc/`, and then run it.
* Alternatively, you can create a volume containing your alternative templates,
and mount it over the provided templates at runtime
with `--volume my-config:/opt/atlassian/etc/`.
# Shared directory and user IDs
By default the Crowd application runs as the user `crowd`, with a UID
and GID of 2004. Consequently this UID must have write access to the shared
filesystem. If for some reason a different UID must be used, there are a number
of options available:
* The Docker image can be rebuilt with a different UID.
* Under Linux, the UID can be remapped using
[user namespace remapping][7].
To preserve strict permissions for certain configuration files, this container starts as
`root` to perform bootstrapping before running Crowd under a non-privileged user
account. If you wish to start the container as a non-root user, please note that Tomcat
configuration will be skipped and a warning will be logged. You may still apply custom
configuration in this situation by mounting configuration files directly, e.g.
by mounting your own server.xml file directly to
`/opt/atlassian/crowd/apache-tomcat/conf/server.xml`.
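A sketch of such a bind mount (the local path is a placeholder; `--user 2004:2004` matches the default `crowd` UID/GID mentioned above):

docker run --user 2004:2004 -v /path/to/my-server.xml:/opt/atlassian/crowd/apache-tomcat/conf/server.xml -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd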
# Upgrade
To upgrade to a more recent version of Crowd you can simply stop the `crowd` container and start a new one based on a more recent image:
docker stop crowd
docker rm crowd
docker run ... (See above)
As your data is stored in the data volume directory on the host it will still be available after the upgrade.
_Note: Please make sure that you **don't** accidentally remove the `crowd` container and its volumes using the `-v` option._
# Backup
For evaluations you can use the built-in database that will store its files in the Crowd home directory. In that case it is sufficient to create a backup archive of the docker volume.
If you're using an external database, you can configure Crowd to make a backup automatically each night. This will back up the current state, including the database to the `crowdVolume` docker volume, which can then be archived. Alternatively you can backup the database separately, and continue to create a backup archive of the docker volume to back up the Crowd Home directory.
Read more about data recovery and backups: [Backing Up and Restoring Data][6]
# Versioning
The `latest` tag matches the most recent release of Atlassian Crowd. Thus `atlassian/crowd:latest` will use the newest version of Crowd available.
Alternatively you can use a specific major, major.minor, or major.minor.patch version of Crowd by using a version number tag:
* `atlassian/crowd:3`
* `atlassian/crowd:3.2`
* `atlassian/crowd:3.2.3`
All versions from 3.0+ are available
# Supported JDK versions
All the Atlassian Docker images are now JDK11 only, and generated from the
[official Eclipse Temurin OpenJDK Docker images](https://hub.docker.com/_/eclipse-temurin).
The Docker images follow the [Atlassian Support end-of-life
policy](https://confluence.atlassian.com/support/atlassian-support-end-of-life-policy-201851003.html);
images for unsupported versions of the products remain available but will no longer
receive updates or fixes.
Historically, we have also generated other versions of the images, including
JDK8, Alpine, and 'slim' versions of the JDK. These legacy images still exist in
Docker Hub, however they should be considered deprecated, and do not receive
updates or fixes.
If for some reason you need a different version, see "Building your own image"
above.
# Supported architectures
Currently the Atlassian Docker images are built for the `linux/amd64` target
platform; we do not have other architectures on our roadmap at this
point. However the Dockerfiles and support tooling have now had all
architecture-specific components removed, so if necessary it is possible to
build images for any platform supported by Docker.
## Building on the target architecture
Note: This method is known to work on Mac M1 and AWS ARM64 machines, but has not
been extensively tested.
The simplest method of getting a platform image is to build it on a target
machine. The following assumes you have git and Docker installed. You will also
need to know which version of Crowd you want to build; substitute
`CROWD_VERSION=x.x.x` with your required version:
```
git clone --recurse-submodule https://bitbucket.org/atlassian-docker/docker-atlassian-crowd.git
cd docker-atlassian-crowd
docker build --tag my-image --build-arg CROWD_VERSION=x.x.x .
```
This image can be pushed up to your own Docker Hub or private repository.
# Troubleshooting
These images include built-in scripts to assist in performing common JVM diagnostic tasks.
## Thread dumps
`/opt/atlassian/support/thread-dumps.sh` can be run via `docker exec` to easily trigger the collection of thread
dumps from the containerized application. For example:
docker exec my_crowd /opt/atlassian/support/thread-dumps.sh
By default this script will collect 10 thread dumps at 5 second intervals. This can
be overridden by passing a custom value for the count and interval, by using `-c` / `--count`
and `-i` / `--interval` respectively. For example, to collect 20 thread dumps at 3 second intervals:
docker exec my_container /opt/atlassian/support/thread-dumps.sh --count 20 --interval 3
Thread dumps will be written to `$APP_HOME/thread_dumps/<date>`.
Note: By default this script will also capture output from top run in 'Thread-mode'. This can
be disabled by passing `-n` / `--no-top`
## Heap dump
`/opt/atlassian/support/heap-dump.sh` can be run via `docker exec` to easily trigger the collection of a heap
dump from the containerized application. For example:
docker exec my_container /opt/atlassian/support/heap-dump.sh
A heap dump will be written to `$APP_HOME/heap.bin`. If a file already exists at this
location, use `-f` / `--force` to overwrite the existing heap dump file.
## Manual diagnostics
The `jcmd` utility is also included in these images and can be used by starting a `bash` shell
in the running container:
docker exec -it my_container /bin/bash
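From that shell, `jcmd` with no arguments lists the JVM processes and their PIDs, and individual diagnostic commands can then be issued against the chosen PID (the command below is just one example):

jcmd <pid> VM.flags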
# Support
For product support, go to:
* https://support.atlassian.com/crowd/
You can also visit the [Atlassian Data Center on
Kubernetes](https://community.atlassian.com/t5/Atlassian-Data-Center-on/gh-p/DC_Kubernetes)
forum for discussion on running Atlassian Data Center products in containers.
# Changelog
For a detailed list of changes to the Docker image configuration see [the Git
commit history](https://bitbucket.org/atlassian-docker/docker-atlassian-crowd/commits/).
# License
Copyright © 2019 Atlassian Corporation Pty Ltd.
Licensed under the Apache License, Version 2.0.
[1]: https://www.atlassian.com/software/crowd
[2]: https://confluence.atlassian.com/crowd/supported-platforms-191851.html
[3]: https://confluence.atlassian.com/crowd031/integrating-crowd-with-apache-949753124.html
[4]: https://confluence.atlassian.com/crowd/crowd-data-center-935372453.html
[5]: https://confluence.atlassian.com/crowd/installing-crowd-data-center-935369773.html
[6]: https://confluence.atlassian.com/crowd/backing-up-and-restoring-data-36470797.html
[7]: https://docs.docker.com/engine/security/userns-remap/

View File

@ -0,0 +1,47 @@
<?xml version="1.0" encoding="UTF-8"?>
<Server port="{{ atl_tomcat_mgmt_port | default('8020') }}" shutdown="SHUTDOWN">
<Service name="Catalina">
<Connector acceptCount="{{ atl_tomcat_acceptcount | default('100') }}"
connectionTimeout="{{ atl_tomcat_connectiontimeout | default('20000') }}"
disableUploadTimeout="true"
enableLookups="{{ atl_tomcat_enablelookups | default('false') }}"
protocol="{{ atl_tomcat_protocol | default('HTTP/1.1') }}"
maxHttpHeaderSize="{{ atl_tomcat_maxhttpheadersize | default('8192') }}"
maxThreads="{{ atl_tomcat_maxthreads | default('150') }}"
minSpareThreads="{{ atl_tomcat_minsparethreads | default('25') }}"
port="{{ atl_tomcat_port | default('8095') }}"
secure="{{ atl_tomcat_secure | default(catalina_connector_secure) | default('false') }}"
scheme="{{ atl_tomcat_scheme | default(catalina_connector_scheme) | default('http') }}"
proxyName="{{ atl_proxy_name | default(catalina_connector_proxyname) | default('') }}"
proxyPort="{{ atl_proxy_port | default(catalina_connector_proxyport) | default('') }}"
redirectPort="{{ atl_tomcat_redirectport | default('8443') }}"
useBodyEncodingForURI="true"
URIEncoding="UTF-8"
compression="on"
sendReasonPhrase="true"
compressableMimeType="text/html,text/xml,application/xml,text/plain,text/css,application/json,application/javascript,application/x-javascript" />
<Engine defaultHost="localhost" name="Catalina">
<Host appBase="webapps" autoDeploy="true" name="localhost" unpackWARs="true"/>
</Engine>
<!-- To connect to an external web server (typically Apache) -->
<!-- Define an AJP 1.3 Connector on port 8009 -->
<!--
<Connector port="8009" enableLookups="false" redirectPort="8443" protocol="AJP/1.3" />
-->
</Service>
<!-- Security listener. Documentation at /docs/config/listeners.html
<Listener className="org.apache.catalina.security.SecurityListener" />
-->
<!--APR library loader. Documentation at /docs/apr.html -->
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<!-- Prevent memory leaks due to use of particular java/javax APIs-->
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
</Server>

View File

@ -0,0 +1,26 @@
version: '3.9'
services:
  app:
    image: "epicmorg/crowd:${RELEASE}"
    build:
      context: .
      args:
        RELEASE: ${RELEASE}
        DOWNLOAD_URL: ${DOWNLOAD_URL}
  app-jdk11:
    image: "epicmorg/crowd:${RELEASE}-jdk11"
    build:
      context: .
      dockerfile: Dockerfile.jdk11
      args:
        RELEASE: ${RELEASE}
        DOWNLOAD_URL: ${DOWNLOAD_URL}
#  app-jdk17:
#    image: "epicmorg/crowd:${RELEASE}-jdk17"
#    build:
#      context: .
#      dockerfile: Dockerfile.jdk17
#      args:
#        RELEASE: ${RELEASE}
#        DOWNLOAD_URL: ${DOWNLOAD_URL}
#

View File

@ -0,0 +1,13 @@
#!/usr/bin/python3 -B
from entrypoint_helpers import env, gen_cfg, gen_container_id, exec_app
RUN_USER = env['run_user']
RUN_GROUP = env['run_group']
CROWD_INSTALL_DIR = env['crowd_install_dir']
CROWD_HOME = env['crowd_home']
gen_cfg('server.xml.j2', f'{CROWD_INSTALL_DIR}/apache-tomcat/conf/server.xml')
exec_app([f'{CROWD_INSTALL_DIR}/start_crowd.sh', '-fg'], CROWD_HOME, name='Crowd', env_cleanup=True)

View File

@ -0,0 +1,5 @@
__pycache__/
.pytest_cache/
Pipfile
Pipfile.lock
*.pyc

View File

@ -0,0 +1,13 @@
Copyright © 2019 Atlassian Corporation Pty Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You may
obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.

View File

@ -0,0 +1,36 @@
# Overview
This repository provides common utilities & components for building and testing Docker
images for Atlassian's Server and Data Center products.
The following components are provided:
### Image builds
* Support tools
Scripts for performing common diagnostic operations, i.e. taking thread dumps and heap
dumps.
* Entrypoint helpers
Common components for bootstrapping and starting apps.
* README publishing
Utility for publishing READMEs to Docker Hub, without relying on Docker Hub's own
automated builds.
### Image testing
* Fixtures
Common testing fixtures that can be reused for testing Docker builds of Atlassian
apps.
* Helpers
Helper functions for parsing configuration files, checking running processes and
retrieving http responses.

View File

@ -0,0 +1,146 @@
import sys
import os
import pwd
import shutil
import logging
import jinja2 as j2
import uuid
import base64
logging.basicConfig(level=logging.DEBUG)
######################################################################
# Setup inputs and outputs
# Import all ATL_* and Dockerfile environment variables. We lower-case
# these for compatibility with Ansible template convention. We also
# support CATALINA variables from older versions of the Docker images
# for backwards compatibility, if the new version is not set.
env = {k.lower(): v
for k, v in os.environ.items()}
# Setup Jinja2 for templating
jenv = j2.Environment(
loader=j2.FileSystemLoader('/opt/atlassian/etc/'),
autoescape=j2.select_autoescape(['xml']))
######################################################################
# Utils
def set_perms(path, user, group, mode):
    try:
        shutil.chown(path, user=user, group=group)
    except PermissionError:
        logging.warning(f"Could not chown path {path} to {user}:{group} due to insufficient permissions.")
    try:
        os.chmod(path, mode)
    except PermissionError:
        logging.warning(f"Could not chmod path {path} to {mode} due to insufficient permissions.")
def set_tree_perms(path, user, group, mode):
    set_perms(path, user, group, mode)
    # Recurse into the tree, applying the same ownership and mode to every entry
    for dirpath, dirnames, filenames in os.walk(path):
        set_perms(dirpath, user, group, mode)
        for filename in filenames:
            set_perms(os.path.join(dirpath, filename), user, group, mode)
def check_perms(path, uid, gid, mode):
    stat = os.stat(path)
    return all([
        stat.st_uid == int(uid),
        stat.st_gid == int(gid),
        stat.st_mode & mode == mode
    ])

def gen_cfg(tmpl, target, user='root', group='root', mode=0o644, overwrite=True):
    if not overwrite and os.path.exists(target):
        logging.info(f"{target} exists; skipping.")
        return
    logging.info(f"Generating {target} from template {tmpl}")
    cfg = jenv.get_template(tmpl).render(env)
    try:
        with open(target, 'w') as fd:
            fd.write(cfg)
    except (OSError, PermissionError):
        logging.warning(f"Permission problem writing '{target}'; skipping")
    else:
        set_tree_perms(target, user, group, mode)

def gen_container_id():
    env['uuid'] = uuid.uuid4().hex
    with open('/etc/container_id') as fd:
        lcid = fd.read()
        if lcid != '':
            env['local_container_id'] = lcid

def str2bool(v):
    if str(v).lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    return False
def unset_secure_vars():
    secure_keywords = ('PASS', 'SECRET', 'TOKEN')
    # Iterate over a copy of the keys so entries can be deleted while looping
    for key in list(os.environ):
        if any(kw in key.upper() for kw in secure_keywords):
            logging.warning(f"Unsetting environment var {key}")
            del os.environ[key]
######################################################################
# Application startup utilities
def check_permissions(home_dir):
    """Ensure the home directory is set to minimal permissions"""
    if str2bool(env.get('set_permissions') or True) and check_perms(home_dir, env['run_uid'], env['run_gid'], 0o700) is False:
        set_tree_perms(home_dir, env['run_user'], env['run_group'], 0o700)
        logging.info(f"User is currently root. Will change directory ownership and downgrade run user to {env['run_user']}")

def drop_root(run_user):
    logging.info(f"User is currently root. Will downgrade run user to {run_user}")
    pwd_entry = pwd.getpwnam(run_user)
    os.environ['USER'] = run_user
    os.environ['HOME'] = pwd_entry.pw_dir
    os.environ['SHELL'] = pwd_entry.pw_shell
    os.environ['LOGNAME'] = run_user
    os.setgid(pwd_entry.pw_gid)
    os.setuid(pwd_entry.pw_uid)

def write_pidfile():
    app_home = env[f"{env['app_name'].lower()}_home"]
    pidfile = f"{app_home}/docker-app.pid"
    with open(pidfile, 'wt', encoding='utf-8') as fd:
        pid = os.getpid()
        fd.write(str(pid))

def exec_app(start_cmd_v, home_dir, name='app', env_cleanup=False):
    """Run the supplied application startup command.

    Arguments:
    start_cmd_v -- A list of the command and its arguments.
    home_dir -- Application home directory.
    name -- (Optional) The name to display in the log message.
    env_cleanup -- (Default: False) Remove possibly sensitive env-vars.
    """
    if os.getuid() == 0:
        check_permissions(home_dir)
        drop_root(env['run_user'])
    write_pidfile()
    if env_cleanup:
        unset_secure_vars()
    cmd = start_cmd_v[0]
    args = start_cmd_v
    logging.info(f"Running {name} with command '{cmd}', arguments {args}")
    os.execv(cmd, args)

View File

@ -0,0 +1,63 @@
# -------------------------------------------------------------------------------------
# Common bootstrapping for support scripts (get app details: home directory, PID, etc.)
# -------------------------------------------------------------------------------------
# Set up Java utils
JCMD="${JAVA_HOME}/bin/jcmd"
# Set up app info
APP_NAME="$(set | grep '_INSTALL_DIR' | awk -F'_' '{print $1}')"
case "${APP_NAME}" in
BITBUCKET )
BOOTSTRAP_PROC="com.atlassian.bitbucket.internal.launcher.BitbucketServerLauncher"
;;
* )
BOOTSTRAP_PROC="org.apache.catalina.startup.Bootstrap"
;;
esac
# Get value of <app>_INSTALL_DIR
function get_app_install_dir {
local APP_INSTALL_DIR=${APP_NAME}_INSTALL_DIR
echo ${!APP_INSTALL_DIR}
}
# Get value of <app>_HOME
function get_app_home {
local APP_HOME=${APP_NAME}_HOME
echo ${!APP_HOME}
}
# Get app PID. APP_PID is the root process. JVM_APP_PID will generally
# be the same as APP_PID; the exception is Bitbucket running with
# Elasticsearch enabled.
JVM_APP_PID=$(${JCMD} | grep "${BOOTSTRAP_PROC}" | awk '{print $1}')
PIDFILE="$(get_app_home)/docker-app.pid"
if [[ -f $PIDFILE ]]; then
APP_PID=$(<$PIDFILE)
else
APP_PID=$JVM_APP_PID
fi
# Set valid getopt options
function set_valid_options {
OPTS=$(getopt -o "$1" --long "$2" -n 'parse-options' -- "$@")
if [ $? != 0 ]; then
echo "Failed parsing options." >&2
exit 1
fi
eval set -- "$OPTS"
}
# Run command(s)
function run_as_runuser {
if [ $(id -u) = 0 ]; then
su "${RUN_USER}" -c '"$@"' -- argv0 "$@"
else
$@
fi
}

View File

@ -0,0 +1,62 @@
#!/bin/bash
# -------------------------------------------------------------------------------------
# Heap collector for containerized Atlassian applications
#
# This script can be run via `docker exec` to easily trigger the collection of a heap
# dump from the containerized application. For example:
#
# $ docker exec -it my_jira /opt/atlassian/support/heap-dump.sh
#
# A heap dump will be written to $APP_HOME/heap.bin. If a file already exists at this
# location, use -f/--force to overwrite the existing heap dump file.
#
# -------------------------------------------------------------------------------------
set -euo pipefail
# Set up common vars like APP_NAME, APP_HOME, APP_PID
SCRIPT_DIR=$(dirname "$0")
source "${SCRIPT_DIR}/common.sh"
# Set up script opts
set_valid_options "f" "force"
# Set defaults
OVERWRITE="false"
# Parse opts
while true; do
case "${1-}" in
-f | --force ) OVERWRITE="true"; shift ;;
* ) break ;;
esac
done
echo "Atlassian heap dump collector"
echo "App: ${APP_NAME}"
echo "Run user: ${RUN_USER}"
echo
OUT_FILE="$(get_app_home)/heap.bin"
if [[ -f "${OUT_FILE}" ]]; then
echo "A previous heap dump already exists at ${OUT_FILE}."
if [[ "${OVERWRITE}" == "true" ]]; then
echo "Removing previous heap dump file"
echo
rm "${OUT_FILE}"
else
echo "Use -f/--force to overwrite the existing heap dump."
exit
fi
fi
echo "Generating heap dump"
run_as_runuser ${JCMD} ${JVM_APP_PID} GC.heap_dump -all ${OUT_FILE} > /dev/null
echo
echo "Heap dump has been written to ${OUT_FILE}"

View File

@ -0,0 +1,22 @@
#!/bin/bash
# Send the specified signal to the main application process.
#
# If 'wait' is added as a second parameter, wait for the process to
# terminate. NOTE: This waits indefinitely, but may be killed by
# higher-level processes (e.g. Docker/Kubernetes)
set -e
SIG=$1
WAIT=$2
SHDIR=$(dirname $0)
source ${SHDIR}/common.sh
kill -${SIG} ${JVM_APP_PID}
if [[ "${WAIT}" == "wait" ]]; then
${SHDIR}/wait-pid.sh $JVM_APP_PID
fi

View File

@ -0,0 +1,74 @@
#!/bin/bash
# -------------------------------------------------------------------------------------
# Thread dumps collector for containerized Atlassian applications
#
# This script can be run via `docker exec` to easily trigger the collection of thread
# dumps from the containerized application. For example:
#
# $ docker exec my_jira /opt/atlassian/support/thread-dumps.sh
#
# By default this script will collect 10 thread dumps at a 5 second interval. This can
# be overridden by passing a custom value for the count and interval, respectively. For
# example, to collect 20 thread dumps at a 3 second interval:
#
# $ docker exec my_jira /opt/atlassian/support/thread-dumps.sh -c 20 -i 3
#
# Note: By default this script will capture output from top run in 'Thread-mode'. This can
# be disabled by passing --no-top
# -------------------------------------------------------------------------------------
set -euo pipefail
# Set up common vars like APP_NAME, APP_HOME, APP_PID
SCRIPT_DIR=$(dirname "$0")
source "${SCRIPT_DIR}/common.sh"
# Set up script opts
set_valid_options "c:i:n" "count:,interval:,no-top"
# Set defaults
COUNT="10"
INTERVAL="5"
NO_TOP="false"
# Parse opts
while true; do
case "${1-}" in
-c | --count ) COUNT="$2"; shift 2 ;;
-i | --interval ) INTERVAL="$2"; shift 2 ;;
-n | --no-top ) NO_TOP="true"; shift ;;
* ) break ;;
esac
done
echo "Atlassian thread dump collector"
echo "App: ${APP_NAME}"
echo "Run user: ${RUN_USER}"
echo
echo "${COUNT} thread dumps will be generated at a ${INTERVAL} second interval"
if [[ "${NO_TOP}" == "false" ]]; then
echo "top 'Threads-mode' output will also be collected for ${APP_NAME} with every thread dump"
fi
echo
OUT_DIR="$(get_app_home)/thread_dumps/$(date +'%Y-%m-%d_%H-%M-%S')"
run_as_runuser mkdir -p ${OUT_DIR}
for i in $(seq ${COUNT}); do
echo "Generating thread dump ${i} of ${COUNT}"
if [[ "${NO_TOP}" == "false" ]]; then
run_as_runuser top -b -H -p $JVM_APP_PID -n 1 > "${OUT_DIR}/${APP_NAME}_CPU_USAGE.$(date +%s).txt"
fi
run_as_runuser ${JCMD} ${JVM_APP_PID} Thread.print -l > "${OUT_DIR}/${APP_NAME}_THREADS.$(date +%s).txt"
if [[ ! "${i}" == "${COUNT}" ]]; then
sleep ${INTERVAL}
fi
done
echo
echo "Thread dumps have been written to ${OUT_DIR}"

View File

@ -0,0 +1,13 @@
#!/bin/bash
# Wait for the specified process ID to terminate. NOTE: This waits
# indefinitely, but may be killed by higher-level processes
# (e.g. Docker/Kubernetes)
set -e
WAIT_PID=$1
while [[ -e /proc/${WAIT_PID} ]]; do
/bin/sleep 0.5
done

View File

@ -0,0 +1,23 @@
#!/bin/bash
set -e
host=$1
port=$2
secs=120
echo -n "Waiting for TCP connection to $host:$port..."
for i in `seq $secs`; do
if nc -z $host $port > /dev/null ; then
echo OK
exit 0
fi
echo -n .
/bin/sleep 1
done
echo FAILED
exit -1

View File

@ -0,0 +1,30 @@
#!/bin/bash
##############################################################################
#
# This script will initiate a clean shutdown of the application, and
# then wait for the process to finish before returning. This is
# primarily intended for use in environments that provide an orderly
# shutdown mechanism, in particular the Kubernetes `preStop` hook.
#
# This script will wait for the process to exit indefinitely; however
# most run-time tools (including Docker and Kubernetes) have their own
# shutdown timeouts that will send a SIGKILL if the grace period is
# exceeded.
#
##############################################################################
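# Usage sketch (assumptions: the container runs under Kubernetes, and the script sits at
# /shutdown-wait.sh, where the Dockerfile in this commit copies it):
#
#   lifecycle:
#     preStop:
#       exec:
#         command: ["/shutdown-wait.sh"]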
set -e
source /opt/atlassian/support/common.sh
echo "Shutting down Crowd..."
echo ${JVM_APP_PID} > ${CROWD_INSTALL_DIR}/apache-tomcat/work/catalina.pid
if [[ "${UID}" == 0 ]]; then
/bin/su ${RUN_USER} -c ${CROWD_INSTALL_DIR}/stop_crowd.sh;
else
${CROWD_INSTALL_DIR}/stop_crowd.sh;
fi
/opt/atlassian/support/wait-pid.sh ${JVM_APP_PID}

View File

@ -0,0 +1,3 @@
.git
scripts
.idea

View File

@ -0,0 +1,4 @@
RELEASE=5.1.1
DOWNLOAD_URL=https://product-downloads.atlassian.com/software/crowd/downloads/atlassian-crowd-5.1.1.tar.gz
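# Usage sketch (assumption: commands are run from this directory, whose docker-compose.yml
# passes RELEASE and DOWNLOAD_URL through as build args):
#   docker-compose build --compress --parallel --progress plain
#   docker-compose push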

View File

@ -0,0 +1,58 @@
FROM epicmorg/debian:bullseye-jdk8
LABEL maintainer="Atlassian Crowd Server Team; EpicMorg DevTeam, developer@epicm.org"
ARG DEBIAN_FRONTEND=noninteractive
##################################################################
# ARGuments
##################################################################
#configured by dockerfile / .ENV
ARG RELEASE
ARG DOWNLOAD_URL
##################################################################
# Setup
##################################################################
ENV RUN_USER daemon
ENV RUN_GROUP daemon
ENV APP_NAME crowd
# https://confluence.atlassian.com/crowd/important-directories-and-files-78676537.html
ENV CROWD_HOME /var/atlassian/application-data/crowd
ENV CROWD_INSTALL_DIR /opt/atlassian/crowd
ENV CROWD_DB ${CROWD_INSTALL_DIR}/database
# Expose HTTP port
EXPOSE 8095
##################################################################
# Installing
##################################################################
RUN apt-get update \
&& apt-get upgrade -y \
&& apt-get install -y --no-install-recommends fontconfig python3 python3-jinja2 tini \
&& apt-get clean autoclean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
RUN mkdir -p ${CROWD_DB} && \
curl -L ${DOWNLOAD_URL} | tar -xz --strip-components=1 -C "${CROWD_INSTALL_DIR}" && \
chown -R $RUN_USER:$RUN_GROUP ${CROWD_INSTALL_DIR} && \
sed -i -e 's/-Xms\([0-9]\+[kmg]\) -Xmx\([0-9]\+[kmg]\)/-Xms\${JVM_MINIMUM_MEMORY:=\1} -Xmx\${JVM_MAXIMUM_MEMORY:=\2} \${JVM_SUPPORT_RECOMMENDED_ARGS} -Dcrowd.home=\${CROWD_HOME}/g' ${CROWD_INSTALL_DIR}/apache-tomcat/bin/setenv.sh && \
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \
apt clean -y && \
apt autoclean -y && \
rm -rfv /var/lib/apt/lists/* && \
rm -rfv /var/cache/apt/archives/*.deb
VOLUME ["${CROWD_HOME}"] # Must be declared after setting perms
VOLUME ["${CROWD_DB}"] # Must be declared after setting perms
WORKDIR $CROWD_HOME
CMD ["/entrypoint.py"]
ENTRYPOINT ["/usr/bin/tini", "--"]
COPY entrypoint.py \
shutdown-wait.sh \
shared-components/image/entrypoint_helpers.py /
COPY shared-components/support /opt/atlassian/support
COPY config/* /opt/atlassian/etc/
COPY . /tmp

View File

@ -0,0 +1,58 @@
FROM epicmorg/debian:bullseye-jdk11
LABEL maintainer="Atlassian Crowd Server Team; EpicMorg DevTeam, developer@epicm.org"
ARG DEBIAN_FRONTEND=noninteractive
##################################################################
# ARGuments
##################################################################
#configured by dockerfile / .ENV
ARG RELEASE
ARG DOWNLOAD_URL
##################################################################
# Setup
##################################################################
ENV RUN_USER daemon
ENV RUN_GROUP daemon
ENV APP_NAME crowd
# https://confluence.atlassian.com/crowd/important-directories-and-files-78676537.html
ENV CROWD_HOME /var/atlassian/application-data/crowd
ENV CROWD_INSTALL_DIR /opt/atlassian/crowd
ENV CROWD_DB ${CROWD_INSTALL_DIR}/database
# Expose HTTP port
EXPOSE 8095
##################################################################
# Installing
##################################################################
RUN apt-get update \
&& apt-get upgrade -y \
&& apt-get install -y --no-install-recommends fontconfig python3 python3-jinja2 tini \
&& apt-get clean autoclean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
RUN mkdir -p ${CROWD_DB} && \
curl -L ${DOWNLOAD_URL} | tar -xz --strip-components=1 -C "${CROWD_INSTALL_DIR}" && \
chown -R $RUN_USER:$RUN_GROUP ${CROWD_INSTALL_DIR} && \
sed -i -e 's/-Xms\([0-9]\+[kmg]\) -Xmx\([0-9]\+[kmg]\)/-Xms\${JVM_MINIMUM_MEMORY:=\1} -Xmx\${JVM_MAXIMUM_MEMORY:=\2} \${JVM_SUPPORT_RECOMMENDED_ARGS} -Dcrowd.home=\${CROWD_HOME}/g' ${CROWD_INSTALL_DIR}/apache-tomcat/bin/setenv.sh && \
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \
apt clean -y && \
apt autoclean -y && \
rm -rfv /var/lib/apt/lists/* && \
rm -rfv /var/cache/apt/archives/*.deb
VOLUME ["${CROWD_HOME}"] # Must be declared after setting perms
VOLUME ["${CROWD_DB}"] # Must be declared after setting perms
WORKDIR $CROWD_HOME
CMD ["/entrypoint.py"]
ENTRYPOINT ["/usr/bin/tini", "--"]
COPY entrypoint.py \
shutdown-wait.sh \
shared-components/image/entrypoint_helpers.py /
COPY shared-components/support /opt/atlassian/support
COPY config/* /opt/atlassian/etc/
COPY . /tmp

View File

@ -0,0 +1,19 @@
all: app

app:
	make build
	make deploy
	make clean

build:
	docker-compose build --compress --parallel --progress plain

deploy:
	docker-compose push

clean:
	docker container prune -f
	docker image prune -f
	docker network prune -f
	docker volume prune -f
	docker system prune -af

View File

@ -0,0 +1,311 @@
![Atlassian Crowd](https://wac-cdn.atlassian.com/dam/jcr:d2a1da52-ae52-4b06-9ab1-da8647a89653/crowd-icon-gradient-blue.svg?cdnVersion=696)
Crowd provides single sign-on and user identity that's easy to use.
Learn more about Crowd: [https://www.atlassian.com/software/crowd][1]
# Contents
[TOC]
# Overview
This Docker container makes it easy to get an instance of Crowd up and running.
Note: **Use Docker version >= 20.10.10**
# Quick Start
For the `CROWD_HOME` directory that is used to store application data (amongst other things) we recommend mounting a host directory as a [data volume](https://docs.docker.com/engine/tutorials/dockervolumes/#/data-volumes), or via a named volume.
To get started you can use a data volume, or named volumes. In this example we'll use named volumes.
docker volume create --name crowdVolume
docker run -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd
**Success**. Crowd is now available on [http://localhost:8095](http://localhost:8095)*
Please ensure your container has the necessary resources allocated to it. See [Supported Platforms][2] for further information.
_* Note: If you are using `docker-machine` on Mac OS X, please use `open http://$(docker-machine ip default):8095` instead._
## Memory / Heap Size
If you need to override Crowd's default memory allocation, you can control the minimum heap (Xms) and maximum heap (Xmx) via the below environment variables.
* `JVM_MINIMUM_MEMORY` (default: 384m)
The minimum heap size of the JVM
* `JVM_MAXIMUM_MEMORY` (default: 768m)
The maximum heap size of the JVM
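For example, to run with a larger heap (the sizes here are only illustrative):

docker run -e JVM_MINIMUM_MEMORY=1024m -e JVM_MAXIMUM_MEMORY=2048m -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd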
## Reverse Proxy Settings
If Crowd is run behind a reverse proxy server as [described here][3], then you need to specify extra options to make Crowd aware of the setup. They can be controlled via the below environment variables.
* `ATL_PROXY_NAME` (default: NONE)
The reverse proxy's fully qualified hostname. `CATALINA_CONNECTOR_PROXYNAME`
is also supported for backwards compatibility.
* `ATL_PROXY_PORT` (default: NONE)
The reverse proxy's port number via which Crowd is
accessed. `CATALINA_CONNECTOR_PROXYPORT` is also supported for backwards
compatibility.
* `ATL_TOMCAT_PORT` (default: 8095)
The port for Tomcat/Crowd to listen on. Depending on your container
deployment method this port may need to be
[exposed and published][docker-expose].
* `ATL_TOMCAT_SCHEME` (default: http)
The protocol via which Crowd is accessed. `CATALINA_CONNECTOR_SCHEME` is also
supported for backwards compatibility.
* `ATL_TOMCAT_SECURE` (default: false)
Set 'true' if `ATL_TOMCAT_SCHEME` is 'https'. `CATALINA_CONNECTOR_SECURE` is
also supported for backwards compatibility.
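For example, when Crowd is served at https://crowd.example.com through a reverse proxy (hostname and port are only illustrative):

docker run -e ATL_PROXY_NAME=crowd.example.com -e ATL_PROXY_PORT=443 -e ATL_TOMCAT_SCHEME=https -e ATL_TOMCAT_SECURE=true -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd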
The following Tomcat/Catalina options are also supported. For more information,
see https://tomcat.apache.org/tomcat-8.5-doc/config/index.html.
* `ATL_TOMCAT_MGMT_PORT` (default: 8000)
* `ATL_TOMCAT_MAXTHREADS` (default: 100)
* `ATL_TOMCAT_MINSPARETHREADS` (default: 10)
* `ATL_TOMCAT_CONNECTIONTIMEOUT` (default: 20000)
* `ATL_TOMCAT_ENABLELOOKUPS` (default: false)
* `ATL_TOMCAT_PROTOCOL` (default: HTTP/1.1)
* `ATL_TOMCAT_ACCEPTCOUNT` (default: 10)
* `ATL_TOMCAT_MAXHTTPHEADERSIZE` (default: 8192)
## JVM Configuration
If you need to pass additional JVM arguments to Crowd, such as specifying a custom trust store, you can add them via the below environment variable
* `JVM_SUPPORT_RECOMMENDED_ARGS`
Additional JVM arguments for Crowd
Example:
docker run -e JVM_SUPPORT_RECOMMENDED_ARGS=-Djavax.net.ssl.trustStore=/var/atlassian/application-data/crowd/cacerts -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd
## Data Center configuration
This docker image can be run as part of a [Data Center][4] cluster. You can
specify the following properties to start Crowd as a Data Center node,
instead of manually configuring a cluster. See [Installing Crowd Data
Center][5] for more information.
## Container Configuration
* `SET_PERMISSIONS` (default: true)
Define whether to set home directory permissions on startup. Set to `false` to disable
this behaviour.
## Advanced Configuration
As mentioned at the top of this section, the settings from the environment are
used to populate the application configuration on the container startup. However
in some cases you may wish to customise the settings in ways that are not
supported by the environment variables above. In this case, it is possible to
modify the base templates to add your own configuration. There are three main
ways of doing this; modify our repository to your own image, build a new image
from the existing one, or provide new templates at startup. We will briefly
outline these methods here, but in practice how you do this will depend on your
needs.
#### Building your own image
* Clone the Atlassian repository at https://bitbucket.org/atlassian-docker/docker-atlassian-crowd/
* Modify or replace the [Jinja](https://jinja.palletsprojects.com/) templates
under `config`; _NOTE_: The files must have the `.j2` extensions. However you
don't have to use template variables if you don't wish.
* Build the new image with e.g: `docker build --tag my-crowd-image --build-arg CROWD_VERSION=3.x.x .`
* Optionally push to a registry, and deploy.
#### Build a new image from the existing one
* Create a new `Dockerfile`, which starts with the Atlassian Crowd base image e.g: `FROM atlassian/crowd:latest`.
* Use a `COPY` line to overwrite the provided templates.
* Build, push and deploy the new image as above.
#### Overwrite the templates at runtime
There are two main ways of doing this:
* If your container is going to be long-lived, you can create it, modify the
installed templates under `/opt/atlassian/etc/`, and then run it.
* Alternatively, you can create a volume containing your alternative templates,
and mount it over the provided templates at runtime
with `--volume my-config:/opt/atlassian/etc/`.
# Shared directory and user IDs
By default the Crowd application runs as the user `crowd`, with a UID
and GID of 2004. Consequently this UID must have write access to the shared
filesystem. If for some reason a different UID must be used, there are a number
of options available:
* The Docker image can be rebuilt with a different UID.
* Under Linux, the UID can be remapped using
[user namespace remapping][7].
To preserve strict permissions for certain configuration files, this container starts as
`root` to perform bootstrapping before running Crowd under a non-privileged user
account. If you wish to start the container as a non-root user, please note that Tomcat
configuration will be skipped and a warning will be logged. You may still apply custom
configuration in this situation by mounting configuration files directly, e.g.
by mounting your own server.xml file directly to
`/opt/atlassian/crowd/apache-tomcat/conf/server.xml`.
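A sketch of such a bind mount (the local path is a placeholder; `--user 2004:2004` matches the default `crowd` UID/GID mentioned above):

docker run --user 2004:2004 -v /path/to/my-server.xml:/opt/atlassian/crowd/apache-tomcat/conf/server.xml -v crowdVolume:/var/atlassian/application-data/crowd --name="crowd" -d -p 8095:8095 atlassian/crowd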
# Upgrade
To upgrade to a more recent version of Crowd you can simply stop the `crowd` container and start a new one based on a more recent image:
docker stop crowd
docker rm crowd
docker run ... (See above)
As your data is stored in the data volume directory on the host it will still be available after the upgrade.
_Note: Please make sure that you **don't** accidentally remove the `crowd` container and its volumes using the `-v` option._
# Backup
For evaluations you can use the built-in database that will store its files in the Crowd home directory. In that case it is sufficient to create a backup archive of the docker volume.
If you're using an external database, you can configure Crowd to make a backup automatically each night. This will back up the current state, including the database to the `crowdVolume` docker volume, which can then be archived. Alternatively you can backup the database separately, and continue to create a backup archive of the docker volume to back up the Crowd Home directory.
Read more about data recovery and backups: [Backing Up and Restoring Data][6]
# Versioning
The `latest` tag matches the most recent release of Atlassian Crowd. Thus `atlassian/crowd:latest` will use the newest version of Crowd available.
Alternatively you can use a specific major, major.minor, or major.minor.patch version of Crowd by using a version number tag:
* `atlassian/crowd:3`
* `atlassian/crowd:3.2`
* `atlassian/crowd:3.2.3`
All versions from 3.0+ are available
# Supported JDK versions
All the Atlassian Docker images are now JDK11 only, and generated from the
[official Eclipse Temurin OpenJDK Docker images](https://hub.docker.com/_/eclipse-temurin).
The Docker images follow the [Atlassian Support end-of-life
policy](https://confluence.atlassian.com/support/atlassian-support-end-of-life-policy-201851003.html);
images for unsupported versions of the products remain available but will no longer
receive updates or fixes.
Historically, we have also generated other versions of the images, including
JDK8, Alpine, and 'slim' versions of the JDK. These legacy images still exist in
Docker Hub, however they should be considered deprecated, and do not receive
updates or fixes.
If for some reason you need a different version, see "Building your own image"
above.
# Supported architectures
Currently the Atlassian Docker images are built for the `linux/amd64` target
platform; we do not have other architectures on our roadmap at this
point. However the Dockerfiles and support tooling have now had all
architecture-specific components removed, so if necessary it is possible to
build images for any platform supported by Docker.
## Building on the target architecture
Note: This method is known to work on Mac M1 and AWS ARM64 machines, but has not
been extensively tested.
The simplest method of getting a platform image is to build it on a target
machine. The following assumes you have git and Docker installed. You will also
need to know which version of Crowd you want to build; substitute
`CROWD_VERSION=x.x.x` with your required version:
```
git clone --recurse-submodule https://bitbucket.org/atlassian-docker/docker-atlassian-crowd.git
cd docker-atlassian-crowd
docker build --tag my-image --build-arg CROWD_VERSION=x.x.x .
```
This image can be pushed up to your own Docker Hub or private repository.
# Troubleshooting
These images include built-in scripts to assist in performing common JVM diagnostic tasks.
## Thread dumps
`/opt/atlassian/support/thread-dumps.sh` can be run via `docker exec` to easily trigger the collection of thread
dumps from the containerized application. For example:
docker exec my_crowd /opt/atlassian/support/thread-dumps.sh
By default this script will collect 10 thread dumps at 5 second intervals. This can
be overridden by passing a custom value for the count and interval, by using `-c` / `--count`
and `-i` / `--interval` respectively. For example, to collect 20 thread dumps at 3 second intervals:
docker exec my_container /opt/atlassian/support/thread-dumps.sh --count 20 --interval 3
Thread dumps will be written to `$APP_HOME/thread_dumps/<date>`.
Note: By default this script will also capture output from top run in 'Thread-mode'. This can
be disabled by passing `-n` / `--no-top`
## Heap dump
`/opt/atlassian/support/heap-dump.sh` can be run via `docker exec` to easily trigger the collection of a heap
dump from the containerized application. For example:
docker exec my_container /opt/atlassian/support/heap-dump.sh
A heap dump will be written to `$APP_HOME/heap.bin`. If a file already exists at this
location, use `-f` / `--force` to overwrite the existing heap dump file.
## Manual diagnostics
The `jcmd` utility is also included in these images and can be used by starting a `bash` shell
in the running container:
docker exec -it my_container /bin/bash
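Once inside the container, `jcmd` can be pointed at the application JVM; a minimal sketch (take the PID from the first command's output):

    jcmd                           # list running JVMs and their PIDs
    jcmd <pid> VM.flags            # show the JVM flags in effect
    jcmd <pid> GC.class_histogram  # example of a further diagnostic command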
# Support
For product support, go to:
* https://support.atlassian.com/crowd/
You can also visit the [Atlassian Data Center on
Kubernetes](https://community.atlassian.com/t5/Atlassian-Data-Center-on/gh-p/DC_Kubernetes)
forum for discussion on running Atlassian Data Center products in containers.
# Changelog
For a detailed list of changes to the Docker image configuration see [the Git
commit history](https://bitbucket.org/atlassian-docker/docker-atlassian-crowd/commits/).
# License
Copyright © 2019 Atlassian Corporation Pty Ltd.
Licensed under the Apache License, Version 2.0.
[1]: https://www.atlassian.com/software/crowd
[2]: https://confluence.atlassian.com/crowd/supported-platforms-191851.html
[3]: https://confluence.atlassian.com/crowd031/integrating-crowd-with-apache-949753124.html
[4]: https://confluence.atlassian.com/crowd/crowd-data-center-935372453.html
[5]: https://confluence.atlassian.com/crowd/installing-crowd-data-center-935369773.html
[6]: https://confluence.atlassian.com/crowd/backing-up-and-restoring-data-36470797.html
[7]: https://docs.docker.com/engine/security/userns-remap/

View File

@ -0,0 +1,47 @@
<?xml version="1.0" encoding="UTF-8"?>
<Server port="{{ atl_tomcat_mgmt_port | default('8020') }}" shutdown="SHUTDOWN">
<Service name="Catalina">
<Connector acceptCount="{{ atl_tomcat_acceptcount | default('100') }}"
connectionTimeout="{{ atl_tomcat_connectiontimeout | default('20000') }}"
disableUploadTimeout="true"
enableLookups="{{ atl_tomcat_enablelookups | default('false') }}"
protocol="{{ atl_tomcat_protocol | default('HTTP/1.1') }}"
maxHttpHeaderSize="{{ atl_tomcat_maxhttpheadersize | default('8192') }}"
maxThreads="{{ atl_tomcat_maxthreads | default('150') }}"
minSpareThreads="{{ atl_tomcat_minsparethreads | default('25') }}"
port="{{ atl_tomcat_port | default('8095') }}"
secure="{{ atl_tomcat_secure | default(catalina_connector_secure) | default('false') }}"
scheme="{{ atl_tomcat_scheme | default(catalina_connector_scheme) | default('http') }}"
proxyName="{{ atl_proxy_name | default(catalina_connector_proxyname) | default('') }}"
proxyPort="{{ atl_proxy_port | default(catalina_connector_proxyport) | default('') }}"
redirectPort="{{ atl_tomcat_redirectport | default('8443') }}"
useBodyEncodingForURI="true"
URIEncoding="UTF-8"
compression="on"
sendReasonPhrase="true"
compressableMimeType="text/html,text/xml,application/xml,text/plain,text/css,application/json,application/javascript,application/x-javascript" />
<Engine defaultHost="localhost" name="Catalina">
<Host appBase="webapps" autoDeploy="true" name="localhost" unpackWARs="true"/>
</Engine>
<!-- To connect to an external web server (typically Apache) -->
<!-- Define an AJP 1.3 Connector on port 8009 -->
<!--
<Connector port="8009" enableLookups="false" redirectPort="8443" protocol="AJP/1.3" />
-->
</Service>
<!-- Security listener. Documentation at /docs/config/listeners.html
<Listener className="org.apache.catalina.security.SecurityListener" />
-->
<!--APR library loader. Documentation at /docs/apr.html -->
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<!-- Prevent memory leaks due to use of particular java/javax APIs-->
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
</Server>
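A minimal sketch of overriding these template defaults at run time: the entrypoint helpers (shown further below) import all environment variables lower-cased before rendering, so `ATL_*` variables map onto the `atl_*` keys used above. The image name and values here are illustrative only:
```
docker run -e ATL_TOMCAT_PORT=8095 \
           -e ATL_PROXY_NAME=crowd.example.com \
           -e ATL_PROXY_PORT=443 \
           -e ATL_TOMCAT_SCHEME=https \
           epicmorg/crowd:latest
```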

View File

@ -0,0 +1,26 @@
version: '3.9'
services:
app:
image: "epicmorg/crowd:latest"
build:
context: .
args:
RELEASE: ${RELEASE}
DOWNLOAD_URL: ${DOWNLOAD_URL}
app-jdk11:
image: "epicmorg/crowd:latest-jdk11"
build:
context: .
dockerfile: Dockerfile.jdk11
args:
RELEASE: ${RELEASE}
DOWNLOAD_URL: ${DOWNLOAD_URL}
# app-jdk17:
# image: "epicmorg/crowd:latest-jdk17"
# build:
# context: .
# dockerfile: Dockerfile.jdk17
# args:
# RELEASE: ${RELEASE}
# DOWNLOAD_URL: ${DOWNLOAD_URL}
#

View File

@ -0,0 +1,13 @@
#!/usr/bin/python3 -B
from entrypoint_helpers import env, gen_cfg, gen_container_id, exec_app
RUN_USER = env['run_user']
RUN_GROUP = env['run_group']
CROWD_INSTALL_DIR = env['crowd_install_dir']
CROWD_HOME = env['crowd_home']
gen_cfg('server.xml.j2', f'{CROWD_INSTALL_DIR}/apache-tomcat/conf/server.xml')
exec_app([f'{CROWD_INSTALL_DIR}/start_crowd.sh', '-fg'], CROWD_HOME, name='Crowd', env_cleanup=True)

View File

@ -0,0 +1,5 @@
__pycache__/
.pytest_cache/
Pipfile
Pipfile.lock
*.pyc

View File

@ -0,0 +1,13 @@
Copyright © 2019 Atlassian Corporation Pty Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You may
obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.

View File

@ -0,0 +1,36 @@
# Overview
This repository provides common utilities & components for building and testing Docker
images for Atlassian's Server and Data Center products.
The following components are provided:
### Image builds
* Support tools
Scripts for performing common diagnostic operations, i.e. taking thread dumps and heap
dumps.
* Entrypoint helpers
Common components for bootstrapping and starting apps.
* README publishing
Utility for publishing README's to Docker Hub, without relying on Docker Hub's own
automated builds.
### Image testing
* Fixtures
Common testing fixtures that can be reused for testing Docker builds of Atlassian
apps.
* Helpers
Helper functions for parsing configuration files, checking running processes and
retrieving http responses.

View File

@ -0,0 +1,146 @@
import sys
import os
import pwd
import shutil
import logging
import jinja2 as j2
import uuid
import base64
logging.basicConfig(level=logging.DEBUG)
######################################################################
# Setup inputs and outputs
# Import all ATL_* and Dockerfile environment variables. We lower-case
# these for compatability with Ansible template convention. We also
# support CATALINA variables from older versions of the Docker images
# for backwards compatability, if the new version is not set.
env = {k.lower(): v
for k, v in os.environ.items()}
# Setup Jinja2 for templating
jenv = j2.Environment(
loader=j2.FileSystemLoader('/opt/atlassian/etc/'),
autoescape=j2.select_autoescape(['xml']))
######################################################################
# Utils
def set_perms(path, user, group, mode):
try:
shutil.chown(path, user=user, group=group)
except PermissionError:
logging.warning(f"Could not chown path {path} to {user}:{group} due to insufficient permissions.")
try:
os.chmod(path, mode)
except PermissionError:
logging.warning(f"Could not chmod path {path} to {mode} due to insufficient permissions.")
def set_tree_perms(path, user, group, mode):
    set_perms(path, user, group, mode)
    for dirpath, dirnames, filenames in os.walk(path):
        for dirname in dirnames:
            set_perms(os.path.join(dirpath, dirname), user, group, mode)
        for filename in filenames:
            set_perms(os.path.join(dirpath, filename), user, group, mode)
def check_perms(path, uid, gid, mode):
stat = os.stat(path)
return all([
stat.st_uid == int(uid),
stat.st_gid == int(gid),
stat.st_mode & mode == mode
])
def gen_cfg(tmpl, target, user='root', group='root', mode=0o644, overwrite=True):
if not overwrite and os.path.exists(target):
logging.info(f"{target} exists; skipping.")
return
logging.info(f"Generating {target} from template {tmpl}")
cfg = jenv.get_template(tmpl).render(env)
try:
with open(target, 'w') as fd:
fd.write(cfg)
except (OSError, PermissionError):
logging.warning(f"Permission problem writing '{target}'; skipping")
else:
set_tree_perms(target, user, group, mode)
def gen_container_id():
env['uuid'] = uuid.uuid4().hex
with open('/etc/container_id') as fd:
lcid = fd.read()
if lcid != '':
env['local_container_id'] = lcid
def str2bool(v):
if str(v).lower() in ('yes', 'true', 't', 'y', '1'):
return True
return False
def unset_secure_vars():
secure_keywords = ('PASS', 'SECRET', 'TOKEN')
    # Snapshot the keys so entries can be deleted while iterating
    for key in list(os.environ):
if any(kw in key.upper() for kw in secure_keywords):
logging.warning(f"Unsetting environment var {key}")
del os.environ[key]
######################################################################
# Application startup utilities
def check_permissions(home_dir):
"""Ensure the home directory is set to minimal permissions"""
if str2bool(env.get('set_permissions') or True) and check_perms(home_dir, env['run_uid'], env['run_gid'], 0o700) is False:
set_tree_perms(home_dir, env['run_user'], env['run_group'], 0o700)
logging.info(f"User is currently root. Will change directory ownership and downgrade run user to {env['run_user']}")
def drop_root(run_user):
logging.info(f"User is currently root. Will downgrade run user to {run_user}")
pwd_entry = pwd.getpwnam(run_user)
os.environ['USER'] = run_user
os.environ['HOME'] = pwd_entry.pw_dir
os.environ['SHELL'] = pwd_entry.pw_shell
os.environ['LOGNAME'] = run_user
os.setgid(pwd_entry.pw_gid)
os.setuid(pwd_entry.pw_uid)
def write_pidfile():
app_home = env[f"{env['app_name'].lower()}_home"]
pidfile = f"{app_home}/docker-app.pid"
with open(pidfile, 'wt', encoding='utf-8') as fd:
pid = os.getpid()
fd.write(str(pid))
def exec_app(start_cmd_v, home_dir, name='app', env_cleanup=False):
"""Run the supplied application startup command.
Arguments:
start_cmd -- A list of the command and its arguments.
home_dir -- Application home directory.
name -- (Optional) The name to display in the log message.
env_cleanup -- (Default: False) Remove possibly sensitive env-vars.
"""
if os.getuid() == 0:
check_permissions(home_dir)
drop_root(env['run_user'])
write_pidfile()
if env_cleanup:
unset_secure_vars()
cmd = start_cmd_v[0]
args = start_cmd_v
logging.info(f"Running {name} with command '{cmd}', arguments {args}")
os.execv(cmd, args)

View File

@ -0,0 +1,63 @@
# -------------------------------------------------------------------------------------
# Common bootstrapping for support scripts (get app details: home directory, PID, etc.)
# -------------------------------------------------------------------------------------
# Set up Java utils
JCMD="${JAVA_HOME}/bin/jcmd"
# Set up app info
APP_NAME="$(set | grep '_INSTALL_DIR' | awk -F'_' '{print $1}')"
case "${APP_NAME}" in
BITBUCKET )
BOOTSTRAP_PROC="com.atlassian.bitbucket.internal.launcher.BitbucketServerLauncher"
;;
* )
BOOTSTRAP_PROC="org.apache.catalina.startup.Bootstrap"
;;
esac
# Get value of <app>_INSTALL_DIR
function get_app_install_dir {
local APP_INSTALL_DIR=${APP_NAME}_INSTALL_DIR
echo ${!APP_INSTALL_DIR}
}
# Get value of <app>_HOME
function get_app_home {
local APP_HOME=${APP_NAME}_HOME
echo ${!APP_HOME}
}
# Get app PID. APP_PID is the root process. JVM_APP_PID will generally
# be the same as APP_PID; the exception is Bitbucket running with
# Elasticsearch enabled.
JVM_APP_PID=$(${JCMD} | grep "${BOOTSTRAP_PROC}" | awk '{print $1}')
PIDFILE="$(get_app_home)/docker-app.pid"
if [[ -f $PIDFILE ]]; then
APP_PID=$(<$PIDFILE)
else
APP_PID=$JVM_APP_PID
fi
# Set valid getopt options
function set_valid_options {
OPTS=$(getopt -o "$1" --long "$2" -n 'parse-options' -- "$@")
if [ $? != 0 ]; then
echo "Failed parsing options." >&2
exit 1
fi
eval set -- "$OPTS"
}
# Run command(s)
function run_as_runuser {
if [ $(id -u) = 0 ]; then
su "${RUN_USER}" -c '"$@"' -- argv0 "$@"
else
        "$@"
fi
}

View File

@ -0,0 +1,62 @@
#!/bin/bash
# -------------------------------------------------------------------------------------
# Heap collector for containerized Atlassian applications
#
# This script can be run via `docker exec` to easily trigger the collection of a heap
# dump from the containerized application. For example:
#
# $ docker exec -it my_jira /opt/atlassian/support/heap-dump.sh
#
# A heap dump will be written to $APP_HOME/heap.bin. If a file already exists at this
# location, use -f/--force to overwrite the existing heap dump file.
#
# -------------------------------------------------------------------------------------
set -euo pipefail
# Set up common vars like APP_NAME, APP_HOME, APP_PID
SCRIPT_DIR=$(dirname "$0")
source "${SCRIPT_DIR}/common.sh"
# Set up script opts
set_valid_options "f" "force"
# Set defaults
OVERWRITE="false"
# Parse opts
while true; do
case "${1-}" in
-f | --force ) OVERWRITE="true"; shift ;;
* ) break ;;
esac
done
echo "Atlassian heap dump collector"
echo "App: ${APP_NAME}"
echo "Run user: ${RUN_USER}"
echo
OUT_FILE="$(get_app_home)/heap.bin"
if [[ -f "${OUT_FILE}" ]]; then
echo "A previous heap dump already exists at ${OUT_FILE}."
if [[ "${OVERWRITE}" == "true" ]]; then
echo "Removing previous heap dump file"
echo
rm "${OUT_FILE}"
else
echo "Use -f/--force to overwrite the existing heap dump."
exit
fi
fi
echo "Generating heap dump"
run_as_runuser ${JCMD} ${JVM_APP_PID} GC.heap_dump -all ${OUT_FILE} > /dev/null
echo
echo "Heap dump has been written to ${OUT_FILE}"

View File

@ -0,0 +1,22 @@
#!/bin/bash
# Send the specified signal to the main application process.
#
# If 'wait' is added as a second parameter, wait for the process to
# terminate. NOTE: This waits indefinitely, but may be killed by
# higher-level processes (e.g. Docker/Kubernetes)
set -e
SIG=$1
WAIT=$2
SHDIR=$(dirname $0)
source ${SHDIR}/common.sh
kill -${SIG} ${JVM_APP_PID}
if [[ "${WAIT}" == "wait" ]]; then
${SHDIR}/wait-pid.sh $JVM_APP_PID
fi

View File

@ -0,0 +1,74 @@
#!/bin/bash
# -------------------------------------------------------------------------------------
# Thread dumps collector for containerized Atlassian applications
#
# This script can be run via `docker exec` to easily trigger the collection of thread
# dumps from the containerized application. For example:
#
# $ docker exec my_jira /opt/atlassian/support/thread-dumps.sh
#
# By default this script will collect 10 thread dumps at a 5 second interval. This can
# be overridden by passing a custom value for the count and interval, respectively. For
# example, to collect 20 thread dumps at a 3 second interval:
#
# $ docker exec my_jira /opt/atlassian/support/thread-dumps.sh -c 20 -i 3
#
# Note: By default this script will capture output from top run in 'Thread-mode'. This can
# be disabled by passing --no-top
# -------------------------------------------------------------------------------------
set -euo pipefail
# Set up common vars like APP_NAME, APP_HOME, APP_PID
SCRIPT_DIR=$(dirname "$0")
source "${SCRIPT_DIR}/common.sh"
# Set up script opts
set_valid_options "c:i:n" "count:,interval:,no-top"
# Set defaults
COUNT="10"
INTERVAL="5"
NO_TOP="false"
# Parse opts
while true; do
case "${1-}" in
-c | --count ) COUNT="$2"; shift 2 ;;
-i | --interval ) INTERVAL="$2"; shift 2 ;;
-n | --no-top ) NO_TOP="true"; shift ;;
* ) break ;;
esac
done
echo "Atlassian thread dump collector"
echo "App: ${APP_NAME}"
echo "Run user: ${RUN_USER}"
echo
echo "${COUNT} thread dumps will be generated at a ${INTERVAL} second interval"
if [[ "${NO_TOP}" == "false" ]]; then
echo "top 'Threads-mode' output will also be collected for ${APP_NAME} with every thread dump"
fi
echo
OUT_DIR="$(get_app_home)/thread_dumps/$(date +'%Y-%m-%d_%H-%M-%S')"
run_as_runuser mkdir -p ${OUT_DIR}
for i in $(seq ${COUNT}); do
echo "Generating thread dump ${i} of ${COUNT}"
if [[ "${NO_TOP}" == "false" ]]; then
run_as_runuser top -b -H -p $JVM_APP_PID -n 1 > "${OUT_DIR}/${APP_NAME}_CPU_USAGE.$(date +%s).txt"
fi
run_as_runuser ${JCMD} ${JVM_APP_PID} Thread.print -l > "${OUT_DIR}/${APP_NAME}_THREADS.$(date +%s).txt"
if [[ ! "${i}" == "${COUNT}" ]]; then
sleep ${INTERVAL}
fi
done
echo
echo "Thread dumps have been written to ${OUT_DIR}"

View File

@ -0,0 +1,13 @@
#!/bin/bash
# Wait for the specified process ID to terminate. NOTE: This waits
# indefinitely, but may be killed by higher-level processes
# (e.g. Docker/Kubernetes)
set -e
WAIT_PID=$1
while [[ -e /proc/${WAIT_PID} ]]; do
/bin/sleep 0.5
done

View File

@ -0,0 +1,23 @@
#!/bin/bash
set -e
host=$1
port=$2
secs=120
echo -n "Waiting for TCP connection to $host:$port..."
for i in `seq $secs`; do
if nc -z $host $port > /dev/null ; then
echo OK
exit 0
fi
echo -n .
/bin/sleep 1
done
echo FAILED
exit -1

View File

@ -0,0 +1,30 @@
#!/bin/bash
##############################################################################
#
# This script will initiate a clean shutdown of the application, and
# then wait for the process to finish before returning. This is
# primarily intended for use in environments that provide an orderly
# shutdown mechanism, in particular the Kubernetes `preStop` hook.
#
# This script will wait for the process to exit indefinitely; however
# most run-time tools (including Docker and Kubernetes) have their own
# shutdown timeouts that will send a SIGKILL if the grace period is
# exceeded.
#
##############################################################################
set -e
source /opt/atlassian/support/common.sh
echo "Shutting down Crowd..."
echo ${JVM_APP_PID} > ${CROWD_INSTALL_DIR}/apache-tomcat/work/catalina.pid
if [[ "${UID}" == 0 ]]; then
/bin/su ${RUN_USER} -c ${CROWD_INSTALL_DIR}/stop_crowd.sh;
else
${CROWD_INSTALL_DIR}/stop_crowd.sh;
fi
/opt/atlassian/support/wait-pid.sh ${JVM_APP_PID}

View File

@ -0,0 +1,3 @@
RELEASE=9.6.0
DOWNLOAD_URL=https://www.atlassian.com/software/jira/downloads/binary/atlassian-jira-software-9.6.0.tar.gz

View File

@ -0,0 +1,49 @@
FROM epicmorg/debian:bullseye-jdk8
LABEL maintainer="Atlassian Jira Server Team; EpicMorg DevTeam, developer@epicm.org"
ARG DEBIAN_FRONTEND=noninteractive
##################################################################
# ARGuments
##################################################################
#configured by dockerfile / .ENV
ARG RELEASE
ARG DOWNLOAD_URL
##################################################################
# Setup
##################################################################
ENV RUN_USER daemon
ENV RUN_GROUP daemon
# https://confluence.atlassian.com/display/JSERVERM/Important+directories+and+files
ENV JIRA_HOME /var/atlassian/application-data/jira
ENV JIRA_INSTALL_DIR /opt/atlassian/jira
VOLUME ["${JIRA_HOME}"]
WORKDIR $JIRA_HOME
# Expose HTTP port
EXPOSE 8080
##################################################################
# Installing
##################################################################
RUN mkdir -p ${JIRA_INSTALL_DIR} \
&& curl -L ${DOWNLOAD_URL} | tar -xz --strip-components=1 -C "$JIRA_INSTALL_DIR" \
&& chown -R ${RUN_USER}:${RUN_GROUP} ${JIRA_INSTALL_DIR}/ \
&& sed -i -e 's/^JVM_SUPPORT_RECOMMENDED_ARGS=""$/: \${JVM_SUPPORT_RECOMMENDED_ARGS:=""}/g' ${JIRA_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/^JVM_\(.*\)_MEMORY="\(.*\)"$/: \${JVM_\1_MEMORY:=\2}/g' ${JIRA_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/grep "java version"/grep -E "(openjdk|java) version"/g' ${JIRA_INSTALL_DIR}/bin/check-java.sh \
&& sed -i -e 's/port="8080"/port="8080" secure="${catalinaConnectorSecure}" scheme="${catalinaConnectorScheme}" proxyName="${catalinaConnectorProxyName}" proxyPort="${catalinaConnectorProxyPort}"/' ${JIRA_INSTALL_DIR}/conf/server.xml && \
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \
apt clean -y && \
apt autoclean -y && \
rm -rfv /var/lib/apt/lists/* && \
rm -rfv /var/cache/apt/archives/*.deb
CMD ["/entrypoint.sh", "-fg"]
ENTRYPOINT ["/usr/bin/tini", "--"]
COPY entrypoint.sh /entrypoint.sh
COPY . /tmp

View File

@ -0,0 +1,49 @@
FROM epicmorg/debian:bullseye-jdk11
LABEL maintainer="Atlassian Jira Server Team; EpicMorg DevTeam, developer@epicm.org"
ARG DEBIAN_FRONTEND=noninteractive
##################################################################
# ARGuments
##################################################################
#configured by dockerfile / .ENV
ARG RELEASE
ARG DOWNLOAD_URL
##################################################################
# Setup
##################################################################
ENV RUN_USER daemon
ENV RUN_GROUP daemon
# https://confluence.atlassian.com/display/JSERVERM/Important+directories+and+files
ENV JIRA_HOME /var/atlassian/application-data/jira
ENV JIRA_INSTALL_DIR /opt/atlassian/jira
VOLUME ["${JIRA_HOME}"]
WORKDIR $JIRA_HOME
# Expose HTTP port
EXPOSE 8080
##################################################################
# Installing
##################################################################
RUN mkdir -p ${JIRA_INSTALL_DIR} \
&& curl -L ${DOWNLOAD_URL} | tar -xz --strip-components=1 -C "$JIRA_INSTALL_DIR" \
&& chown -R ${RUN_USER}:${RUN_GROUP} ${JIRA_INSTALL_DIR}/ \
&& sed -i -e 's/^JVM_SUPPORT_RECOMMENDED_ARGS=""$/: \${JVM_SUPPORT_RECOMMENDED_ARGS:=""}/g' ${JIRA_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/^JVM_\(.*\)_MEMORY="\(.*\)"$/: \${JVM_\1_MEMORY:=\2}/g' ${JIRA_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/grep "java version"/grep -E "(openjdk|java) version"/g' ${JIRA_INSTALL_DIR}/bin/check-java.sh \
&& sed -i -e 's/port="8080"/port="8080" secure="${catalinaConnectorSecure}" scheme="${catalinaConnectorScheme}" proxyName="${catalinaConnectorProxyName}" proxyPort="${catalinaConnectorProxyPort}"/' ${JIRA_INSTALL_DIR}/conf/server.xml && \
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \
apt clean -y && \
apt autoclean -y && \
rm -rfv /var/lib/apt/lists/* && \
rm -rfv /var/cache/apt/archives/*.deb
CMD ["/entrypoint.sh", "-fg"]
ENTRYPOINT ["/usr/bin/tini", "--"]
COPY entrypoint.sh /entrypoint.sh
COPY . /tmp

View File

@ -0,0 +1,49 @@
FROM epicmorg/debian:bullseye-jdk17
LABEL maintainer="Atlassian Jira Server Team; EpicMorg DevTeam, developer@epicm.org"
ARG DEBIAN_FRONTEND=noninteractive
##################################################################
# ARGuments
##################################################################
#configured by dockerfile / .ENV
ARG RELEASE
ARG DOWNLOAD_URL
##################################################################
# Setup
##################################################################
ENV RUN_USER daemon
ENV RUN_GROUP daemon
# https://confluence.atlassian.com/display/JSERVERM/Important+directories+and+files
ENV JIRA_HOME /var/atlassian/application-data/jira
ENV JIRA_INSTALL_DIR /opt/atlassian/jira
VOLUME ["${JIRA_HOME}"]
WORKDIR $JIRA_HOME
# Expose HTTP port
EXPOSE 8080
##################################################################
# Installing
##################################################################
RUN mkdir -p ${JIRA_INSTALL_DIR} \
&& curl -L ${DOWNLOAD_URL} | tar -xz --strip-components=1 -C "$JIRA_INSTALL_DIR" \
&& chown -R ${RUN_USER}:${RUN_GROUP} ${JIRA_INSTALL_DIR}/ \
&& sed -i -e 's/^JVM_SUPPORT_RECOMMENDED_ARGS=""$/: \${JVM_SUPPORT_RECOMMENDED_ARGS:=""}/g' ${JIRA_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/^JVM_\(.*\)_MEMORY="\(.*\)"$/: \${JVM_\1_MEMORY:=\2}/g' ${JIRA_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/grep "java version"/grep -E "(openjdk|java) version"/g' ${JIRA_INSTALL_DIR}/bin/check-java.sh \
&& sed -i -e 's/port="8080"/port="8080" secure="${catalinaConnectorSecure}" scheme="${catalinaConnectorScheme}" proxyName="${catalinaConnectorProxyName}" proxyPort="${catalinaConnectorProxyPort}"/' ${JIRA_INSTALL_DIR}/conf/server.xml && \
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \
apt clean -y && \
apt autoclean -y && \
rm -rfv /var/lib/apt/lists/* && \
rm -rfv /var/cache/apt/archives/*.deb
CMD ["/entrypoint.sh", "-fg"]
ENTRYPOINT ["/usr/bin/tini", "--"]
COPY entrypoint.sh /entrypoint.sh
COPY . /tmp

View File

@ -0,0 +1,19 @@
all: app
app:
make build
make deploy
make clean
build:
docker-compose build --compress --parallel --progress plain
deploy:
docker-compose push
clean:
docker container prune -f
docker image prune -f
docker network prune -f
docker volume prune -f
docker system prune -af

View File

@ -0,0 +1,25 @@
version: '3.9'
services:
app:
image: "epicmorg/jira:${RELEASE}"
build:
context: .
args:
RELEASE: ${RELEASE}
DOWNLOAD_URL: ${DOWNLOAD_URL}
app-jdk11:
image: "epicmorg/jira:${RELEASE}-jdk11"
build:
context: .
dockerfile: Dockerfile.jdk11
args:
RELEASE: ${RELEASE}
DOWNLOAD_URL: ${DOWNLOAD_URL}
app-jdk17:
image: "epicmorg/jira:${RELEASE}-jdk17"
build:
context: .
dockerfile: Dockerfile.jdk17
args:
RELEASE: ${RELEASE}
DOWNLOAD_URL: ${DOWNLOAD_URL}

View File

@ -0,0 +1,89 @@
#!/bin/bash
set -euo pipefail
export JAVA_HOME=$(readlink -f /usr/bin/javac | sed "s:/bin/javac::")
export JRE_HOME="$JAVA_HOME/jre"
export JAVA_BINARY="$JRE_HOME/bin/java"
export JAVA_VERSION=$("$JAVA_BINARY" -version 2>&1 | awk -F '"' '/version/ {print $2}')
# Setup Catalina Opts
: ${CATALINA_CONNECTOR_PROXYNAME:=}
: ${CATALINA_CONNECTOR_PROXYPORT:=}
: ${CATALINA_CONNECTOR_SCHEME:=http}
: ${CATALINA_CONNECTOR_SECURE:=false}
: ${CATALINA_OPTS:=}
: ${JAVA_OPTS:=}
CATALINA_OPTS="${CATALINA_OPTS} -DcatalinaConnectorProxyName=${CATALINA_CONNECTOR_PROXYNAME}"
CATALINA_OPTS="${CATALINA_OPTS} -DcatalinaConnectorProxyPort=${CATALINA_CONNECTOR_PROXYPORT}"
CATALINA_OPTS="${CATALINA_OPTS} -DcatalinaConnectorScheme=${CATALINA_CONNECTOR_SCHEME}"
CATALINA_OPTS="${CATALINA_OPTS} -DcatalinaConnectorSecure=${CATALINA_CONNECTOR_SECURE}"
export JAVA_OPTS="${JAVA_OPTS} ${CATALINA_OPTS}"
# Setup Data Center configuration
if [ ! -f "/etc/container_id" ]; then
uuidgen > /etc/container_id
fi
CONTAINER_ID=$(cat /etc/container_id)
CONTAINER_SHORT_ID=${CONTAINER_ID::8}
: ${CLUSTERED:=false}
: ${JIRA_NODE_ID:=jira_node_${CONTAINER_SHORT_ID}}
: ${JIRA_SHARED_HOME:=${JIRA_HOME}/shared}
: ${EHCACHE_PEER_DISCOVERY:=}
: ${EHCACHE_LISTENER_HOSTNAME:=}
: ${EHCACHE_LISTENER_PORT:=}
: ${EHCACHE_LISTENER_SOCKETTIMEOUTMILLIS:=}
: ${EHCACHE_MULTICAST_ADDRESS:=}
: ${EHCACHE_MULTICAST_PORT:=}
: ${EHCACHE_MULTICAST_TIMETOLIVE:=}
: ${EHCACHE_MULTICAST_HOSTNAME:=}
# Cleanly set/unset values in cluster.properties
function set_cluster_property {
    if [ -z "$2" ]; then
if [ -f "${JIRA_HOME}/cluster.properties" ]; then
sed -i -e "/^${1}/d" "${JIRA_HOME}/cluster.properties"
fi
return
fi
if [ ! -f "${JIRA_HOME}/cluster.properties" ]; then
echo "${1}=${2}" >> "${JIRA_HOME}/cluster.properties"
elif grep "^${1}" "${JIRA_HOME}/cluster.properties"; then
sed -i -e "s#^${1}=.*#${1}=${2}#g" "${JIRA_HOME}/cluster.properties"
else
echo "${1}=${2}" >> "${JIRA_HOME}/cluster.properties"
fi
}
if [ "${CLUSTERED}" == "true" ]; then
set_cluster_property "jira.node.id" "${JIRA_NODE_ID}"
set_cluster_property "jira.shared.home" "${JIRA_SHARED_HOME}"
set_cluster_property "ehcache.peer.discovery" "${EHCACHE_PEER_DISCOVERY}"
set_cluster_property "ehcache.listener.hostName" "${EHCACHE_LISTENER_HOSTNAME}"
set_cluster_property "ehcache.listener.port" "${EHCACHE_LISTENER_PORT}"
set_cluster_property "ehcache.listener.socketTimeoutMillis" "${EHCACHE_LISTENER_PORT}"
set_cluster_property "ehcache.multicast.address" "${EHCACHE_MULTICAST_ADDRESS}"
set_cluster_property "ehcache.multicast.port" "${EHCACHE_MULTICAST_PORT}"
set_cluster_property "ehcache.multicast.timeToLive" "${EHCACHE_MULTICAST_TIMETOLIVE}"
set_cluster_property "ehcache.multicast.hostName" "${EHCACHE_MULTICAST_HOSTNAME}"
fi
# Start Jira as the correct user
if [ "${UID}" -eq 0 ]; then
echo "User is currently root. Will change directory ownership to ${RUN_USER}:${RUN_GROUP}, then downgrade permission to ${RUN_USER}"
PERMISSIONS_SIGNATURE=$(stat -c "%u:%U:%a" "${JIRA_HOME}")
EXPECTED_PERMISSIONS=$(id -u ${RUN_USER}):${RUN_USER}:700
if [ "${PERMISSIONS_SIGNATURE}" != "${EXPECTED_PERMISSIONS}" ]; then
chmod -R 700 "${JIRA_HOME}" &&
chown -R "${RUN_USER}:${RUN_GROUP}" "${JIRA_HOME}"
fi
# Now drop privileges
exec su -s /bin/bash "${RUN_USER}" -c "$JIRA_INSTALL_DIR/bin/start-jira.sh $@"
else
exec "$JIRA_INSTALL_DIR/bin/start-jira.sh" "$@"
fi

View File

@ -1,3 +1,3 @@
-RELEASE=9.2.0
+RELEASE=9.6.0
-DOWNLOAD_URL=https://www.atlassian.com/software/jira/downloads/binary/atlassian-jira-software-9.2.0.tar.gz
+DOWNLOAD_URL=https://www.atlassian.com/software/jira/downloads/binary/atlassian-jira-software-9.6.0.tar.gz