Merge branch 'master' into up-dependencies-20Sep2019

This commit is contained in:
Hossein Shafagh 2019-09-20 15:19:25 -07:00 committed by GitHub
commit 8c9a1df2cf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 1156 additions and 54 deletions

View File

@ -46,7 +46,7 @@ reset-db:
@echo "--> Enabling pg_trgm extension" @echo "--> Enabling pg_trgm extension"
psql lemur -c "create extension IF NOT EXISTS pg_trgm;" psql lemur -c "create extension IF NOT EXISTS pg_trgm;"
@echo "--> Applying migrations" @echo "--> Applying migrations"
lemur db upgrade cd lemur && lemur db upgrade
setup-git: setup-git:
@echo "--> Installing git hooks" @echo "--> Installing git hooks"

View File

@ -593,8 +593,60 @@ If you are not using a metric provider you do not need to configure any of these
Plugin Specific Options Plugin Specific Options
----------------------- -----------------------
Active Directory Certificate Services Plugin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. data:: ADCS_SERVER
:noindex:
FQDN of your ADCS Server
.. data:: ADCS_AUTH_METHOD
:noindex:
The chosen authentication method. Either basic (the default), ntlm or cert (SSL client certificate). The next 2 variables are interpreted differently for different methods.
.. data:: ADCS_USER
:noindex:
The username (basic) or the path to the public cert (cert) of the user accessing PKI
.. data:: ADCS_PWD
:noindex:
The passwd (basic) or the path to the private key (cert) of the user accessing PKI
.. data:: ADCS_TEMPLATE
:noindex:
Template to be used for certificate issuing. Usually display name w/o spaces
.. data:: ADCS_START
:noindex:
.. data:: ADCS_STOP
:noindex:
.. data:: ADCS_ISSUING
:noindex:
Contains the issuing cert of the CA
.. data:: ADCS_ROOT
:noindex:
Contains the root cert of the CA
Verisign Issuer Plugin Verisign Issuer Plugin
^^^^^^^^^^^^^^^^^^^^^^ ~~~~~~~~~~~~~~~~~~~~~~
Authorities will each have their own configuration options. There is currently just one plugin bundled with Lemur, Authorities will each have their own configuration options. There is currently just one plugin bundled with Lemur,
Verisign/Symantec. Additional plugins may define additional options. Refer to the plugin's own documentation Verisign/Symantec. Additional plugins may define additional options. Refer to the plugin's own documentation
@ -642,7 +694,7 @@ for those plugins.
Digicert Issuer Plugin Digicert Issuer Plugin
^^^^^^^^^^^^^^^^^^^^^^ ~~~~~~~~~~~~~~~~~~~~~~
The following configuration properties are required to use the Digicert issuer plugin. The following configuration properties are required to use the Digicert issuer plugin.
@ -690,7 +742,7 @@ The following configuration properties are required to use the Digicert issuer p
CFSSL Issuer Plugin CFSSL Issuer Plugin
^^^^^^^^^^^^^^^^^^^ ~~~~~~~~~~~~~~~~~~~
The following configuration properties are required to use the CFSSL issuer plugin. The following configuration properties are required to use the CFSSL issuer plugin.
@ -716,7 +768,7 @@ The following configuration properties are required to use the CFSSL issuer plug
Hashicorp Vault Source/Destination Plugin Hashicorp Vault Source/Destination Plugin
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lemur can import and export certificate data to and from a Hashicorp Vault secrets store. Lemur can connect to a different Vault service per source/destination. Lemur can import and export certificate data to and from a Hashicorp Vault secrets store. Lemur can connect to a different Vault service per source/destination.
@ -738,7 +790,7 @@ Vault Destination supports a regex filter to prevent certificates with SAN that
AWS Source/Destination Plugin AWS Source/Destination Plugin
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In order for Lemur to manage its own account and other accounts we must ensure it has the correct AWS permissions. In order for Lemur to manage its own account and other accounts we must ensure it has the correct AWS permissions.

View File

@ -9,6 +9,7 @@ command: celery -A lemur.common.celery worker --loglevel=info -l DEBUG -B
""" """
import copy import copy
import sys import sys
import time
from datetime import datetime, timezone, timedelta from datetime import datetime, timezone, timedelta
from celery import Celery from celery import Celery
@ -16,6 +17,7 @@ from celery.exceptions import SoftTimeLimitExceeded
from flask import current_app from flask import current_app
from lemur.authorities.service import get as get_authority from lemur.authorities.service import get as get_authority
from lemur.common.redis import RedisHandler
from lemur.destinations import service as destinations_service from lemur.destinations import service as destinations_service
from lemur.extensions import metrics, sentry from lemur.extensions import metrics, sentry
from lemur.factory import create_app from lemur.factory import create_app
@ -24,12 +26,19 @@ from lemur.pending_certificates import service as pending_certificate_service
from lemur.plugins.base import plugins from lemur.plugins.base import plugins
from lemur.sources.cli import clean, sync, validate_sources from lemur.sources.cli import clean, sync, validate_sources
from lemur.sources.service import add_aws_destination_to_sources from lemur.sources.service import add_aws_destination_to_sources
from lemur.certificates import cli as cli_certificate
from lemur.dns_providers import cli as cli_dns_providers
from lemur.notifications import cli as cli_notification
from lemur.endpoints import cli as cli_endpoints
if current_app: if current_app:
flask_app = current_app flask_app = current_app
else: else:
flask_app = create_app() flask_app = create_app()
red = RedisHandler().redis()
def make_celery(app): def make_celery(app):
celery = Celery( celery = Celery(
@ -57,6 +66,9 @@ celery = make_celery(flask_app)
def is_task_active(fun, task_id, args): def is_task_active(fun, task_id, args):
from celery.task.control import inspect from celery.task.control import inspect
if not args:
args = '()' # empty args
i = inspect() i = inspect()
active_tasks = i.active() active_tasks = i.active()
for _, tasks in active_tasks.items(): for _, tasks in active_tasks.items():
@ -68,6 +80,45 @@ def is_task_active(fun, task_id, args):
return False return False
@celery.task()
def report_celery_last_success_metrics():
"""
For each celery task, this will determine the number of seconds since it has last been successful.
Celery tasks should be emitting redis stats with a deterministic key (In our case, `f"{task}.last_success"`.
report_celery_last_success_metrics should be ran periodically to emit metrics on when a task was last successful.
Admins can then alert when tasks are not ran when intended. Admins should also alert when no metrics are emitted
from this function.
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "recurrent task",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_time = int(time.time())
schedule = current_app.config.get('CELERYBEAT_SCHEDULE')
for _, t in schedule.items():
task = t.get("task")
last_success = int(red.get(f"{task}.last_success") or 0)
metrics.send(f"{task}.time_since_last_success", 'gauge', current_time - last_success)
red.set(
f"{function}.last_success", int(time.time())
) # Alert if this metric is not seen
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=600) @celery.task(soft_time_limit=600)
def fetch_acme_cert(id): def fetch_acme_cert(id):
""" """
@ -80,8 +131,9 @@ def fetch_acme_cert(id):
if celery.current_task: if celery.current_task:
task_id = celery.current_task.request.id task_id = celery.current_task.request.id
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = { log_data = {
"function": "{}.{}".format(__name__, sys._getframe().f_code.co_name), "function": function,
"message": "Resolving pending certificate {}".format(id), "message": "Resolving pending certificate {}".format(id),
"task_id": task_id, "task_id": task_id,
"id": id, "id": id,
@ -165,24 +217,39 @@ def fetch_acme_cert(id):
log_data["failed"] = failed log_data["failed"] = failed
log_data["wrong_issuer"] = wrong_issuer log_data["wrong_issuer"] = wrong_issuer
current_app.logger.debug(log_data) current_app.logger.debug(log_data)
metrics.send(f"{function}.resolved", 'gauge', new)
metrics.send(f"{function}.failed", 'gauge', failed)
metrics.send(f"{function}.wrong_issuer", 'gauge', wrong_issuer)
print( print(
"[+] Certificates: New: {new} Failed: {failed} Not using ACME: {wrong_issuer}".format( "[+] Certificates: New: {new} Failed: {failed} Not using ACME: {wrong_issuer}".format(
new=new, failed=failed, wrong_issuer=wrong_issuer new=new, failed=failed, wrong_issuer=wrong_issuer
) )
) )
red.set(f'{function}.last_success', int(time.time()))
@celery.task() @celery.task()
def fetch_all_pending_acme_certs(): def fetch_all_pending_acme_certs():
"""Instantiate celery workers to resolve all pending Acme certificates""" """Instantiate celery workers to resolve all pending Acme certificates"""
pending_certs = pending_certificate_service.get_unresolved_pending_certs()
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = { log_data = {
"function": "{}.{}".format(__name__, sys._getframe().f_code.co_name), "function": function,
"message": "Starting job.", "message": "Starting job.",
"task_id": task_id,
} }
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data) current_app.logger.debug(log_data)
pending_certs = pending_certificate_service.get_unresolved_pending_certs()
# We only care about certs using the acme-issuer plugin # We only care about certs using the acme-issuer plugin
for cert in pending_certs: for cert in pending_certs:
@ -195,11 +262,29 @@ def fetch_all_pending_acme_certs():
current_app.logger.debug(log_data) current_app.logger.debug(log_data)
fetch_acme_cert.delay(cert.id) fetch_acme_cert.delay(cert.id)
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task() @celery.task()
def remove_old_acme_certs(): def remove_old_acme_certs():
"""Prune old pending acme certificates from the database""" """Prune old pending acme certificates from the database"""
log_data = {"function": "{}.{}".format(__name__, sys._getframe().f_code.co_name)} function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "Starting job.",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
pending_certs = pending_certificate_service.get_pending_certs("all") pending_certs = pending_certificate_service.get_pending_certs("all")
# Delete pending certs more than a week old # Delete pending certs more than a week old
@ -211,6 +296,9 @@ def remove_old_acme_certs():
current_app.logger.debug(log_data) current_app.logger.debug(log_data)
pending_certificate_service.delete(cert) pending_certificate_service.delete(cert)
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task() @celery.task()
def clean_all_sources(): def clean_all_sources():
@ -218,15 +306,33 @@ def clean_all_sources():
This function will clean unused certificates from sources. This is a destructive operation and should only This function will clean unused certificates from sources. This is a destructive operation and should only
be ran periodically. This function triggers one celery task per source. be ran periodically. This function triggers one celery task per source.
""" """
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "Creating celery task to clean source",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
sources = validate_sources("all") sources = validate_sources("all")
for source in sources: for source in sources:
current_app.logger.debug( log_data["source"] = source.label
"Creating celery task to clean source {}".format(source.label) current_app.logger.debug(log_data)
)
clean_source.delay(source.label) clean_source.delay(source.label)
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task()
@celery.task(soft_time_limit=600)
def clean_source(source): def clean_source(source):
""" """
This celery task will clean the specified source. This is a destructive operation that will delete unused This celery task will clean the specified source. This is a destructive operation that will delete unused
@ -235,8 +341,31 @@ def clean_source(source):
:param source: :param source:
:return: :return:
""" """
current_app.logger.debug("Cleaning source {}".format(source)) function = f"{__name__}.{sys._getframe().f_code.co_name}"
clean([source], True) task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "Cleaning source",
"source": source,
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, (source,)):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
try:
clean([source], True)
except SoftTimeLimitExceeded:
log_data["message"] = "Clean source: Time limit exceeded."
current_app.logger.error(log_data)
sentry.captureException()
metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
@celery.task() @celery.task()
@ -244,13 +373,31 @@ def sync_all_sources():
""" """
This function will sync certificates from all sources. This function triggers one celery task per source. This function will sync certificates from all sources. This function triggers one celery task per source.
""" """
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "creating celery task to sync source",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
sources = validate_sources("all") sources = validate_sources("all")
for source in sources: for source in sources:
current_app.logger.debug( log_data["source"] = source.label
"Creating celery task to sync source {}".format(source.label) current_app.logger.debug(log_data)
)
sync_source.delay(source.label) sync_source.delay(source.label)
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=7200) @celery.task(soft_time_limit=7200)
def sync_source(source): def sync_source(source):
@ -261,35 +408,39 @@ def sync_source(source):
:return: :return:
""" """
function = "{}.{}".format(__name__, sys._getframe().f_code.co_name) function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None task_id = None
if celery.current_task: if celery.current_task:
task_id = celery.current_task.request.id task_id = celery.current_task.request.id
log_data = { log_data = {
"function": function, "function": function,
"message": "Syncing source", "message": "Syncing source",
"source": source, "source": source,
"task_id": task_id, "task_id": task_id,
} }
current_app.logger.debug(log_data)
if task_id and is_task_active(function, task_id, (source,)): if task_id and is_task_active(function, task_id, (source,)):
log_data["message"] = "Skipping task: Task is already active" log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data) current_app.logger.debug(log_data)
return return
current_app.logger.debug(log_data)
try: try:
sync([source]) sync([source])
metrics.send(f"{function}.success", 'counter', 1, metric_tags={"source": source})
except SoftTimeLimitExceeded: except SoftTimeLimitExceeded:
log_data["message"] = "Error syncing source: Time limit exceeded." log_data["message"] = "Error syncing source: Time limit exceeded."
current_app.logger.error(log_data) current_app.logger.error(log_data)
sentry.captureException() sentry.captureException()
metrics.send( metrics.send("sync_source_timeout", "counter", 1, metric_tags={"source": source})
"sync_source_timeout", "counter", 1, metric_tags={"source": source} metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
)
return return
log_data["message"] = "Done syncing source" log_data["message"] = "Done syncing source"
current_app.logger.debug(log_data) current_app.logger.debug(log_data)
metrics.send(f"{function}.success", 'counter', 1, metric_tags={"source": source})
red.set(f'{function}.last_success', int(time.time()))
@celery.task() @celery.task()
@ -301,10 +452,251 @@ def sync_source_destination():
The destination sync_as_source_name reveals the name of the suitable source-plugin. The destination sync_as_source_name reveals the name of the suitable source-plugin.
We rely on account numbers to avoid duplicates. We rely on account numbers to avoid duplicates.
""" """
current_app.logger.debug("Syncing AWS destinations and sources") function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "syncing AWS destinations and sources",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
for dst in destinations_service.get_all(): for dst in destinations_service.get_all():
if add_aws_destination_to_sources(dst): if add_aws_destination_to_sources(dst):
current_app.logger.debug("Source: %s added", dst.label) log_data["message"] = "new source added"
log_data["source"] = dst.label
current_app.logger.debug(log_data)
current_app.logger.debug("Completed Syncing AWS destinations and sources") log_data["message"] = "completed Syncing AWS destinations and sources"
current_app.logger.debug(log_data)
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=3600)
def certificate_reissue():
"""
This celery task reissues certificates which are pending reissue
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "reissuing certificates",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
try:
cli_certificate.reissue(None, True)
except SoftTimeLimitExceeded:
log_data["message"] = "Certificate reissue: Time limit exceeded."
current_app.logger.error(log_data)
sentry.captureException()
metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
return
log_data["message"] = "reissuance completed"
current_app.logger.debug(log_data)
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=3600)
def certificate_rotate():
"""
This celery task rotates certificates which are reissued but having endpoints attached to the replaced cert
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "rotating certificates",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
try:
cli_certificate.rotate(None, None, None, None, True)
except SoftTimeLimitExceeded:
log_data["message"] = "Certificate rotate: Time limit exceeded."
current_app.logger.error(log_data)
sentry.captureException()
metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
return
log_data["message"] = "rotation completed"
current_app.logger.debug(log_data)
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=3600)
def endpoints_expire():
"""
This celery task removes all endpoints that have not been recently updated
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "endpoints expire",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
try:
cli_endpoints.expire(2) # Time in hours
except SoftTimeLimitExceeded:
log_data["message"] = "endpoint expire: Time limit exceeded."
current_app.logger.error(log_data)
sentry.captureException()
metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
return
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=600)
def get_all_zones():
"""
This celery syncs all zones from the available dns providers
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "refresh all zones from available DNS providers",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
try:
cli_dns_providers.get_all_zones()
except SoftTimeLimitExceeded:
log_data["message"] = "get all zones: Time limit exceeded."
current_app.logger.error(log_data)
sentry.captureException()
metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
return
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=3600)
def check_revoked():
"""
This celery task attempts to check if any certs are expired
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "check if any certificates are revoked revoked",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
try:
cli_certificate.check_revoked()
except SoftTimeLimitExceeded:
log_data["message"] = "Checking revoked: Time limit exceeded."
current_app.logger.error(log_data)
sentry.captureException()
metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
return
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)
@celery.task(soft_time_limit=3600)
def notify_expirations():
"""
This celery task notifies about expiring certs
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
log_data = {
"function": function,
"message": "notify for cert expiration",
"task_id": task_id,
}
if task_id and is_task_active(function, task_id, None):
log_data["message"] = "Skipping task: Task is already active"
current_app.logger.debug(log_data)
return
current_app.logger.debug(log_data)
try:
cli_notification.expirations(current_app.config.get("EXCLUDE_CN_FROM_NOTIFICATION", []))
except SoftTimeLimitExceeded:
log_data["message"] = "Notify expiring Time limit exceeded."
current_app.logger.error(log_data)
sentry.captureException()
metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
return
red.set(f'{function}.last_success', int(time.time()))
metrics.send(f"{function}.success", 'counter', 1)

52
lemur/common/redis.py Normal file
View File

@ -0,0 +1,52 @@
"""
Helper Class for Redis
"""
import redis
import sys
from flask import current_app
from lemur.extensions import sentry
from lemur.factory import create_app
if current_app:
flask_app = current_app
else:
flask_app = create_app()
class RedisHandler:
def __init__(self, host=flask_app.config.get('REDIS_HOST', 'localhost'),
port=flask_app.config.get('REDIS_PORT', 6379),
db=flask_app.config.get('REDIS_DB', 0)):
self.host = host
self.port = port
self.db = db
def redis(self, db=0):
# The decode_responses flag here directs the client to convert the responses from Redis into Python strings
# using the default encoding utf-8. This is client specific.
function = f"{__name__}.{sys._getframe().f_code.co_name}"
try:
red = redis.StrictRedis(host=self.host, port=self.port, db=self.db, encoding="utf-8", decode_responses=True)
red.set("test", 0)
except redis.ConnectionError:
log_data = {
"function": function,
"message": "Redis Connection error",
"host": self.host,
"port": self.port
}
current_app.logger.error(log_data)
sentry.captureException()
return red
def redis_get(key, default=None):
red = RedisHandler().redis()
try:
v = red.get(key)
except redis.exceptions.ConnectionError:
v = None
if not v:
return default
return v

View File

@ -98,6 +98,7 @@ def get_types():
], ],
}, },
{"name": "dyn"}, {"name": "dyn"},
{"name": "ultradns"},
] ]
}, },
) )

View File

@ -33,22 +33,22 @@ def get_dynect_session():
return dynect_session return dynect_session
def _has_dns_propagated(name, token): def _has_dns_propagated(fqdn, token):
txt_records = [] txt_records = []
try: try:
dns_resolver = dns.resolver.Resolver() dns_resolver = dns.resolver.Resolver()
dns_resolver.nameservers = [get_authoritative_nameserver(name)] dns_resolver.nameservers = [get_authoritative_nameserver(fqdn)]
dns_response = dns_resolver.query(name, "TXT") dns_response = dns_resolver.query(fqdn, "TXT")
for rdata in dns_response: for rdata in dns_response:
for txt_record in rdata.strings: for txt_record in rdata.strings:
txt_records.append(txt_record.decode("utf-8")) txt_records.append(txt_record.decode("utf-8"))
except dns.exception.DNSException: except dns.exception.DNSException:
metrics.send("has_dns_propagated_fail", "counter", 1) metrics.send("has_dns_propagated_fail", "counter", 1, metric_tags={"dns": fqdn})
return False return False
for txt_record in txt_records: for txt_record in txt_records:
if txt_record == token: if txt_record == token:
metrics.send("has_dns_propagated_success", "counter", 1) metrics.send("has_dns_propagated_success", "counter", 1, metric_tags={"dns": fqdn})
return True return True
return False return False
@ -61,12 +61,12 @@ def wait_for_dns_change(change_id, account_number=None):
status = _has_dns_propagated(fqdn, token) status = _has_dns_propagated(fqdn, token)
current_app.logger.debug("Record status for fqdn: {}: {}".format(fqdn, status)) current_app.logger.debug("Record status for fqdn: {}: {}".format(fqdn, status))
if status: if status:
metrics.send("wait_for_dns_change_success", "counter", 1) metrics.send("wait_for_dns_change_success", "counter", 1, metric_tags={"dns": fqdn})
break break
time.sleep(10) time.sleep(10)
if not status: if not status:
# TODO: Delete associated DNS text record here # TODO: Delete associated DNS text record here
metrics.send("wait_for_dns_change_fail", "counter", 1) metrics.send("wait_for_dns_change_fail", "counter", 1, metric_tags={"dns": fqdn})
sentry.captureException(extra={"fqdn": str(fqdn), "txt_record": str(token)}) sentry.captureException(extra={"fqdn": str(fqdn), "txt_record": str(token)})
metrics.send( metrics.send(
"wait_for_dns_change_error", "wait_for_dns_change_error",

View File

@ -31,7 +31,7 @@ from lemur.exceptions import InvalidAuthority, InvalidConfiguration, UnknownProv
from lemur.extensions import metrics, sentry from lemur.extensions import metrics, sentry
from lemur.plugins import lemur_acme as acme from lemur.plugins import lemur_acme as acme
from lemur.plugins.bases import IssuerPlugin from lemur.plugins.bases import IssuerPlugin
from lemur.plugins.lemur_acme import cloudflare, dyn, route53 from lemur.plugins.lemur_acme import cloudflare, dyn, route53, ultradns
class AuthorizationRecord(object): class AuthorizationRecord(object):
@ -294,7 +294,7 @@ class AcmeHandler(object):
if not dns_provider.domains: if not dns_provider.domains:
continue continue
for name in dns_provider.domains: for name in dns_provider.domains:
if domain.endswith("." + name): if name == domain or domain.endswith("." + name):
if len(name) > match_length: if len(name) > match_length:
self.dns_providers_for_domain[domain] = [dns_provider] self.dns_providers_for_domain[domain] = [dns_provider]
match_length = len(name) match_length = len(name)
@ -370,7 +370,12 @@ class AcmeHandler(object):
pass pass
def get_dns_provider(self, type): def get_dns_provider(self, type):
provider_types = {"cloudflare": cloudflare, "dyn": dyn, "route53": route53} provider_types = {
"cloudflare": cloudflare,
"dyn": dyn,
"route53": route53,
"ultradns": ultradns,
}
provider = provider_types.get(type) provider = provider_types.get(type)
if not provider: if not provider:
raise UnknownProvider("No such DNS provider: {}".format(type)) raise UnknownProvider("No such DNS provider: {}".format(type))
@ -424,7 +429,12 @@ class ACMEIssuerPlugin(IssuerPlugin):
def get_dns_provider(self, type): def get_dns_provider(self, type):
self.acme = AcmeHandler() self.acme = AcmeHandler()
provider_types = {"cloudflare": cloudflare, "dyn": dyn, "route53": route53} provider_types = {
"cloudflare": cloudflare,
"dyn": dyn,
"route53": route53,
"ultradns": ultradns,
}
provider = provider_types.get(type) provider = provider_types.get(type)
if not provider: if not provider:
raise UnknownProvider("No such DNS provider: {}".format(type)) raise UnknownProvider("No such DNS provider: {}".format(type))

View File

@ -1,8 +1,9 @@
import unittest import unittest
from requests.models import Response
from mock import MagicMock, Mock, patch from mock import MagicMock, Mock, patch
from lemur.plugins.lemur_acme import plugin from lemur.plugins.lemur_acme import plugin, ultradns
class TestAcme(unittest.TestCase): class TestAcme(unittest.TestCase):
@ -360,3 +361,121 @@ class TestAcme(unittest.TestCase):
mock_request_certificate.return_value = ("pem_certificate", "chain") mock_request_certificate.return_value = ("pem_certificate", "chain")
result = provider.create_certificate(csr, issuer_options) result = provider.create_certificate(csr, issuer_options)
assert result assert result
@patch("lemur.plugins.lemur_acme.ultradns.requests")
@patch("lemur.plugins.lemur_acme.ultradns.current_app")
def test_get_ultradns_token(self, mock_current_app, mock_requests):
# ret_val = json.dumps({"access_token": "access"})
the_response = Response()
the_response._content = b'{"access_token": "access"}'
mock_requests.post = Mock(return_value=the_response)
mock_current_app.config.get = Mock(return_value="Test")
result = ultradns.get_ultradns_token()
self.assertTrue(len(result) > 0)
    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    def test_create_txt_record(self, mock_current_app):
        """create_txt_record() should log a success entry and return the (fqdn, token) change id."""
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"
        account_number = "1234567890"
        change_id = (domain, token)
        # Stub out zone lookup and the HTTP POST so no network is touched.
        ultradns.get_zone_name = Mock(return_value=zone)
        mock_current_app.logger.debug = Mock()
        ultradns._post = Mock()
        log_data = {
            "function": "create_txt_record",
            "fqdn": domain,
            "token": token,
            "message": "TXT record created"
        }
        result = ultradns.create_txt_record(domain, token, account_number)
        mock_current_app.logger.debug.assert_called_with(log_data)
        self.assertEqual(result, change_id)
    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    @patch("lemur.extensions.metrics")
    def test_delete_txt_record(self, mock_metrics, mock_current_app):
        """A clean delete path should emit no debug logging and no metrics."""
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"
        account_number = "1234567890"
        change_id = (domain, token)
        mock_current_app.logger.debug = Mock()
        ultradns.get_zone_name = Mock(return_value=zone)
        ultradns._post = Mock()
        ultradns._get = Mock()
        # Stub GET response: a single TXT RRSet containing exactly the challenge token.
        ultradns._get.return_value = {'zoneName': 'test.example.com.com',
                                      'rrSets': [{'ownerName': '_acme-challenge.test.example.com.',
                                                  'rrtype': 'TXT (16)', 'ttl': 5, 'rdata': ['ABCDEFGHIJ']}],
                                      'queryInfo': {'sort': 'OWNER', 'reverse': False, 'limit': 100},
                                      'resultInfo': {'totalCount': 1, 'offset': 0, 'returnedCount': 1}}
        ultradns._delete = Mock()
        mock_metrics.send = Mock()
        ultradns.delete_txt_record(change_id, account_number, domain, token)
        mock_current_app.logger.debug.assert_not_called()
        mock_metrics.send.assert_not_called()
    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    @patch("lemur.extensions.metrics")
    def test_wait_for_dns_change(self, mock_metrics, mock_current_app):
        """When propagation reports success immediately, the final log entry is the public-DNS status."""
        # Force both the authoritative and public checks to succeed at once.
        ultradns._has_dns_propagated = Mock(return_value=True)
        nameserver = "1.1.1.1"
        ultradns.get_authoritative_nameserver = Mock(return_value=nameserver)
        mock_metrics.send = Mock()
        domain = "_acme-challenge.test.example.com"
        token = "ABCDEFGHIJ"
        change_id = (domain, token)
        mock_current_app.logger.debug = Mock()
        ultradns.wait_for_dns_change(change_id)
        # mock_metrics.send.assert_not_called()
        log_data = {
            "function": "wait_for_dns_change",
            "fqdn": domain,
            "status": True,
            "message": "Record status on Public DNS"
        }
        mock_current_app.logger.debug.assert_called_with(log_data)
    def test_get_zone_name(self):
        """The most specific zone matching the domain suffix should be selected."""
        zones = ['example.com', 'test.example.com']
        zone = "test.example.com"
        domain = "_acme-challenge.test.example.com"
        account_number = "1234567890"
        ultradns.get_zones = Mock(return_value=zones)
        result = ultradns.get_zone_name(domain, account_number)
        self.assertEqual(result, zone)
    def test_get_zones(self):
        """get_zones() should return only PRIMARY/ACTIVE zone names, stripped of the trailing dot."""
        account_number = "1234567890"
        path = "a/b/c"
        zones = ['example.com', 'test.example.com']
        # Three zones in the paginated payload; 'example2.com' is SECONDARY
        # and must therefore be filtered out of the result.
        paginate_response = [{
            'properties': {
                'name': 'example.com.', 'accountName': 'example', 'type': 'PRIMARY',
                'dnssecStatus': 'UNSIGNED', 'status': 'ACTIVE', 'resourceRecordCount': 9,
                'lastModifiedDateTime': '2017-06-14T06:45Z'},
            'registrarInfo': {
                'nameServers': {'missing': ['example.ultradns.com.', 'example.ultradns.net.',
                                            'example.ultradns.biz.', 'example.ultradns.org.']}},
            'inherit': 'ALL'}, {
            'properties': {
                'name': 'test.example.com.', 'accountName': 'example', 'type': 'PRIMARY',
                'dnssecStatus': 'UNSIGNED', 'status': 'ACTIVE', 'resourceRecordCount': 9,
                'lastModifiedDateTime': '2017-06-14T06:45Z'},
            'registrarInfo': {
                'nameServers': {'missing': ['example.ultradns.com.', 'example.ultradns.net.',
                                            'example.ultradns.biz.', 'example.ultradns.org.']}},
            'inherit': 'ALL'}, {
            'properties': {
                'name': 'example2.com.', 'accountName': 'example', 'type': 'SECONDARY',
                'dnssecStatus': 'UNSIGNED', 'status': 'ACTIVE', 'resourceRecordCount': 9,
                'lastModifiedDateTime': '2017-06-14T06:45Z'},
            'registrarInfo': {
                'nameServers': {'missing': ['example.ultradns.com.', 'example.ultradns.net.',
                                            'example.ultradns.biz.', 'example.ultradns.org.']}},
            'inherit': 'ALL'}]
        ultradns._paginate = Mock(path, "zones")
        ultradns._paginate.side_effect = [[paginate_response]]
        result = ultradns.get_zones(account_number)
        self.assertEqual(result, zones)

View File

@ -0,0 +1,445 @@
import time
import requests
import json
import sys
import dns
import dns.exception
import dns.name
import dns.query
import dns.resolver
from flask import current_app
from lemur.extensions import metrics, sentry
class Record:
    """
    Read-only wrapper around an UltraDNS RRSet API response.

    Accepts the decoded JSON payload and exposes the fields of the first
    RRSet as properties. Lemur only manages a single TXT RRSet per request,
    so only the first entry of ``rrSets`` is ever relevant.
    """

    def __init__(self, _data):
        # Since we are dealing with only TXT records for Lemur, we expect only 1 RRSet in the response.
        # Thus we default to picking up the first entry (_data["rrSets"][0]) from the response.
        self._rrset = _data["rrSets"][0]

    @property
    def name(self):
        # Owner name (FQDN) of the RRSet.
        return self._rrset["ownerName"]

    @property
    def rrtype(self):
        # Record type string as reported by the API, e.g. "TXT (16)".
        return self._rrset["rrtype"]

    @property
    def rdata(self):
        # List of record data values (the TXT strings).
        return self._rrset["rdata"]

    @property
    def ttl(self):
        # Time-to-live of the RRSet, in seconds.
        return self._rrset["ttl"]
class Zone:
    """
    Read-only wrapper around an UltraDNS zone API object.

    Exposes the zone's name, type, record count and status as properties.
    """

    def __init__(self, _data, _client="Client"):
        self._data = _data
        self._client = _client

    @property
    def name(self):
        """
        Zone name, has a trailing "." at the end, which we manually remove.
        """
        raw_name = self._data["properties"]["name"]
        return raw_name[:-1]

    @property
    def authoritative_type(self):
        """
        Indicates whether the zone is setup as a PRIMARY or SECONDARY
        """
        return self._data["properties"]["type"]

    @property
    def record_count(self):
        # Number of resource records currently in the zone.
        return self._data["properties"]["resourceRecordCount"]

    @property
    def status(self):
        """
        Returns the status of the zone - ACTIVE, SUSPENDED, etc
        """
        return self._data["properties"]["status"]
def get_ultradns_token():
    """
    Request a fresh OAuth access token from the UltraDNS authorization API.

    Tokens are valid for one hour; a new token is requested for every call
    rather than cached.
    """
    base_uri = current_app.config.get("ACME_ULTRADNS_DOMAIN", "")
    credentials = {
        "grant_type": "password",
        "username": current_app.config.get("ACME_ULTRADNS_USERNAME", ""),
        "password": current_app.config.get("ACME_ULTRADNS_PASSWORD", ""),
    }
    response = requests.post(
        f"{base_uri}/v2/authorization/token", data=credentials, verify=True
    )
    return response.json()["access_token"]
def _generate_header():
    """
    Build the HTTP headers for an UltraDNS API request.

    Includes a freshly obtained Bearer token from get_ultradns_token().
    """
    token = get_ultradns_token()
    return {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
def _paginate(path, key):
    """
    Generator over a paginated UltraDNS GET endpoint.

    Yields the value stored under ``key`` for each page, fetching 100
    items per page.
    """
    page_size = 100
    # Initial probe (limit 1) just to learn the total number of results.
    probe = _get(path, {"offset": 0, "limit": 1})
    total = probe["resultInfo"]["totalCount"]
    for offset in range(0, total, page_size):
        page = _get(path, {"offset": offset, "limit": page_size})
        yield page[key]
def _get(path, params=None):
    """Execute a GET against the UltraDNS API and return the decoded JSON body."""
    base_uri = current_app.config.get("ACME_ULTRADNS_DOMAIN", "")
    response = requests.get(
        f"{base_uri}{path}",
        headers=_generate_header(),
        params=params,
        verify=True,
    )
    # Surface HTTP-level failures to the caller as exceptions.
    response.raise_for_status()
    return response.json()
def _delete(path):
    """Execute a DELETE against the UltraDNS API; raises on an HTTP error status."""
    base_uri = current_app.config.get("ACME_ULTRADNS_DOMAIN", "")
    response = requests.delete(
        f"{base_uri}{path}",
        headers=_generate_header(),
        verify=True,
    )
    response.raise_for_status()
def _post(path, params):
    """POST ``params`` as a JSON body to the UltraDNS API; raises on an HTTP error status."""
    base_uri = current_app.config.get("ACME_ULTRADNS_DOMAIN", "")
    body = json.dumps(params)
    response = requests.post(
        f"{base_uri}{path}",
        headers=_generate_header(),
        data=body,
        verify=True,
    )
    response.raise_for_status()
def _has_dns_propagated(name, token, domain):
    """
    Check whether the DNS change made by Lemur have propagated to the public DNS or not.
    Invoked by wait_for_dns_change() function

    :param name: FQDN of the TXT record to look up
    :param token: expected TXT value (the ACME challenge token)
    :param domain: nameserver to query directly.
        NOTE(review): despite the name, this argument is used as the
        resolver's nameserver, not as a domain — callers pass an
        authoritative or public nameserver address here.
    :return: True if a TXT record equal to ``token`` is visible, else False
    """
    txt_records = []
    try:
        dns_resolver = dns.resolver.Resolver()
        # Query the given nameserver directly instead of the system resolver.
        dns_resolver.nameservers = [domain]
        dns_response = dns_resolver.query(name, "TXT")
        for rdata in dns_response:
            for txt_record in rdata.strings:
                txt_records.append(txt_record.decode("utf-8"))
    except dns.exception.DNSException:
        # Any lookup failure (NXDOMAIN, timeout, ...) counts as "not propagated yet".
        function = sys._getframe().f_code.co_name
        metrics.send(f"{function}.fail", "counter", 1)
        return False
    for txt_record in txt_records:
        if txt_record == token:
            function = sys._getframe().f_code.co_name
            metrics.send(f"{function}.success", "counter", 1)
            return True
    return False
def wait_for_dns_change(change_id, account_number=None):
    """
    Waits and checks if the DNS changes have propagated or not.
    First check the domains authoritative server. Once this succeeds,
    we ask a public DNS server (Google <8.8.8.8> in our case).

    :param change_id: (fqdn, token) tuple returned by create_txt_record()
    :param account_number: unused here; kept for a uniform DNS provider interface
    """
    fqdn, token = change_id
    number_of_attempts = 20
    # Phase 1: poll the zone's own authoritative nameserver, 10s between attempts.
    nameserver = get_authoritative_nameserver(fqdn)
    for attempts in range(0, number_of_attempts):
        status = _has_dns_propagated(fqdn, token, nameserver)
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "fqdn": fqdn,
            "status": status,
            "message": "Record status on ultraDNS authoritative server"
        }
        current_app.logger.debug(log_data)
        if status:
            # Give the record extra time to spread before the public check.
            time.sleep(10)
            break
        time.sleep(10)
    if status:
        # Phase 2: confirm visibility on the public resolver (8.8.8.8).
        nameserver = get_public_authoritative_nameserver()
        for attempts in range(0, number_of_attempts):
            status = _has_dns_propagated(fqdn, token, nameserver)
            log_data = {
                "function": function,
                "fqdn": fqdn,
                "status": status,
                "message": "Record status on Public DNS"
            }
            current_app.logger.debug(log_data)
            if status:
                metrics.send(f"{function}.success", "counter", 1)
                break
            time.sleep(10)
    if not status:
        # Propagation never confirmed within the attempt budget; record the failure.
        metrics.send(f"{function}.fail", "counter", 1, metric_tags={"fqdn": fqdn, "txt_record": token})
        sentry.captureException(extra={"fqdn": str(fqdn), "txt_record": str(token)})
    return
def get_zones(account_number):
    """
    Return the names of all ACTIVE, PRIMARY zones from UltraDNS.

    UltraDNS zone names carry a trailing "." (e.g. "lemur.example.com.");
    the Zone wrapper strips it, so the returned list holds bare names.
    """
    zone_names = []
    for page in _paginate("/v2/zones", "zones"):
        for raw_zone in page:
            zone = Zone(raw_zone)
            # Only zones we can actually write to are useful for challenges.
            if zone.authoritative_type == "PRIMARY" and zone.status == "ACTIVE":
                zone_names.append(zone.name)
    return zone_names
def get_zone_name(domain, account_number):
    """
    Return the most specific UltraDNS zone containing ``domain``.

    Example: if fqdn is a.b.c.com and zones exist for both c.com and
    b.c.com, b.c.com is chosen.

    :param domain: FQDN to place within a zone
    :param account_number: forwarded to get_zones()
    :raises Exception: when no configured zone matches the domain
    """
    zones = get_zones(account_number)
    zone_name = ""
    for z in zones:
        # Match only on label boundaries so that e.g. "notexample.com"
        # does not match the zone "example.com" by raw suffix.
        if domain == z or domain.endswith(f".{z}"):
            # Find the most specific zone possible for the domain
            # Ex: If fqdn is a.b.c.com, there is a zone for c.com,
            # and a zone for b.c.com, we want to use b.c.com.
            if z.count(".") > zone_name.count("."):
                zone_name = z
    if not zone_name:
        function = sys._getframe().f_code.co_name
        metrics.send(f"{function}.fail", "counter", 1)
        raise Exception(f"No UltraDNS zone found for domain: {domain}")
    return zone_name
def create_txt_record(domain, token, account_number):
    """
    Create a TXT record for the given domain.

    The part of the domain that matches with the zone becomes the zone name.
    The remainder becomes the owner name (referred to as node name here)
    Example: Let's say we have a zone named "example.com" in UltraDNS and we
    get a request to create a cert for lemur.example.com
    Domain - _acme-challenge.lemur.example.com
    Matching zone - example.com
    Owner name - _acme-challenge.lemur

    :return: change_id tuple (fqdn, token), later consumed by delete_txt_record()
    """
    zone_name = get_zone_name(domain, account_number)
    zone_parts = len(zone_name.split("."))
    # Node (owner) name = the domain labels that are not part of the zone.
    node_name = ".".join(domain.split(".")[:-zone_parts])
    fqdn = f"{node_name}.{zone_name}"
    path = f"/v2/zones/{zone_name}/rrsets/TXT/{node_name}"
    params = {
        "ttl": 5,
        "rdata": [
            f"{token}"
        ],
    }
    try:
        _post(path, params)
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "fqdn": fqdn,
            "token": token,
            "message": "TXT record created"
        }
        current_app.logger.debug(log_data)
    except Exception as e:
        # NOTE(review): every failure is logged as "record already exists" —
        # presumably duplicate RRSet creation is the expected case, but other
        # errors are swallowed here too. Confirm this is intentional.
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "domain": domain,
            "token": token,
            "Exception": e,
            "message": "Unable to add record. Record already exists."
        }
        current_app.logger.debug(log_data)
    change_id = (fqdn, token)
    return change_id
def delete_txt_record(change_id, account_number, domain, token):
    """
    Delete the TXT record that was created in the create_txt_record() function.
    UltraDNS handles records differently compared to Dyn. It creates an RRSet
    which is a set of records of the same type and owner. This means
    that while deleting the record, we cannot delete any individual record from
    the RRSet. Instead, we have to delete the entire RRSet. If multiple certs are
    being created for the same domain at the same time, the challenge TXT records
    that are created will be added under the same RRSet. If the RRSet had more
    than 1 record, then we create a new RRSet on UltraDNS minus the record that
    has to be deleted.
    """
    if not domain:
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "message": "No domain passed"
        }
        current_app.logger.debug(log_data)
        return
    zone_name = get_zone_name(domain, account_number)
    zone_parts = len(zone_name.split("."))
    node_name = ".".join(domain.split(".")[:-zone_parts])
    # rrtype "16" is the numeric DNS type code for TXT records.
    path = f"/v2/zones/{zone_name}/rrsets/16/{node_name}"
    try:
        rrsets = _get(path)
        record = Record(rrsets)
    except Exception as e:
        function = sys._getframe().f_code.co_name
        metrics.send(f"{function}.geterror", "counter", 1)
        # No Text Records remain or host is not in the zone anymore because all records have been deleted.
        return
    try:
        # Remove the record from the RRSet locally
        record.rdata.remove(f"{token}")
    except ValueError:
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "token": token,
            "message": "Token not found"
        }
        current_app.logger.debug(log_data)
        return
    # Delete the RRSet from UltraDNS
    _delete(path)
    # Check if the RRSet has more records. If yes, add the modified RRSet back to UltraDNS
    if len(record.rdata) > 0:
        params = {
            "ttl": 5,
            "rdata": record.rdata,
        }
        _post(path, params)
def delete_acme_txt_records(domain):
    """
    Delete the entire ACME challenge TXT RRSet for ``domain``.

    Guard clauses bail out (with a debug log) when no domain is given or
    when the domain does not start with "_acme-challenge", so that only
    ACME challenge records can ever be removed through this path.
    """
    if not domain:
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "message": "No domain passed"
        }
        current_app.logger.debug(log_data)
        return
    acme_challenge_string = "_acme-challenge"
    if not domain.startswith(acme_challenge_string):
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "domain": domain,
            "acme_challenge_string": acme_challenge_string,
            "message": "Domain does not start with the acme challenge string"
        }
        current_app.logger.debug(log_data)
        return
    # BUG FIX: get_zone_name() requires an account_number argument; calling
    # it with only the domain raised a TypeError. The account number is not
    # used by the UltraDNS zone lookup, so None is passed explicitly.
    zone_name = get_zone_name(domain, None)
    zone_parts = len(zone_name.split("."))
    node_name = ".".join(domain.split(".")[:-zone_parts])
    # rrtype "16" is the numeric DNS type code for TXT records.
    path = f"/v2/zones/{zone_name}/rrsets/16/{node_name}"
    _delete(path)
def get_authoritative_nameserver(domain):
    """Get the authoritative nameserver for the given domain

    Walks the DNS delegation chain downward: starting from the system
    resolver's nameserver, it queries NS records for progressively longer
    suffixes of ``domain`` until the final delegated zone is reached.

    :raises Exception: on NXDOMAIN or any other non-NOERROR response code
    """
    n = dns.name.from_text(domain)
    # Start two labels in (e.g. the TLD) and extend one label per iteration.
    depth = 2
    default = dns.resolver.get_default_resolver()
    nameserver = default.nameservers[0]
    last = False
    while not last:
        # s[0] is the prefix not yet consumed; s[1] is the suffix to query.
        s = n.split(depth)
        last = s[0].to_unicode() == u"@"
        sub = s[1]
        query = dns.message.make_query(sub, dns.rdatatype.NS)
        response = dns.query.udp(query, nameserver)
        rcode = response.rcode()
        if rcode != dns.rcode.NOERROR:
            function = sys._getframe().f_code.co_name
            metrics.send(f"{function}.error", "counter", 1)
            if rcode == dns.rcode.NXDOMAIN:
                raise Exception("%s does not exist." % sub)
            else:
                raise Exception("Error %s" % dns.rcode.to_text(rcode))
        # Delegation info may appear in the authority or the answer section.
        if len(response.authority) > 0:
            rrset = response.authority[0]
        else:
            rrset = response.answer[0]
        rr = rrset[0]
        if rr.rdtype != dns.rdatatype.SOA:
            # An NS record: descend one level, resolving that nameserver's
            # hostname to an address for the next UDP query.
            authority = rr.target
            nameserver = default.query(authority).rrset[0].to_text()
        depth += 1
    return nameserver
def get_public_authoritative_nameserver():
    """Return the public resolver (Google DNS) used for the final propagation check."""
    google_public_dns = "8.8.8.8"
    return google_public_dns

View File

@ -158,7 +158,7 @@ def map_cis_fields(options, csr):
) )
data = { data = {
"profile_name": current_app.config.get("DIGICERT_CIS_PROFILE_NAME"), "profile_name": current_app.config.get("DIGICERT_CIS_PROFILE_NAMES", {}).get(options['authority'].name),
"common_name": options["common_name"], "common_name": options["common_name"],
"additional_dns_names": get_additional_names(options), "additional_dns_names": get_additional_names(options),
"csr": csr, "csr": csr,
@ -423,9 +423,9 @@ class DigiCertCISSourcePlugin(SourcePlugin):
required_vars = [ required_vars = [
"DIGICERT_CIS_API_KEY", "DIGICERT_CIS_API_KEY",
"DIGICERT_CIS_URL", "DIGICERT_CIS_URL",
"DIGICERT_CIS_ROOT", "DIGICERT_CIS_ROOTS",
"DIGICERT_CIS_INTERMEDIATE", "DIGICERT_CIS_INTERMEDIATES",
"DIGICERT_CIS_PROFILE_NAME", "DIGICERT_CIS_PROFILE_NAMES",
] ]
validate_conf(current_app, required_vars) validate_conf(current_app, required_vars)
@ -498,9 +498,9 @@ class DigiCertCISIssuerPlugin(IssuerPlugin):
required_vars = [ required_vars = [
"DIGICERT_CIS_API_KEY", "DIGICERT_CIS_API_KEY",
"DIGICERT_CIS_URL", "DIGICERT_CIS_URL",
"DIGICERT_CIS_ROOT", "DIGICERT_CIS_ROOTS",
"DIGICERT_CIS_INTERMEDIATE", "DIGICERT_CIS_INTERMEDIATES",
"DIGICERT_CIS_PROFILE_NAME", "DIGICERT_CIS_PROFILE_NAMES",
] ]
validate_conf(current_app, required_vars) validate_conf(current_app, required_vars)
@ -537,14 +537,14 @@ class DigiCertCISIssuerPlugin(IssuerPlugin):
if "ECC" in issuer_options["key_type"]: if "ECC" in issuer_options["key_type"]:
return ( return (
"\n".join(str(end_entity).splitlines()), "\n".join(str(end_entity).splitlines()),
current_app.config.get("DIGICERT_ECC_CIS_INTERMEDIATE"), current_app.config.get("DIGICERT_ECC_CIS_INTERMEDIATES", {}).get(issuer_options['authority'].name),
data["id"], data["id"],
) )
# By default return RSA # By default return RSA
return ( return (
"\n".join(str(end_entity).splitlines()), "\n".join(str(end_entity).splitlines()),
current_app.config.get("DIGICERT_CIS_INTERMEDIATE"), current_app.config.get("DIGICERT_CIS_INTERMEDIATES", {}).get(issuer_options['authority'].name),
data["id"], data["id"],
) )
@ -577,4 +577,4 @@ class DigiCertCISIssuerPlugin(IssuerPlugin):
:return: :return:
""" """
role = {"username": "", "password": "", "name": "digicert"} role = {"username": "", "password": "", "name": "digicert"}
return current_app.config.get("DIGICERT_CIS_ROOT"), "", [role] return current_app.config.get("DIGICERT_CIS_ROOTS", {}).get(options['authority'].name), "", [role]

View File

@ -66,7 +66,7 @@ def test_map_fields_with_validity_years(app):
} }
def test_map_cis_fields(app): def test_map_cis_fields(app, authority):
from lemur.plugins.lemur_digicert.plugin import map_cis_fields from lemur.plugins.lemur_digicert.plugin import map_cis_fields
names = [u"one.example.com", u"two.example.com", u"three.example.com"] names = [u"one.example.com", u"two.example.com", u"three.example.com"]
@ -80,6 +80,7 @@ def test_map_cis_fields(app):
"organizational_unit": "Example Org", "organizational_unit": "Example Org",
"validity_end": arrow.get(2017, 5, 7), "validity_end": arrow.get(2017, 5, 7),
"validity_start": arrow.get(2016, 10, 30), "validity_start": arrow.get(2016, 10, 30),
"authority": authority,
} }
data = map_cis_fields(options, CSR_STR) data = map_cis_fields(options, CSR_STR)
@ -104,6 +105,7 @@ def test_map_cis_fields(app):
"organization": "Example, Inc.", "organization": "Example, Inc.",
"organizational_unit": "Example Org", "organizational_unit": "Example Org",
"validity_years": 2, "validity_years": 2,
"authority": authority,
} }
with freeze_time(time_to_freeze=arrow.get(2016, 11, 3).datetime): with freeze_time(time_to_freeze=arrow.get(2016, 11, 3).datetime):

View File

@ -15,6 +15,7 @@ from lemur.sources.models import Source
from lemur.certificates.models import Certificate from lemur.certificates.models import Certificate
from lemur.certificates import service as certificate_service from lemur.certificates import service as certificate_service
from lemur.endpoints import service as endpoint_service from lemur.endpoints import service as endpoint_service
from lemur.extensions import metrics
from lemur.destinations import service as destination_service from lemur.destinations import service as destination_service
from lemur.certificates.schemas import CertificateUploadInputSchema from lemur.certificates.schemas import CertificateUploadInputSchema
@ -94,6 +95,9 @@ def sync_endpoints(source):
certificate_name, endpoint["name"] certificate_name, endpoint["name"]
) )
) )
metrics.send("endpoint.certificate.not.found",
"counter", 1,
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"], "acct": s.get_option("accountNumber", source.options)})
continue continue
policy = endpoint.pop("policy") policy = endpoint.pop("policy")

View File

@ -133,7 +133,7 @@
</div> </div>
<div class="form-group" ng-hide="certificate.authority.plugin.slug == 'acme-issuer'"> <div class="form-group" ng-hide="certificate.authority.plugin.slug == 'acme-issuer'">
<label class="control-label col-sm-2" <label class="control-label col-sm-2"
uib-tooltip="If no date is selected Lemur attempts to issue a 2 year certificate"> uib-tooltip="If no date is selected Lemur attempts to issue a 1 year certificate">
Validity Range <span class="glyphicon glyphicon-question-sign"></span> Validity Range <span class="glyphicon glyphicon-question-sign"></span>
</label> </label>
<div class="col-sm-2"> <div class="col-sm-2">
@ -141,8 +141,6 @@
<option value="">-</option> <option value="">-</option>
<option value="1">1 year</option> <option value="1">1 year</option>
<option value="2">2 years</option> <option value="2">2 years</option>
<option value="3">3 years</option>
<option value="4">4 years</option>
</select> </select>
</div> </div>
<span style="padding-top: 15px" class="text-center col-sm-1"> <span style="padding-top: 15px" class="text-center col-sm-1">

View File

@ -4,6 +4,9 @@
<button ng-repeat="count in params.settings().counts" type="button" ng-class="{'active':params.count()==count}" ng-click="params.count(count)" class="btn btn-default"> <button ng-repeat="count in params.settings().counts" type="button" ng-class="{'active':params.count()==count}" ng-click="params.count(count)" class="btn btn-default">
<span ng-bind="count"></span> <span ng-bind="count"></span>
</button> </button>
<div class="centered-cell">
<span ng-bind="params.data.total"></span>&#160;Found
</div>
</div> </div>
<div class="btn-group pull-right"> <div class="btn-group pull-right">
<span ng-repeat="page in pages" ng-switch="page.type" ng-class="{'disabled': !page.active && !page.current, 'active': page.current}"> <span ng-repeat="page in pages" ng-switch="page.type" ng-class="{'disabled': !page.active && !page.current, 'active': page.current}">

View File

@ -80,6 +80,13 @@ DIGICERT_API_KEY = "api-key"
DIGICERT_ORG_ID = 111111 DIGICERT_ORG_ID = 111111
DIGICERT_ROOT = "ROOT" DIGICERT_ROOT = "ROOT"
DIGICERT_CIS_URL = "mock://www.digicert.com"
DIGICERT_CIS_PROFILE_NAMES = {"sha2-rsa-ecc-root": "ssl_plus"}
DIGICERT_CIS_API_KEY = "api-key"
DIGICERT_CIS_ROOTS = {"root": "ROOT"}
DIGICERT_CIS_INTERMEDIATES = {"inter": "INTERMEDIATE_CA_CERT"}
VERISIGN_URL = "http://example.com" VERISIGN_URL = "http://example.com"
VERISIGN_PEM_PATH = "~/" VERISIGN_PEM_PATH = "~/"
VERISIGN_FIRST_NAME = "Jim" VERISIGN_FIRST_NAME = "Jim"

13
lemur/tests/test_redis.py Normal file
View File

@ -0,0 +1,13 @@
import fakeredis
import time
import sys
def test_write_and_read_from_redis():
    """Smoke test: a value written to (fake) Redis can be read back intact."""
    # Key is namespaced by module + test name, mirroring Lemur's own usage.
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    red = fakeredis.FakeStrictRedis()
    key = f"{function}.last_success"
    value = int(time.time())
    assert red.set(key, value) is True
    # Redis returns bytes; compare after converting back to int.
    assert (int(red.get(key)) == value) is True

View File

@ -5,6 +5,7 @@ black
coverage coverage
factory-boy factory-boy
Faker Faker
fakeredis
freezegun freezegun
moto moto
nose nose

View File

@ -28,6 +28,7 @@ docutils==0.15.2 # via botocore
ecdsa==0.13.2 # via python-jose, sshpubkeys ecdsa==0.13.2 # via python-jose, sshpubkeys
factory-boy==2.12.0 factory-boy==2.12.0
faker==2.0.2 faker==2.0.2
fakeredis==1.0.5
flask==1.1.1 # via pytest-flask flask==1.1.1 # via pytest-flask
freezegun==0.3.12 freezegun==0.3.12
future==0.17.1 # via aws-xray-sdk, python-jose future==0.17.1 # via aws-xray-sdk, python-jose
@ -64,13 +65,15 @@ python-dateutil==2.8.0 # via botocore, faker, freezegun, moto
python-jose==3.0.1 # via moto python-jose==3.0.1 # via moto
pytz==2019.2 # via datetime, moto pytz==2019.2 # via datetime, moto
pyyaml==5.1.2 pyyaml==5.1.2
redis==3.3.8 # via fakeredis
requests-mock==1.7.0 requests-mock==1.7.0
requests==2.22.0 # via docker, moto, requests-mock, responses requests==2.22.0 # via docker, moto, requests-mock, responses
responses==0.10.6 # via moto responses==0.10.6 # via moto
rsa==4.0 # via python-jose rsa==4.0 # via python-jose
s3transfer==0.2.1 # via boto3 s3transfer==0.2.1 # via boto3
six==1.12.0 # via aws-sam-translator, bandit, cfn-lint, cryptography, docker, faker, freezegun, jsonschema, mock, moto, packaging, pyrsistent, python-dateutil, python-jose, requests-mock, responses, stevedore, websocket-client six==1.12.0 # via aws-sam-translator, bandit, cfn-lint, cryptography, docker, faker, fakeredis, freezegun, jsonschema, mock, moto, packaging, pyrsistent, python-dateutil, python-jose, requests-mock, responses, stevedore, websocket-client
smmap2==2.0.5 # via gitdb2 smmap2==2.0.5 # via gitdb2
sortedcontainers==2.1.0 # via fakeredis
sshpubkeys==3.1.0 # via moto sshpubkeys==3.1.0 # via moto
stevedore==1.31.0 # via bandit stevedore==1.31.0 # via bandit
text-unidecode==1.3 # via faker text-unidecode==1.3 # via faker