initial commit

This commit is contained in:
Paul 2020-07-10 00:58:55 +02:00
commit eaa6b99c5e
541 changed files with 12486 additions and 0 deletions

6
.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
# Pillars
/pillar
# Python related
*.swp
*.pyc

117
README.md Normal file
View File

@@ -0,0 +1,117 @@
# paulbsd-salt
## Summary
paulbsd-salt is a set of SaltStack states for the PaulBSD infrastructure
## Howto
### States
```text
states
├── acme
├── androidstudio
├── apparmor
├── appimagekit
├── apt
├── arduino
├── bareos
├── burp
├── cds
├── clamav
├── collectd
├── cron
├── dovecot
├── flash
├── gitea
├── grafana
├── gufw
├── influxdb
├── ipfs
├── iptables
├── java
├── maildb
├── mariadb
├── misc
├── _modules
├── molotov
├── motd
├── netbox
├── nextcloud_desktop
├── nftables
├── nginx
├── npf
├── opendkim
├── openvpn_client
├── openvpn_server
├── packer
├── pkg
├── postfix
├── postgresql
├── pycharm
├── rclone
├── reactor
├── repos
├── rsync
├── rsyslog
├── _runners
├── salt_minion
├── samba
├── sensu
├── services
├── snmp
├── ssh
├── _states
├── sublimetext
├── sudo
├── syncthing
├── telegraf
├── telegram
├── tests
├── time
├── tmux
├── tor
├── transmission
├── users
├── vim
├── vsftpd
├── winpkg
├── wintse
└── zsh
```
### Scripts
TBD
## License
```text
Copyright (c) 2019, 2020 PaulBSD
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the paulbsd project.
```

3
cloud/cloud Normal file
View File

@@ -0,0 +1,3 @@
# vim:set ft=yaml:
wait_for_ip_timeout: 600
sync_after_install: all

20
cloud/cloud.profiles Normal file
View File

@@ -0,0 +1,20 @@
# vim:set ft=yaml:
scaleway-xsmall-arm-ubuntu-16:
provider: scaleway-paris
image: eeb73cbf-78a9-4481-9e38-9aaadaf8e0c9
commercial_type: C1
scaleway-xsmall-x86-ubuntu-16:
provider: scaleway-paris
image: 047f1372-3923-471f-82ca-5ff69dbaf0f7
commercial_type: VC1S
scaleway-lamp:
provider: scaleway-paris
image: 89457135-d446-41ba-a8df-d53e5bb54710
commercial_type: VC1S
scaleway-external:
provider: scaleway-amsterdam
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
commercial_type: VC1S

14
cloud/cloud.providers Normal file
View File

@@ -0,0 +1,14 @@
# vim:set ft=yaml:
scaleway-paris:
access_key:
token:
driver: scaleway
region: par1
api_root: https://cp-par1.scaleway.com
scaleway-amsterdam:
access_key:
token:
driver: scaleway
region: ams1
api_root: https://cp-ams1.scaleway.com

View File

@@ -0,0 +1,6 @@
scaleway-external:
- scw01-ams.paulbsd.com:
minion:
master: salt.paulbsd.com
ssh_username: root
key_filename: /root/.ssh/id_rsa

View File

@@ -0,0 +1,7 @@
scaleway-lamp:
- lamp1:
minion:
master: salt.paulbsd.com
ssh_username: root
key_filename: /root/.ssh/id_rsa
script_args: -P

View File

@@ -0,0 +1,16 @@
scaleway-xsmall-x86-ubuntu-16:
- web1:
minion:
master: salt.paulbsd.com
ssh_username: root
key_filename: /root/.ssh/id_rsa
- web2:
minion:
master: salt.paulbsd.com
ssh_username: root
key_filename: /root/.ssh/id_rsa
- web3:
minion:
master: salt.paulbsd.com
ssh_username: root
key_filename: /root/.ssh/id_rsa

80
config/master Normal file
View File

@@ -0,0 +1,80 @@
# # vi:syntax=yaml
interface: '0.0.0.0'
log_level: debug
log_file: /var/log/salt/master
file_recv: True
#cache: mysql
#master_job_cache: redis
#ext_job_cache: redis
return: smtp
event_return: mysql
external_auth:
pam:
paul:
- .*
salt:
- .*
- '@runner'
- '@wheel'
rest_cherrypy:
port: 8033
disable_ssl: True
runner_dirs:
- /srv/salt/states/_runners
reactor:
- 'salt/minion/*/start':
- salt://reactor/auth.sls
- 'salt/job/*/ret/*':
- salt://reactor/email-on-failure.sls
state_output: changes
cython_enable: True
file_roots:
base:
- /srv/salt/states
pillar_roots:
base:
- /srv/salt/pillar
pillar_includes_override_sls: True
pillar_merge_lists: False
pillar_source_merging_strategy: recurse
ext_pillar:
- etcd: etcd_config root=/salt/common
- etcd: etcd_config root=/salt/hosts/%(minion_id)s
etcd_config:
etcd.host: 127.0.0.1
etcd.port: 2379
mysql.host: 'scw02-ams.paulbsd.com'
mysql.user: 'salt'
mysql.pass: ''
mysql.password: ''
mysql.db: 'salt'
mysql.port: 3306
#mysql.table_name: 'salt_cache'
#smtp.from: 'salt@paulbsd.com'
#smtp.to: 'postmaster@paulbsd.com'
#smtp.host: 'smtp.paulbsd.com'
#smtp.port: 465
#smtp.username: 'sys@paulbsd.com'
#smtp.password: ''
#smtp.tls: True
#mine_functions:
# provision:
# - mine_function: state.sls
# - provision

867
config/master.sample Normal file
View File

@@ -0,0 +1,867 @@
##### Primary configuration settings #####
##########################################
# This configuration file is used to manage the behavior of the Salt Master.
# Values that are commented out but have an empty line after the comment are
# defaults that do not need to be set in the config. If there is no blank line
# after the comment then the value is presented as an example and is not the
# default.
# Per default, the master will automatically include all config files
# from master.d/*.conf (master.d is a directory in the same directory
# as the main master config file).
#default_include: master.d/*.conf
# The address of the interface to bind to:
interface: '0.0.0.0'
# Whether the master should listen for IPv6 connections. If this is set to True,
# the interface option must be adjusted, too. (For example: "interface: '::'")
#ipv6: True
# The tcp port used by the publisher:
#publish_port: 4505
# The user under which the salt master will run. Salt will update all
# permissions to allow the specified user to run the master. The exception is
# the job cache, which must be deleted if this user is changed. If the
# modified files cause conflicts, set verify_env to False.
#user: root
# The port used by the communication interface. The ret (return) port is the
# interface used for the file server, authentication, job returns, etc.
#ret_port: 4506
# Specify the location of the daemon process ID file:
#pidfile: /var/run/salt-master.pid
# The root directory prepended to these options: pki_dir, cachedir,
# sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
# key_logfile, pidfile:
#root_dir: /
# Directory used to store public key data:
#pki_dir: /usr/pkg/etc/salt/pki/master
# Directory to store job and cache data:
# This directory may contain sensitive data and should be protected accordingly.
#
#cachedir: /var/cache/salt/master
# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
# "states", "returners", etc.
#extension_modules: <no default>
# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
# "states", "returners", etc.
# Like 'extension_modules' but can take an array of paths
#module_dirs: <no default>
# - /var/cache/salt/minion/extmods
# Verify and set permissions on configuration directories at startup:
#verify_env: True
# Set the number of hours to keep old job information in the job cache:
#keep_jobs: 24
# Set the default timeout for the salt command and api. The default is 5
# seconds.
#timeout: 5
# The loop_interval option controls the seconds for the master's maintenance
# process check cycle. This process updates file server backends, cleans the
# job cache and executes the scheduler.
#loop_interval: 60
# Set the default outputter used by the salt command. The default is "nested".
#output: nested
# Return minions that timeout when running commands like test.ping
#show_timeout: True
# By default, output is colored. To disable colored output, set the color value
# to False.
#color: True
# Do not strip off the colored output from nested results and state outputs
# (true by default).
# strip_colors: False
# Set the directory used to hold unix sockets:
#sock_dir: /var/run/salt/master
# The master can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the master. Enable if you want to see GPU hardware
# data for your master.
# enable_gpu_grains: False
# The master maintains a job cache. While this is a great addition, it can be
# a burden on the master for larger deployments (over 5000 minions).
# Disabling the job cache will make previously executed jobs unavailable to
# the jobs system and is not generally recommended.
#job_cache: True
# Cache minion grains and pillar data in the cachedir.
#minion_data_cache: True
# Store all returns in the given returner.
# Setting this option requires that any returner-specific configuration also
# be set. See various returners in salt/returners for details on required
# configuration values. (See also, event_return_queue below.)
#
event_return: mysql
# On busy systems, enabling event_returns can cause a considerable load on
# the storage system for returners. Events can be queued on the master and
# stored in a batched fashion using a single transaction for multiple events.
# By default, events are not queued.
#event_return_queue: 0
# Only events returns matching tags in a whitelist
# event_return_whitelist:
# - salt/master/a_tag
# - salt/master/another_tag
# Store all event returns _except_ the tags in a blacklist
# event_return_blacklist:
# - salt/master/not_this_tag
# - salt/master/or_this_one
# Passing very large events can cause the minion to consume large amounts of
# memory. This value tunes the maximum size of a message allowed onto the
# master event bus. The value is expressed in bytes.
#max_event_size: 1048576
# By default, the master AES key rotates every 24 hours. The next command
# following a key rotation will trigger a key refresh from the minion which may
# result in minions which do not respond to the first command after a key refresh.
#
# To tell the master to ping all minions immediately after an AES key refresh, set
# ping_on_rotate to True. This should mitigate the issue where a minion does not
# appear to initially respond after a key is rotated.
#
# Note that ping_on_rotate may cause high load on the master immediately after
# the key rotation event as minions reconnect. Consider this carefully if this
# salt master is managing a large number of minions.
#
# If disabled, it is recommended to handle this event by listening for the
# 'aes_key_rotate' event with the 'key' tag and acting appropriately.
# ping_on_rotate: False
# By default, the master deletes its cache of minion data when the key for that
# minion is removed. To preserve the cache after key deletion, set
# 'preserve_minion_cache' to True.
#
# WARNING: This may have security implications if compromised minions auth with
# a previous deleted minion ID.
#preserve_minion_cache: False
# If max_minions is used in large installations, the master might experience
# high-load situations because of having to check the number of connected
# minions for every authentication. This cache provides the minion-ids of
# all connected minions to all MWorker-processes and greatly improves the
# performance of max_minions.
# con_cache: False
# The master can include configuration from other files. To enable this,
# pass a list of paths to this option. The paths can be either relative or
# absolute; if relative, they are considered to be relative to the directory
# the main master configuration file lives in (this file). Paths can make use
# of shell-style globbing. If no files are matched by a path passed to this
# option, then the master will log a warning message.
#
# Include a config file from some other path:
# include: /usr/pkg/etc/salt/extra_config
#
# Include config from several files and directories:
# include:
# - /usr/pkg/etc/salt/extra_config
##### Large-scale tuning settings #####
##########################################
# Max open files
#
# Each minion connecting to the master uses AT LEAST one file descriptor, the
# master subscription connection. If enough minions connect you might start
# seeing on the console (and then salt-master crashes):
# Too many open files (tcp_listener.cpp:335)
# Aborted (core dumped)
#
# By default this value will be the one of `ulimit -Hn`, ie, the hard limit for
# max open files.
#
# If you wish to set a different value than the default one, uncomment and
# configure this setting. Remember that this value CANNOT be higher than the
# hard limit. Raising the hard limit depends on your OS and/or distribution,
# a good way to find the limit is to search the internet. For example:
# raise max open files hard limit debian
#
#max_open_files: 100000
# The number of worker threads to start. These threads are used to manage
# return calls made from minions to the master. If the master seems to be
# running slowly, increase the number of threads. This setting can not be
# set lower than 3.
#worker_threads: 5
# Set the ZeroMQ high water marks
# http://api.zeromq.org/3-2:zmq-setsockopt
# The publisher interface ZeroMQPubServerChannel
#pub_hwm: 1000
# These two ZMQ HWM settings, salt_event_pub_hwm and event_publisher_pub_hwm
# are significant for masters with thousands of minions. When these are
# insufficiently high it will manifest in random responses missing in the CLI
# and even missing from the job cache. Masters that have fast CPUs and many
# cores with appropriate worker_threads will not need these set as high.
# On deployment with 8,000 minions, 2.4GHz CPUs, 24 cores, 32GiB memory has
# these settings:
#
# salt_event_pub_hwm: 128000
# event_publisher_pub_hwm: 64000
# ZMQ high-water-mark for SaltEvent pub socket
#salt_event_pub_hwm: 20000
# ZMQ high-water-mark for EventPublisher pub socket
#event_publisher_pub_hwm: 10000
##### Security settings #####
##########################################
# Enable "open mode", this mode still maintains encryption, but turns off
# authentication, this is only intended for highly secure environments or for
# the situation where your keys end up in a bad state. If you run in open mode
# you do so at your own risk!
#open_mode: False
# Enable auto_accept, this setting will automatically accept all incoming
# public keys from the minions. Note that this is insecure.
#auto_accept: False
# Time in minutes that an incoming public key with a matching name found in
# pki_dir/minion_autosign/keyid is automatically accepted. Expired autosign keys
# are removed when the master checks the minion_autosign directory.
# 0 equals no timeout
# autosign_timeout: 120
# If the autosign_file is specified, incoming keys specified in the
# autosign_file will be automatically accepted. This is insecure. Regular
# expressions as well as globing lines are supported.
#autosign_file: /usr/pkg/etc/salt/autosign.conf
# Works like autosign_file, but instead allows you to specify minion IDs for
# which keys will automatically be rejected. Will override both membership in
# the autosign_file and the auto_accept setting.
#autoreject_file: /usr/pkg/etc/salt/autoreject.conf
# Enable permissive access to the salt keys. This allows you to run the
# master or minion as root, but have a non-root group be given access to
# your pki_dir. To make the access explicit, root must belong to the group
# you've given access to. This is potentially quite insecure. If an autosign_file
# is specified, enabling permissive_pki_access will allow group access to that
# specific file.
#permissive_pki_access: False
# Allow users on the master access to execute specific commands on minions.
# This setting should be treated with care since it opens up execution
# capabilities to non root users. By default this capability is completely
# disabled.
#client_acl:
# larry:
# - test.ping
# - network.*
#
# Blacklist any of the following users or modules
#
# This example would blacklist all non sudo users, including root from
# running any commands. It would also blacklist any use of the "cmd"
# module. This is completely disabled by default.
#
#client_acl_blacklist:
# users:
# - root
# - '^(?!sudo_).*$' # all non sudo users
# modules:
# - cmd
# Enforce client_acl & client_acl_blacklist when users have sudo
# access to the salt command.
#
#sudo_acl: False
# The external auth system uses the Salt auth modules to authenticate and
# validate users to access areas of the Salt system.
external_auth:
pam:
paul:
- .*
salt:
- .*
- '@runner'
- '@wheel'
rest_cherrypy:
# host: 127.0.0.1
port: 8000
disable_ssl: True
#
# Time (in seconds) for a newly generated token to live. Default: 12 hours
#token_expire: 43200
# Allow minions to push files to the master. This is disabled by default, for
# security purposes.
#file_recv: False
# Set a hard-limit on the size of the files that can be pushed to the master.
# It will be interpreted as megabytes. Default: 100
#file_recv_max_size: 100
# Signature verification on messages published from the master.
# This causes the master to cryptographically sign all messages published to its event
# bus, and minions then verify that signature before acting on the message.
#
# This is False by default.
#
# Note that to facilitate interoperability with masters and minions that are different
# versions, if sign_pub_messages is True but a message is received by a minion with
# no signature, it will still be accepted, and a warning message will be logged.
# Conversely, if sign_pub_messages is False, but a minion receives a signed
# message it will be accepted, the signature will not be checked, and a warning message
# will be logged. This behavior went away in Salt 2014.1.0 and these two situations
# will cause minion to throw an exception and drop the message.
# sign_pub_messages: False
##### Salt-SSH Configuration #####
##########################################
# Pass in an alternative location for the salt-ssh roster file
#roster_file: /usr/pkg/etc/salt/roster
# Pass in minion option overrides that will be inserted into the SHIM for
# salt-ssh calls. The local minion config is not used for salt-ssh. Can be
# overridden on a per-minion basis in the roster (`minion_opts`)
#ssh_minion_opts:
# gpg_keydir: /root/gpg
##### Master Module Management #####
##########################################
# Manage how master side modules are loaded.
# Add any additional locations to look for master runners:
#runner_dirs: []
runner_dirs:
- /usr/pkg/etc/salt/_runners
reactor:
- 'salt/minion/*/start':
- salt://reactor/auth.sls
- 'salt/job/*/ret/*':
- salt://reactor/email-on-failure.sls
state_output: changes
# Enable Cython for master side modules:
cython_enable: True
##### State System settings #####
##########################################
# The state system uses a "top" file to tell the minions what environment to
# use and what modules to use. The state_top file is defined relative to the
# root of the base environment as defined in "File Server settings" below.
#state_top: top.sls
# The master_tops option replaces the external_nodes option by creating
# a plugable system for the generation of external top data. The external_nodes
# option is deprecated by the master_tops option.
#
# To gain the capabilities of the classic external_nodes system, use the
# following configuration:
# master_tops:
# ext_nodes: <Shell command which returns yaml>
#
#master_tops: {}
# The external_nodes option allows Salt to gather data that would normally be
# placed in a top file. The external_nodes option is the executable that will
# return the ENC data. Remember that Salt will look for external nodes AND top
# files and combine the results if both are enabled!
#external_nodes: None
# The renderer to use on the minions to render the state data
#renderer: yaml_jinja
# The Jinja renderer can strip extra carriage returns and whitespace
# See http://jinja.pocoo.org/docs/api/#high-level-api
#
# If this is set to True the first newline after a Jinja block is removed
# (block, not variable tag!). Defaults to False, corresponds to the Jinja
# environment init variable "trim_blocks".
#jinja_trim_blocks: True
#
# If this is set to True leading spaces and tabs are stripped from the start
# of a line to a block. Defaults to False, corresponds to the Jinja
# environment init variable "lstrip_blocks".
#jinja_lstrip_blocks: False
# The failhard option tells the minions to stop immediately after the first
# failure detected in the state execution, defaults to False
#failhard: False
# The state_verbose and state_output settings can be used to change the way
# state system data is printed to the display. By default all data is printed.
# The state_verbose setting can be set to True or False, when set to False
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True
# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line. If set to 'mixed', the output
# will be terse unless a state failed, in which case that output will be full.
# If set to 'changes', the output will be full unless the state didn't change.
#state_output: full
# Automatically aggregate all states that have support for mod_aggregate by
# setting to 'True'. Or pass a list of state module names to automatically
# aggregate just those types.
#
# state_aggregate:
# - pkg
#
#state_aggregate: False
# Send progress events as each function in a state run completes execution
# by setting to 'True'. Progress events are in the format
# 'salt/job/<JID>/prog/<MID>/<RUN NUM>'.
#state_events: False
##### File Server settings #####
##########################################
# Salt runs a lightweight file server written in zeromq to deliver files to
# minions. This file server is built into the master daemon and does not
# require a dedicated port.
# The file server works on environments passed to the master, each environment
# can have multiple root directories, the subdirectories in the multiple file
# roots cannot match, otherwise the downloaded files will not be able to be
# reliably ensured. A base environment is required to house the top file.
# Example:
# file_roots:
# base:
# - /usr/pkg/etc/salt/states/
# dev:
# - /usr/pkg/etc/salt/states/dev/services
# - /usr/pkg/etc/salt/states/dev/states
# prod:
# - /usr/pkg/etc/salt/states/prod/services
# - /usr/pkg/etc/salt/states/prod/states
#
file_roots:
base:
- /usr/pkg/etc/salt/states
- /home/shares/repo
#
# When using multiple environments, each with their own top file, the
# default behaviour is an unordered merge. To prevent top files from
# being merged together and instead to only use the top file from the
# requested environment, set this value to 'same'.
#top_file_merging_strategy: merge
# To specify the order in which environments are merged, set the ordering
# in the env_order option. Given a conflict, the last matching value will
# win.
#env_order: ['base', 'dev', 'prod']
# If top_file_merging_strategy is set to 'same' and an environment does not
# contain a top file, the top file in the environment specified by default_top
# will be used instead.
#default_top: base
# The hash_type is the hash to use when discovering the hash of a file on
# the master server. The default is md5, but sha1, sha224, sha256, sha384
# and sha512 are also supported.
#
# Prior to changing this value, the master should be stopped and all Salt
# caches should be cleared.
#hash_type: md5
# The buffer size in the file server can be adjusted here:
#file_buffer_size: 1048576
# A regular expression (or a list of expressions) that will be matched
# against the file path before syncing the modules and states to the minions.
# This includes files affected by the file.recurse state.
# For example, if you manage your custom modules and states in subversion
# and don't want all the '.svn' folders and content synced to your minions,
# you could set this to '/\.svn($|/)'. By default nothing is ignored.
#file_ignore_regex:
# - '/\.svn($|/)'
# - '/\.git($|/)'
# A file glob (or list of file globs) that will be matched against the file
# path before syncing the modules and states to the minions. This is similar
# to file_ignore_regex above, but works on globs instead of regex. By default
# nothing is ignored.
# file_ignore_glob:
# - '*.pyc'
# - '*/somefolder/*.bak'
# - '*.swp'
# File Server Backend
#
# Salt supports a modular fileserver backend system, this system allows
# the salt master to link directly to third party systems to gather and
# manage the files available to minions. Multiple backends can be
# configured and will be searched for the requested file in the order in which
# they are defined here. The default setting only enables the standard backend
# "roots" which uses the "file_roots" option.
#fileserver_backend:
# - roots
#
# To use multiple backends list them in the order they are searched:
#fileserver_backend:
# - git
# - roots
#
# Uncomment the line below if you do not want the file_server to follow
# symlinks when walking the filesystem tree. This is set to True
# by default. Currently this only applies to the default roots
# fileserver_backend.
#fileserver_followsymlinks: False
#
# Uncomment the line below if you do not want symlinks to be
# treated as the files they are pointing to. By default this is set to
# False. By uncommenting the line below, any detected symlink while listing
# files on the Master will not be returned to the Minion.
#fileserver_ignoresymlinks: True
#
# By default, the Salt fileserver recurses fully into all defined environments
# to attempt to find files. To limit this behavior so that the fileserver only
# traverses directories with SLS files and special Salt directories like _modules,
# enable the option below. This might be useful for installations where a file root
# has a very large number of files and performance is impacted. Default is False.
# fileserver_limit_traversal: False
#
# The fileserver can fire events off every time the fileserver is updated,
# these are disabled by default, but can be easily turned on by setting this
# flag to True
#fileserver_events: False
# Git File Server Backend Configuration
#
# Gitfs can be provided by one of two python modules: GitPython or pygit2. If
# using pygit2, both libgit2 and git must also be installed.
#gitfs_provider: gitpython
#
# When using the git fileserver backend at least one git remote needs to be
# defined. The user running the salt master will need read access to the repo.
#
# The repos will be searched in order to find the file requested by a client
# and the first repo to have the file will return it.
# When using the git backend branches and tags are translated into salt
# environments.
# Note: file:// repos will be treated as a remote, so refs you want used must
# exist in that repo as *local* refs.
#gitfs_remotes:
# - git://github.com/saltstack/salt-states.git
# - file:///var/git/saltmaster
#
# The gitfs_ssl_verify option specifies whether to ignore ssl certificate
# errors when contacting the gitfs backend. You might want to set this to
# false if you're using a git backend that uses a self-signed certificate but
# keep in mind that setting this flag to anything other than the default of True
# is a security concern, you may want to try using the ssh transport.
#gitfs_ssl_verify: True
#
# The gitfs_root option gives the ability to serve files from a subdirectory
# within the repository. The path is defined relative to the root of the
# repository and defaults to the repository root.
#gitfs_root: somefolder/otherfolder
#
#
##### Pillar settings #####
##########################################
# Salt Pillars allow for the building of global data that can be made selectively
# available to different minions based on minion grain filtering. The Salt
# Pillar is laid out in the same fashion as the file server, with environments,
# a top file and sls files. However, pillar data does not need to be in the
# highstate format, and is generally just key/value pairs.
pillar_roots:
base:
- /usr/pkg/etc/salt/pillar
#
#ext_pillar:
# - hiera: /etc/hiera.yaml
# - cmd_yaml: cat /usr/pkg/etc/salt/yaml
#ext_pillar:
# - mysql:
# fromdb:
# query: 'SELECT
# FROM pillar
# WHERE minion_pattern LIKE %s'
# depth: 5
# as_list: True
# with_lists: [1,3]
# The ext_pillar_first option allows for external pillar sources to populate
# before file system pillar. This allows for targeting file system pillar from
# ext_pillar.
#ext_pillar_first: False
# The pillar_gitfs_ssl_verify option specifies whether to ignore ssl certificate
# errors when contacting the pillar gitfs backend. You might want to set this to
# false if you're using a git backend that uses a self-signed certificate but
# keep in mind that setting this flag to anything other than the default of True
# is a security concern, you may want to try using the ssh transport.
#pillar_gitfs_ssl_verify: True
# The pillar_opts option adds the master configuration file data to a dict in
# the pillar called "master". This is used to set simple configurations in the
# master config file that can then be used on minions.
#pillar_opts: False
# The pillar_safe_render_error option prevents the master from passing pillar
# render errors to the minion. This is set on by default because the error could
# contain templating data which would give that minion information it shouldn't
# have, like a password! When set true the error message will only show:
# Rendering SLS 'my.sls' failed. Please see master log for details.
#pillar_safe_render_error: True
# The pillar_source_merging_strategy option allows you to configure merging strategy
# between different sources. It accepts four values: recurse, aggregate, overwrite,
# or smart. Recurse will merge recursively mapping of data. Aggregate instructs
# aggregation of elements between sources that use the #!yamlex renderer. Overwrite
# will overwrite elements according to the order in which they are processed. This is
# behavior of the 2014.1 branch and earlier. Smart guesses the best strategy based
# on the "renderer" setting and is the default value.
#pillar_source_merging_strategy: smart
# Recursively merge lists by aggregating them instead of replacing them.
#pillar_merge_lists: False
##### Syndic settings #####
##########################################
# The Salt syndic is used to pass commands through a master from a higher
# master. Using the syndic is simple. If this is a master that will have
# syndic servers(s) below it, then set the "order_masters" setting to True.
#
# If this is a master that will be running a syndic daemon for passthrough, then
# the "syndic_master" setting needs to be set to the location of the master server
# to receive commands from.
# Set the order_masters setting to True if this master will command lower
# masters' syndic interfaces.
#order_masters: False
# If this master will be running a salt syndic daemon, syndic_master tells
# this master where to receive commands from.
#syndic_master: masterofmaster
# This is the 'ret_port' of the MasterOfMaster:
#syndic_master_port: 4506
# PID file of the syndic daemon:
#syndic_pidfile: /var/run/salt-syndic.pid
# LOG file of the syndic daemon:
#syndic_log_file: syndic.log
##### Peer Publish settings #####
##########################################
# Salt minions can send commands to other minions, but only if the minion is
# allowed to. By default "Peer Publication" is disabled, and when enabled it
# is enabled for specific minions and specific commands. This allows secure
# compartmentalization of commands based on individual minions.
# The configuration uses regular expressions to match minions and then a list
# of regular expressions to match functions. The following will allow the
# minion authenticated as foo.example.com to execute functions from the test
# and pkg modules.
#peer:
# foo.example.com:
# - test.*
# - pkg.*
#
# This will allow all minions to execute all commands:
#peer:
# .*:
# - .*
#
# This is not recommended, since it would allow anyone who gets root on any
# single minion to instantly have root on all of the minions!
# Minions can also be allowed to execute runners from the salt master.
# Since executing a runner from the minion could be considered a security risk,
# it needs to be enabled. This setting functions just like the peer setting
# except that it opens up runners instead of module functions.
#
# All peer runner support is turned off by default and must be enabled before
# using. This will enable all peer runners for all minions:
#peer_run:
# .*:
# - .*
#
# To enable just the manage.up runner for the minion foo.example.com:
#peer_run:
# foo.example.com:
# - manage.up
#
#
##### Mine settings #####
#####################################
# Restrict mine.get access from minions. By default any minion has a full access
# to get all mine data from the master cache. In the ACL definition below, only pcre matches
# are allowed.
# mine_get:
# .*:
# - .*
#
# The example below enables minion foo.example.com to get 'network.interfaces' mine
# data only, minions web* to get all network.* and disk.* mine data and all other
# minions won't get any mine data.
# mine_get:
# foo.example.com:
# - network.interfaces
# web.*:
# - network.*
# - disk.*
##### Logging settings #####
##########################################
# The location of the master log file
# The master log can be sent to a regular file, local path name, or network
# location. Remote logging works best when configured to use rsyslogd(8) (e.g.:
# ``file:///dev/log``), with rsyslogd(8) configured for network logging. The URI
# format is: <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
log_file: /var/log/salt/master
#log_file: file:///dev/log
#log_file: udp://loghost:10514
#log_file: /var/log/salt/master
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']
#
#log_level: warning
#log_level: debug
# The level of messages to send to the log file.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
# If using 'log_granular_levels' this must be set to the highest desired level.
#log_level_logfile: warning
# The date and time format used in log messages. Allowed date/time formatting
# can be seen here: http://docs.python.org/library/time.html#time.strftime
#log_datefmt: '%H:%M:%S'
#log_datefmt_logfile: '%Y-%m-%d %H:%M:%S'
# The format of the console logging messages. Allowed formatting options can
# be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
#
# Console log colors are specified by these additional formatters:
#
# %(colorlevel)s
# %(colorname)s
# %(colorprocess)s
# %(colormsg)s
#
# Since it is desirable to include the surrounding brackets, '[' and ']', in
# the coloring of the messages, these color formatters also include padding as
# well. Color LogRecord attributes are only available for console logging.
#
#log_fmt_console: '%(colorlevel)s %(colormsg)s'
#log_fmt_console: '[%(levelname)-8s] %(message)s'
#
#log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
# This can be used to control logging levels more specifically. This
# example sets the main salt library at the 'warning' level, but sets
# 'salt.modules' to log at the 'debug' level:
# log_granular_levels:
# 'salt': 'warning'
# 'salt.modules': 'debug'
#
#log_granular_levels: {}
##### Node Groups ######
##########################################
# Node groups allow for logical groupings of minion nodes. A group consists of a group
# name and a compound target.
#nodegroups:
# group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
# group2: 'G@os:Debian and foo.domain.com'
##### Range Cluster settings #####
##########################################
# The range server (and optional port) that serves your cluster information
# https://github.com/ytoolshed/range/wiki/%22yamlfile%22-module-file-spec
#
#range_server: range:80
##### Windows Software Repo settings #####
###########################################
# Location of the repo on the master:
#winrepo_dir_ng: '/usr/pkg/etc/salt/states/win/repo-ng'
#
# List of git repositories to include with the local repo:
#winrepo_remotes_ng:
# - 'https://github.com/saltstack/salt-winrepo-ng.git'
##### Windows Software Repo settings - Pre 2015.8 #####
########################################################
# Legacy repo settings for pre-2015.8 Windows minions.
#
# Location of the repo on the master:
#winrepo_dir: '/usr/pkg/etc/salt/states/win/repo'
#
# Location of the master's repo cache file:
#winrepo_mastercachefile: '/usr/pkg/etc/salt/states/win/repo/winrepo.p'
#
# List of git repositories to include with the local repo:
#winrepo_remotes:
# - 'https://github.com/saltstack/salt-winrepo.git'
##### Returner settings ######
############################################
# Which returner(s) will be used for minion's result:
#return: mysql
#return: smtp
return: mysql,smtp
mysql.host: '127.0.0.1'
mysql.user: 'salt'
mysql.pass: 'pass'
mysql.db: 'salt'
mysql.port: 3306
###### Miscellaneous settings ######
############################################
# Default match type for filtering events tags: startswith, endswith, find, regex, fnmatch
#event_match_type: startswith
smtp.from: salt@example.com
smtp.to: salt@example.com,bob@example.com
smtp.host: localhost
smtp.port: 25

10
config/minion Executable file
View File

@ -0,0 +1,10 @@
## Managed by PaulBSD Salt
master: salt.paulbsd.com
hash_type: sha256
state_verbose: True
tcp_keepalive: True
tcp_keepalive_idle: 300
random_reauth_delay: 60
recon_default: 1000
recon_max: 10000
recon_randomize: True

781
config/minion.sample Normal file
View File

@ -0,0 +1,781 @@
##### Primary configuration settings #####
##########################################
# This configuration file is used to manage the behavior of the Salt Minion.
# With the exception of the location of the Salt Master Server, values that are
# commented out but have an empty line after the comment are defaults that need
# not be set in the config. If there is no blank line after the comment, the
# value is presented as an example and is not the default.
# Per default the minion will automatically include all config files
# from minion.d/*.conf (minion.d is a directory in the same directory
# as the main minion config file).
#default_include: minion.d/*.conf
# Set the location of the salt master server. If the master server cannot be
# resolved, then the minion will fail to start.
#master: salt
# Set http proxy information for the minion when doing requests
#proxy_host:
#proxy_port:
#proxy_username:
#proxy_password:
# If multiple masters are specified in the 'master' setting, the default behavior
# is to always try to connect to them in the order they are listed. If random_master is
# set to True, the order will be randomized instead. This can be helpful in distributing
# the load of many minions executing salt-call requests, for example, from a cron job.
# If only one master is listed, this setting is ignored and a warning will be logged.
# NOTE: If master_type is set to failover, use master_shuffle instead.
#random_master: False
# Use if master_type is set to failover.
#master_shuffle: False
# Minions can connect to multiple masters simultaneously (all masters
# are "hot"), or can be configured to failover if a master becomes
# unavailable. Multiple hot masters are configured by setting this
# value to "str". Failover masters can be requested by setting
# to "failover". MAKE SURE TO SET master_alive_interval if you are
# using failover.
# master_type: str
# Poll interval in seconds for checking if the master is still there. Only
# respected if master_type above is "failover". To disable the interval entirely,
# set the value to -1. (This may be necessary on machines which have high numbers
# of TCP connections, such as load balancers.)
# master_alive_interval: 30
# If the minion is in multi-master mode and the master_type configuration option
# is set to "failover", this setting can be set to "True" to force the minion
# to fail back to the first master in the list if the first master is back online.
#master_failback: False
# If the minion is in multi-master mode, the "master_type" configuration is set to
# "failover", and the "master_failback" option is enabled, the master failback
# interval can be set to ping the top master with this interval, in seconds.
#master_failback_interval: 0
# Set whether the minion should connect to the master via IPv6:
#ipv6: False
# Set the number of seconds to wait before attempting to resolve
# the master hostname if name resolution fails. Defaults to 30 seconds.
# Set to zero if the minion should shutdown and not retry.
# retry_dns: 30
# Set the port used by the master reply and authentication server.
#master_port: 4506
# The user to run salt.
#user: root
# The user to run salt remote execution commands as via sudo. If this option is
# enabled then sudo will be used to change the active user executing the remote
# command. If enabled the user will need to be allowed access via the sudoers
# file for the user that the salt minion is configured to run as. The most
# common option would be to use the root user. If this option is set the user
# option should also be set to a non-root user. If migrating from a root minion
# to a non root minion the minion cache should be cleared and the minion pki
# directory will need to be changed to the ownership of the new user.
#sudo_user: root
# Specify the location of the daemon process ID file.
#pidfile: /var/run/salt-minion.pid
# The root directory prepended to these options: pki_dir, cachedir, log_file,
# sock_dir, pidfile.
#root_dir: /
# The path to the minion's configuration file.
#conf_file: /usr/pkg/etc/salt/minion
# The directory to store the pki information in
#pki_dir: /usr/pkg/etc/salt/pki/minion
# Explicitly declare the id for this minion to use, if left commented the id
# will be the hostname as returned by the python call: socket.getfqdn()
# Since salt uses detached ids it is possible to run multiple minions on the
# same machine but with different ids, this can be useful for salt compute
# clusters.
#id:
# Cache the minion id to a file when the minion's id is not statically defined
# in the minion config. Defaults to "True". This setting prevents potential
# problems when automatic minion id resolution changes, which can cause the
# minion to lose connection with the master. To turn off minion id caching,
# set this config to ``False``.
#minion_id_caching: True
# Append a domain to a hostname in the event that it does not exist. This is
# useful for systems where socket.getfqdn() does not actually result in a
# FQDN (for instance, Solaris).
#append_domain:
# Custom static grains for this minion can be specified here and used in SLS
# files just like all other grains. This example sets 4 custom grains, with
# the 'roles' grain having two values that can be matched against.
#grains:
# roles:
# - webserver
# - memcache
# deployment: datacenter4
# cabinet: 13
# cab_u: 14-15
#
# Where cache data goes.
# This data may contain sensitive data and should be protected accordingly.
#cachedir: /var/cache/salt/minion
# Append minion_id to these directories. Helps with
# multiple proxies and minions running on the same machine.
# Allowed elements in the list: pki_dir, cachedir, extension_modules
# Normally not needed unless running several proxies and/or minions on the same machine
# Defaults to ['cachedir'] for proxies, [] (empty list) for regular minions
#append_minionid_config_dirs:
# Verify and set permissions on configuration directories at startup.
#verify_env: True
# The minion can locally cache the return data from jobs sent to it, this
# can be a good way to keep track of jobs the minion has executed
# (on the minion side). By default this feature is disabled, to enable, set
# cache_jobs to True.
#cache_jobs: False
# Set the directory used to hold unix sockets.
#sock_dir: /var/run/salt/minion
# Set the default outputter used by the salt-call command. The default is
# "nested".
#output: nested
#
# By default output is colored. To disable colored output, set the color value
# to False.
#color: True
# Do not strip off the colored output from nested results and state outputs
# (true by default).
# strip_colors: False
# Backup files that are replaced by file.managed and file.recurse under
# 'cachedir'/file_backups relative to their original location and appended
# with a timestamp. The only valid setting is "minion". Disabled by default.
#
# Alternatively this can be specified for each file in state files:
# /etc/ssh/sshd_config:
# file.managed:
# - source: salt://ssh/sshd_config
# - backup: minion
#
#backup_mode: minion
# When waiting for a master to accept the minion's public key, salt will
# continuously attempt to reconnect until successful. This is the time, in
# seconds, between those reconnection attempts.
#acceptance_wait_time: 10
# If this is nonzero, the time between reconnection attempts will increase by
# acceptance_wait_time seconds per iteration, up to this maximum. If this is
# set to zero, the time between reconnection attempts will stay constant.
#acceptance_wait_time_max: 0
# If the master rejects the minion's public key, retry instead of exiting.
# Rejected keys will be handled the same as waiting on acceptance.
#rejected_retry: False
# When the master key changes, the minion will try to re-auth itself to receive
# the new master key. In larger environments this can cause a SYN flood on the
# master because all minions try to re-auth immediately. To prevent this and
# have a minion wait for a random amount of time, use this optional parameter.
# The wait-time will be a random number of seconds between 0 and the defined value.
random_reauth_delay: 60
# When waiting for a master to accept the minion's public key, salt will
# continuously attempt to reconnect until successful. This is the timeout value,
# in seconds, for each individual attempt. After this timeout expires, the minion
# will wait for acceptance_wait_time seconds before trying again. Unless your master
# is under unusually heavy load, this should be left at the default.
#auth_timeout: 60
# Number of consecutive SaltReqTimeoutError that are acceptable when trying to
# authenticate.
#auth_tries: 7
# The number of attempts to connect to a master before giving up.
# Set this to -1 for unlimited attempts. This allows for a master to have
# downtime and the minion to reconnect to it later when it comes back up.
# In 'failover' mode, it is the number of attempts for each set of masters.
# In this mode, it will cycle through the list of masters for each attempt.
#
# This is different than auth_tries because auth_tries attempts to
# retry auth attempts with a single master. auth_tries is under the
# assumption that you can connect to the master but not gain
# authorization from it. master_tries will still cycle through all
# the masters in a given try, so it is appropriate if you expect
# occasional downtime from the master(s).
#master_tries: 1
# If authentication fails due to SaltReqTimeoutError during a ping_interval,
# cause sub minion process to restart.
#auth_safemode: False
# Ping Master to ensure connection is alive (minutes).
#ping_interval: 0
# To auto recover minions if master changes IP address (DDNS)
# auth_tries: 10
# auth_safemode: False
# ping_interval: 90
#
# Minions won't know master is missing until a ping fails. After the ping fail,
# the minion will attempt authentication and likely fails out and cause a restart.
# When the minion restarts it will resolve the master's IP and attempt to reconnect.
# If you don't have any problems with syn-floods, don't bother with the
# three recon_* settings described below, just leave the defaults!
#
# The ZeroMQ pull-socket that binds to the masters publishing interface tries
# to reconnect immediately, if the socket is disconnected (for example if
# the master processes are restarted). In large setups this will have all
# minions reconnect immediately which might flood the master (the ZeroMQ-default
# is usually a 100ms delay). To prevent this, these three recon_* settings
# can be used.
# recon_default: the interval in milliseconds that the socket should wait before
# trying to reconnect to the master (1000ms = 1 second)
#
# recon_max: the maximum time a socket should wait. each interval the time to wait
# is calculated by doubling the previous time. if recon_max is reached,
# it starts again at recon_default. Short example:
#
# reconnect 1: the socket will wait 'recon_default' milliseconds
# reconnect 2: 'recon_default' * 2
# reconnect 3: ('recon_default' * 2) * 2
# reconnect 4: value from previous interval * 2
# reconnect 5: value from previous interval * 2
# reconnect x: if value >= recon_max, it starts again with recon_default
#
# recon_randomize: generate a random wait time on minion start. The wait time will
# be a random value between recon_default and recon_default +
# recon_max. Having all minions reconnect with the same recon_default
# and recon_max value kind of defeats the purpose of being able to
# change these settings. If all minions have the same values and your
# setup is quite large (several thousand minions), they will still
#                  flood the master. The desired behavior is to have a timeframe
#                  within which all minions try to reconnect.
#
# Example on how to use these settings. The goal: have all minions reconnect within a
# 60 second timeframe on a disconnect.
recon_default: 1000
recon_max: 59000
recon_randomize: True
#
# Each minion will have a randomized reconnect value between 'recon_default'
# and 'recon_default + recon_max', which in this example means between 1000ms
# and 60000ms (or between 1 and 60 seconds). The generated random-value will be
# doubled after each attempt to reconnect. Lets say the generated random
# value is 11 seconds (or 11000ms).
# reconnect 1: wait 11 seconds
# reconnect 2: wait 22 seconds
# reconnect 3: wait 33 seconds
# reconnect 4: wait 44 seconds
# reconnect 5: wait 55 seconds
# reconnect 6: wait time is bigger than 60 seconds (recon_default + recon_max)
# reconnect 7: wait 11 seconds
# reconnect 8: wait 22 seconds
# reconnect 9: wait 33 seconds
# reconnect x: etc.
#
# In a setup with ~6000 hosts these settings would average the reconnects
# to about 100 per second and all hosts would be reconnected within 60 seconds.
# recon_default: 100
# recon_max: 5000
# recon_randomize: False
#
#
# The loop_interval sets how long in seconds the minion will wait between
# evaluating the scheduler and running cleanup tasks. This defaults to a
# sane 60 seconds, but if the minion scheduler needs to be evaluated more
# often lower this value
#loop_interval: 60
# The grains can be merged, instead of overridden, using this option.
# This allows custom grains to define different subvalues of a dictionary
# grain. By default this feature is disabled, to enable set grains_deep_merge
# to ``True``.
#grains_deep_merge: False
# The grains_refresh_every setting allows for a minion to periodically check
# its grains to see if they have changed and, if so, to inform the master
# of the new grains. This operation is moderately expensive, therefore
# care should be taken not to set this value too low.
#
# Note: This value is expressed in __minutes__!
#
# A value of 10 minutes is a reasonable default.
#
# If the value is set to zero, this check is disabled.
#grains_refresh_every: 1
# Cache grains on the minion. Default is False.
#grains_cache: False
# Cache rendered pillar data on the minion. Default is False.
# This may cause 'cachedir'/pillar to contain sensitive data that should be
# protected accordingly.
#minion_pillar_cache: False
# Grains cache expiration, in seconds. If the cache file is older than this
# number of seconds then the grains cache will be dumped and fully re-populated
# with fresh data. Defaults to 5 minutes. Will have no effect if 'grains_cache'
# is not enabled.
# grains_cache_expiration: 300
# Determines whether or not the salt minion should run scheduled mine updates.
# Defaults to "True". Set to "False" to disable the scheduled mine updates
# (this essentially just does not add the mine update function to the minion's
# scheduler).
#mine_enabled: True
# Determines whether or not scheduled mine updates should be accompanied by a job
# return for the job cache. Defaults to "False". Set to "True" to include job
# returns in the job cache for mine updates.
#mine_return_job: False
# Example functions that can be run via the mine facility
# NO mine functions are established by default.
# Note these can be defined in the minion's pillar as well.
#mine_functions:
# test.ping: []
# network.ip_addrs:
# interface: eth0
# cidr: '10.0.0.0/8'
# Windows platforms lack posix IPC and must rely on slower TCP based inter-
# process communications. Set ipc_mode to 'tcp' on such systems
#ipc_mode: ipc
# Overwrite the default tcp ports used by the minion when in tcp mode
#tcp_pub_port: 4510
#tcp_pull_port: 4511
# Passing very large events can cause the minion to consume large amounts of
# memory. This value tunes the maximum size of a message allowed onto the
# minion event bus. The value is expressed in bytes.
#max_event_size: 1048576
# To detect failed master(s) and fire events on connect/disconnect, set
# master_alive_interval to the number of seconds to poll the masters for
# connection events.
#
#master_alive_interval: 30
# The minion can include configuration from other files. To enable this,
# pass a list of paths to this option. The paths can be either relative or
# absolute; if relative, they are considered to be relative to the directory
# the main minion configuration file lives in (this file). Paths can make use
# of shell-style globbing. If no files are matched by a path passed to this
# option then the minion will log a warning message.
#
# Include a config file from some other path:
# include: /usr/pkg/etc/salt/extra_config
#
# Include config from several files and directories:
#include:
# - /usr/pkg/etc/salt/extra_config
# - /etc/roles/webserver
# The syndic minion can verify that it is talking to the correct master via the
# key fingerprint of the higher-level master with the "syndic_finger" config.
#syndic_finger: ''
#
#
#
##### Minion module management #####
##########################################
# Disable specific modules. This allows the admin to limit the level of
# access the master has to the minion.
#disable_modules: [cmd,test]
#disable_returners: []
# This is the reverse of disable_modules. The default, like disable_modules, is the empty list,
# but if this option is set to *anything* then *only* those modules will load.
# Note that this is a very large hammer and it can be quite difficult to keep the minion working
# the way you think it should since Salt uses many modules internally itself. At a bare minimum
# you need the following enabled or else the minion won't start.
#whitelist_modules:
# - cmdmod
# - test
# - config
# Modules can be loaded from arbitrary paths. This enables the easy deployment
# of third party modules. Modules for returners and minions can be loaded.
# Specify a list of extra directories to search for minion modules and
# returners. These paths must be fully qualified!
#module_dirs: []
#returner_dirs: []
#states_dirs: []
#render_dirs: []
#utils_dirs: []
#
# A module provider can be statically overwritten or extended for the minion
# via the providers option, in this case the default module will be
# overwritten by the specified module. In this example the pkg module will
# be provided by the yumpkg5 module instead of the system default.
#providers:
# pkg: yumpkg5
#
# Enable Cython modules searching and loading. (Default: False)
#cython_enable: False
#
# Specify a max size (in bytes) for modules on import. This feature is currently
# only supported on *nix operating systems and requires psutil.
# modules_max_memory: -1
##### State Management Settings #####
###########################################
# The state management system executes all of the state templates on the minion
# to enable more granular control of system state management. The type of
# template and serialization used for state management needs to be configured
# on the minion, the default renderer is yaml_jinja. This is a yaml file
# rendered from a jinja template, the available options are:
# yaml_jinja
# yaml_mako
# yaml_wempy
# json_jinja
# json_mako
# json_wempy
#
#renderer: yaml_jinja
#
# The failhard option tells the minions to stop immediately after the first
# failure detected in the state execution. Defaults to False.
#failhard: False
#
# Reload the modules prior to a highstate run.
#autoload_dynamic_modules: True
#
# clean_dynamic_modules keeps the dynamic modules on the minion in sync with
# the dynamic modules on the master, this means that if a dynamic module is
# not on the master it will be deleted from the minion. By default, this is
# enabled and can be disabled by changing this value to False.
#clean_dynamic_modules: True
#
# Normally, the minion is not isolated to any single environment on the master
# when running states, but the environment can be isolated on the minion side
# by statically setting it. Remember that the recommended way to manage
# environments is to isolate via the top file.
#environment: None
#
# Isolates the pillar environment on the minion side. This functions the same
# as the environment setting, but for pillar instead of states.
#pillarenv: None
#
# If using the local file directory, then the state top file name needs to be
# defined, by default this is top.sls.
#state_top: top.sls
#
# Run states when the minion daemon starts. To enable, set startup_states to:
# 'highstate' -- Execute state.highstate
# 'sls' -- Read in the sls_list option and execute the named sls files
# 'top' -- Read top_file option and execute based on that file on the Master
#startup_states: ''
#
# List of states to run when the minion starts up if startup_states is 'sls':
#sls_list:
# - edit.vim
# - hyper
#
# Top file to execute if startup_states is 'top':
#top_file: ''
# Automatically aggregate all states that have support for mod_aggregate by
# setting to True. Or pass a list of state module names to automatically
# aggregate just those types.
#
# state_aggregate:
# - pkg
#
#state_aggregate: False
##### File Directory Settings #####
##########################################
# The Salt Minion can redirect all file server operations to a local directory,
# this allows for the same state tree that is on the master to be used if
# copied completely onto the minion. This is a literal copy of the settings on
# the master but used to reference a local directory on the minion.
# Set the file client. The client defaults to looking on the master server for
# files, but can be directed to look at the local file directory setting
# defined below by setting it to "local". Setting a local file_client runs the
# minion in masterless mode.
#file_client: remote
# The file directory works on environments passed to the minion, each environment
# can have multiple root directories, the subdirectories in the multiple file
# roots cannot match, otherwise the downloaded files will not be able to be
# reliably ensured. A base environment is required to house the top file.
# Example:
# file_roots:
# base:
# - /usr/pkg/etc/salt/states/
# dev:
# - /usr/pkg/etc/salt/states/dev/services
# - /usr/pkg/etc/salt/states/dev/states
# prod:
# - /usr/pkg/etc/salt/states/prod/services
# - /usr/pkg/etc/salt/states/prod/states
#
#file_roots:
# base:
# - /usr/pkg/etc/salt/states
# Uncomment the line below if you do not want the file_server to follow
# symlinks when walking the filesystem tree. This is set to True
# by default. Currently this only applies to the default roots
# fileserver_backend.
#fileserver_followsymlinks: False
#
# Uncomment the line below if you do not want symlinks to be
# treated as the files they are pointing to. By default this is set to
# False. By uncommenting the line below, any detected symlink while listing
# files on the Master will not be returned to the Minion.
#fileserver_ignoresymlinks: True
#
# By default, the Salt fileserver recurses fully into all defined environments
# to attempt to find files. To limit this behavior so that the fileserver only
# traverses directories with SLS files and special Salt directories like _modules,
# enable the option below. This might be useful for installations where a file root
# has a very large number of files and performance is negatively impacted. Default
# is False.
#fileserver_limit_traversal: False
# The hash_type is the hash to use when discovering the hash of a file in
# the local fileserver. The default is sha256, sha224, sha384 and sha512 are also supported.
#
# WARNING: While md5 and sha1 are also supported, do not use them due to the high chance
# of possible collisions and thus security breach.
#
# Warning: Prior to changing this value, the minion should be stopped and all
# Salt caches should be cleared.
#hash_type: sha256
# The Salt pillar is searched for locally if file_client is set to local. If
# this is the case, and pillar data is defined, then the pillar_roots need to
# also be configured on the minion:
#pillar_roots:
# base:
# - /usr/pkg/etc/salt/pillar
# Set a hard-limit on the size of the files that can be pushed to the master.
# It will be interpreted as megabytes. Default: 100
#file_recv_max_size: 100
#
#
###### Security settings #####
###########################################
# Enable "open mode", this mode still maintains encryption, but turns off
# authentication, this is only intended for highly secure environments or for
# the situation where your keys end up in a bad state. If you run in open mode
# you do so at your own risk!
#open_mode: False
# Enable permissive access to the salt keys. This allows you to run the
# master or minion as root, but have a non-root group be given access to
# your pki_dir. To make the access explicit, root must belong to the group
# you've given access to. This is potentially quite insecure.
#permissive_pki_access: False
# The state_verbose and state_output settings can be used to change the way
# state system data is printed to the display. By default all data is printed.
# The state_verbose setting can be set to True or False, when set to False
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True
# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line.
#state_output: full
# The state_output_diff setting changes whether or not the output from
# successful states is returned. Useful when even the terse output of these
# states is cluttering the logs. Set it to True to ignore them.
#state_output_diff: False
# The state_output_profile setting changes whether profile information
# will be shown for each state run.
#state_output_profile: True
# Fingerprint of the master public key to validate the identity of your Salt master
# before the initial key exchange. The master fingerprint can be found by running
# "salt-key -F master" on the Salt master.
#master_finger: ''
###### Thread settings #####
###########################################
# Disable multiprocessing support, by default when a minion receives a
# publication a new process is spawned and the command is executed therein.
#multiprocessing: True
##### Logging settings #####
##########################################
# The location of the minion log file
# The minion log can be sent to a regular file, local path name, or network
# location. Remote logging works best when configured to use rsyslogd(8) (e.g.:
# ``file:///dev/log``), with rsyslogd(8) configured for network logging. The URI
# format is: <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
#log_file: /var/log/salt/minion
#log_file: file:///dev/log
#log_file: udp://loghost:10514
#
#log_file: /var/log/salt/minion
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']
#
# Default: 'warning'
# NOTE(review): 'debug' is listed above among the INSECURE levels that may log
# sensitive data; consider reverting to the 'warning' default in production.
log_level: debug
# The level of messages to send to the log file.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
# If using 'log_granular_levels' this must be set to the highest desired level.
# Default: 'warning'
#log_level_logfile:
# The date and time format used in log messages. Allowed date/time formatting
# can be seen here: http://docs.python.org/library/time.html#time.strftime
#log_datefmt: '%H:%M:%S'
#log_datefmt_logfile: '%Y-%m-%d %H:%M:%S'
# The format of the console logging messages. Allowed formatting options can
# be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
#
# Console log colors are specified by these additional formatters:
#
# %(colorlevel)s
# %(colorname)s
# %(colorprocess)s
# %(colormsg)s
#
# Since it is desirable to include the surrounding brackets, '[' and ']', in
# the coloring of the messages, these color formatters also include padding as
# well. Color LogRecord attributes are only available for console logging.
#
#log_fmt_console: '%(colorlevel)s %(colormsg)s'
#log_fmt_console: '[%(levelname)-8s] %(message)s'
#
#log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
# This can be used to control logging levels more specificically. This
# example sets the main salt library at the 'warning' level, but sets
# 'salt.modules' to log at the 'debug' level:
# log_granular_levels:
# 'salt': 'warning'
# 'salt.modules': 'debug'
#
#log_granular_levels: {}
# To diagnose issues with minions disconnecting or missing returns, ZeroMQ
# supports the use of monitor sockets to log connection events. This
# feature requires ZeroMQ 4.0 or higher.
#
# To enable ZeroMQ monitor sockets, set 'zmq_monitor' to 'True' and log at a
# debug level or higher.
#
# A sample log event is as follows:
#
# [DEBUG ] ZeroMQ event: {'endpoint': 'tcp://127.0.0.1:4505', 'event': 512,
# 'value': 27, 'description': 'EVENT_DISCONNECTED'}
#
# All events logged will include the string 'ZeroMQ event'. A connection event
# should be logged as the minion starts up and initially connects to the
# master. If not, check for debug log level and that the necessary version of
# ZeroMQ is installed.
#
#zmq_monitor: False
###### Module configuration #####
###########################################
# Salt allows for modules to be passed arbitrary configuration data, any data
# passed here in valid yaml format will be passed on to the salt minion modules
# for use. It is STRONGLY recommended that a naming convention be used in which
# the module name is followed by a . and then the value. Also, all top level
# data must be applied via the yaml dict construct, some examples:
#
# You can specify that all modules should run in test mode:
#test: True
#
# A simple value for the test module:
#test.foo: foo
#
# A list for the test module:
#test.bar: [baz,quo]
#
# A dict for the test module:
#test.baz: {spam: sausage, cheese: bread}
#
#
###### Update settings ######
###########################################
# Using the features in Esky, a salt minion can both run as a frozen app and
# be updated on the fly. These options control how the update process
# (saltutil.update()) behaves.
#
# The url for finding and downloading updates. Disabled by default.
#update_url: False
#
# The list of services to restart after a successful update. Empty by default.
#update_restart_services: []
###### Keepalive settings ######
############################################
# ZeroMQ now includes support for configuring SO_KEEPALIVE if supported by
# the OS. If connections between the minion and the master pass through
# a state tracking device such as a firewall or VPN gateway, there is
# the risk that it could tear down the connection between the master and minion
# without informing either party that their connection has been taken away.
# Enabling TCP Keepalives prevents this from happening.
# Overall state of TCP Keepalives, enable (1 or True), disable (0 or False)
# or leave to the OS defaults (-1), on Linux, typically disabled. Default True, enabled.
#tcp_keepalive: True
# How long before the first keepalive should be sent in seconds. Default 300
# to send the first keepalive after 5 minutes, OS default (-1) is typically 7200 seconds
# on Linux see /proc/sys/net/ipv4/tcp_keepalive_time.
#tcp_keepalive_idle: 300
# How many lost probes are needed to consider the connection lost. Default -1
# to use OS defaults, typically 9 on Linux, see /proc/sys/net/ipv4/tcp_keepalive_probes.
#tcp_keepalive_cnt: -1
# How often, in seconds, to send keepalives after the first one. Default -1 to
# use OS defaults, typically 75 seconds on Linux, see
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
#tcp_keepalive_intvl: -1
###### Windows Software settings ######
############################################
# Location of the repository cache file on the master:
#win_repo_cachefile: 'salt://win/repo/winrepo.p'
###### Returner settings ######
############################################
# Which returner(s) will be used for minion's result:
#return: mysql
###### Miscellaneous settings ######
############################################
# Default match type for filtering events tags: startswith, endswith, find, regex, fnmatch
#event_match_type: startswith

17
config/roster Normal file
View File

@ -0,0 +1,17 @@
scw01-ams:
host: scw01-ams.paulbsd.com
user: paul
sudo: True
scw02-ams:
host: scw02-ams.paulbsd.com
user: paul
lxc01:
host: lxc01.paulbsd.com
user: paul
sudo: True
nuc:
host: nuc.paulbsd.com
user: paul

11
config/roster.sample Normal file
View File

@ -0,0 +1,11 @@
scw01-ams.paulbsd.com:
host: scw01-ams.paulbsd.com
user: paul
sudo: True
priv: /home/paul/.ssh/id_rsa
aws01-par.paulbsd.com:
host: aws01-par.paulbsd.com
user: ubuntu
sudo: True
priv: /home/paul/.ssh/id_rsa

10
scripts/encrypt_password Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# Encrypt a password with the Salt GPG key for use in encrypted pillars.
# Usage: encrypt_password <password>
key_id=salt
if [[ -n $1 ]]
then
    # printf avoids echo's option parsing (a password starting with -n or -e
    # would be eaten by echo); quoting "$1" prevents word splitting and
    # glob expansion of the password.
    printf '%s' "$1" | gpg --armor --batch --homedir="/etc/salt/gpgkeys" --trust-model always --encrypt -r "${key_id}"
else
    echo "Please specify a password" >&2
    exit 1
fi

2
scripts/salt-test.sh Executable file
View File

@ -0,0 +1,2 @@
#!/bin/bash
# Apply a single state locally against ./states for quick testing.
# Usage: salt-test.sh <state.name>
# Quote "$1" so state names are passed as a single argument even if the
# shell would otherwise split or glob them.
salt-call -l debug --local --file-root=./states state.sls "$1"

19
states/_modules/custom.py Executable file
View File

@ -0,0 +1,19 @@
#!/usr/bin/python3
import salt.exceptions
def current_state(name):
    """Report the (hard-coded) current state of ``name``.

    Example execution-module function: returns a dict with the item
    name and a fixed ``foo`` value for a state module to diff against.
    """
    return {'name': name, 'foo': 'foo'}
def change_state(name, foo):
    """Pretend to change the state of ``name`` to ``foo``.

    Example execution-module function: echoes back the requested state
    as a dict, as a real implementation would after applying it.
    """
    return {'name': name, 'foo': foo}

28
states/_modules/dkron.py Normal file
View File

@ -0,0 +1,28 @@
#!/usr/bin/python3
import requests
import json
def get_jobs(url="http://localhost:8080", verify=False):
    """List the jobs registered on a dkron server.

    url
        Base URL of the dkron API.
    verify
        Whether to verify the server's TLS certificate.

    Returns the decoded JSON job list on HTTP 200, ``None`` otherwise.
    Raises ``RuntimeError`` if the HTTP request itself fails.
    """
    fullurl = f"{url}/v1/jobs"
    try:
        req = requests.request("get", fullurl, verify=verify)
    except requests.exceptions.RequestException as exc:
        # The original raised a plain string, which is a TypeError on
        # Python 3; wrap the failure in a real exception type instead.
        raise RuntimeError(f"Exception {exc} occurred") from exc
    if req.status_code == 200:
        # Only decode the body on success; error bodies may not be JSON.
        return req.json()
    return None
def set_jobs(url="http://localhost:8080", verify=False, job=None):
    """Create or update a job on a dkron server.

    url
        Base URL of the dkron API.
    verify
        Whether to verify the server's TLS certificate.
    job
        Job definition dict to POST (defaults to an empty definition).

    Returns the decoded JSON answer on HTTP 201, ``None`` otherwise.
    Raises ``RuntimeError`` if the HTTP request itself fails.
    """
    # Avoid the shared mutable default the original used (job={}).
    if job is None:
        job = {}
    fullurl = f"{url}/v1/jobs"
    try:
        req = requests.request("post", fullurl, verify=verify, json=job)
    except requests.exceptions.RequestException as exc:
        # Raising a plain string is a TypeError on Python 3.
        raise RuntimeError(f"Exception {exc} occurred") from exc
    if req.status_code == 201:
        return req.json()
    return None

153
states/_modules/ovhapi.py Normal file
View File

@ -0,0 +1,153 @@
#!/usr/bin/python3
from __future__ import absolute_import, unicode_literals, print_function
import re
import salt
import requests
import ovh
from salt.exceptions import CommandExecutionError, ArgumentValueError
from ovh.exceptions import ResourceNotFoundError, APIError
def __virtual__():
    # Always load this module; the ovh/requests dependencies are not
    # probed here, so import errors would surface at call time instead.
    return True
def _config():
    # Fetch the 'ovh' section from Salt configuration (minion config or
    # pillar via config.get); fail loudly when absent since every API
    # call below needs these credentials.
    config = __salt__['config.get']('ovh')
    if not config:
        raise CommandExecutionError(
            'OVH execution module configuration could not be found'
        )
    return config
def _auth():
    # Build an authenticated OVH API client from the configured
    # endpoint and application/consumer keys (see _config()).
    cfg = _config()
    client = ovh.Client(
        endpoint=cfg['endpoint'],
        application_key=cfg['application_key'],
        application_secret=cfg['application_secret'],
        consumer_key=cfg['consumer_key'],
    )
    return client
def domain_get_zone(zone=""):
    '''
    Get DNS zone extraction

    zone
        Zone name to fetch
    '''
    if zone == "":
        raise ArgumentValueError("Zone is not defined")
    # Export the full zone file text through the OVH API.
    return _auth().get(f'/domain/zone/{zone}/export')
def domain_get_record(zone="", fieldType="", subDomain=""):
    '''
    Records of the zone

    zone
        Zone name to fetch
    fieldType
        Filter the value of fieldType property (like)
    subDomain
        Filter the value of subDomain property (like)

    Returns a list of record dicts.  Raises ``CommandExecutionError``
    when the OVH API query fails.
    '''
    if zone == "":
        raise ArgumentValueError("Zone is not defined")
    results = []
    client = _auth()
    try:
        records = client.get(f'/domain/zone/{zone}/record',
                             fieldType=fieldType,
                             subDomain=subDomain)
    except APIError as exc:
        # The original returned an error *string* here; callers that
        # treat the result as a list (e.g. len(...) in the ovhapi state)
        # would mistake the message for matching records.  Raise instead.
        raise CommandExecutionError(f"Query failed in OVH API: {exc}")
    for record in records:
        try:
            # Resolve each record id to its full description.
            results.append(client.get(f'/domain/zone/{zone}/record/{record}'))
        except APIError as exc:
            raise CommandExecutionError(f"Query failed in OVH API: {exc}")
    return results
def domain_post_record(zone="", fieldType="", subDomain="", target="", ttl=0):
    '''
    Create a new DNS record

    zone
        The internal name of your zone
    fieldType
        Filter the value of fieldType property (like)
    subDomain
        Filter the value of subDomain property (like)
    target
        Resource record target
    ttl
        Resource record ttl
    '''
    if zone == "":
        raise ArgumentValueError("Zone is not defined")
    client = _auth()
    # NOTE(review): unlike domain_get_record, API errors are not caught
    # here, so an ovh APIError propagates directly to the caller.
    req = client.post(f'/domain/zone/{zone}/record',
                      fieldType=fieldType,
                      subDomain=subDomain,
                      target=target,
                      ttl=ttl)
    return req
def domain_delete_record(zone="", fieldType="", subDomain=""):
    '''
    Delete a DNS record (Don't forget to refresh the zone)

    zone
        The internal name of your zone
    fieldType
        Filter the value of fieldType property (like)
    subDomain
        Filter the value of subDomain property (like)

    Returns a list of delete results.  Raises ``CommandExecutionError``
    when the OVH API query fails or a record disappears mid-delete.
    '''
    if zone == "":
        raise ArgumentValueError("Zone is not defined")
    results = []
    client = _auth()
    try:
        records = client.get(f'/domain/zone/{zone}/record',
                             fieldType=fieldType,
                             subDomain=subDomain)
    except APIError as exc:
        # Raise instead of returning an error string, so callers cannot
        # mistake the message for a list of deleted records.
        raise CommandExecutionError(f"Query failed in OVH API: {exc}")
    for record in records:
        try:
            results.append(client.delete(f'/domain/zone/{zone}/record/{record}'))
        except ResourceNotFoundError as exc:
            raise CommandExecutionError(f"Resource not found in OVH API: {exc}")
    return results
def domain_refresh_zone(zone=""):
    '''
    Apply zone modification on DNS servers

    zone
        The internal name of your zone
    '''
    if zone == "":
        raise ArgumentValueError("Zone is not defined")
    client = _auth()
    # NOTE(review): API errors are not caught here; an ovh APIError
    # propagates directly to the caller.
    req = client.post(f'/domain/zone/{zone}/refresh')
    return req

View File

@ -0,0 +1,59 @@
#!/usr/bin/python3
import requests
import json
import salt.exceptions
import xml.etree.ElementTree as ET
def get_apikey(configfile="/root/.config/syncthing/config.xml"):
    """Read the GUI API key from a Syncthing XML configuration file.

    configfile
        Path to the Syncthing ``config.xml``.

    Raises ``salt.exceptions.CommandExecutionError`` if the file is
    missing, unparsable, or lacks a ``gui/apikey`` element.
    """
    try:
        root = ET.parse(configfile).getroot()
        # find() returns None when the element is absent; the resulting
        # AttributeError is handled below with the parse errors.
        return root.find("./gui/apikey").text
    except (FileNotFoundError, ET.ParseError, AttributeError) as e:
        # The original raised a plain string (a TypeError on Python 3)
        # and had an unreachable ``return ""`` after it.
        raise salt.exceptions.CommandExecutionError(
            "Exception {0} occurred while reading {1}".format(e, configfile))
def get_config(url, verify, apikey):
    """Fetch the active Syncthing configuration via the REST API.

    Returns the decoded JSON configuration on HTTP 200, ``None``
    otherwise.  Raises ``salt.exceptions.CommandExecutionError`` when
    the HTTP request itself fails.
    """
    fullurl = "{0}/rest/system/config".format(url)
    try:
        req = requests.request("get", fullurl, verify=verify, headers={"X-API-Key": apikey})
    except requests.exceptions.RequestException as exc:
        # The original raised a plain string, a TypeError on Python 3.
        raise salt.exceptions.CommandExecutionError(
            "Exception {0} occurred".format(exc))
    if req.status_code == 200:
        # Only decode the body on success; error bodies may not be JSON.
        return req.json()
    return None
def set_config(url, verify, apikey, config):
    """POST a full configuration to Syncthing.

    Returns ``True`` on HTTP 200, ``None`` otherwise.  Raises
    ``salt.exceptions.CommandExecutionError`` when the HTTP request
    itself fails.
    """
    fullurl = "{0}/rest/system/config".format(url)
    try:
        req = requests.request("post", fullurl, verify=verify, headers={"X-API-Key": apikey}, json=config)
    except requests.exceptions.RequestException as exc:
        # The original raised a plain string, a TypeError on Python 3.
        raise salt.exceptions.CommandExecutionError(
            "Exception {0} occurred".format(exc))
    if req.status_code == 200:
        return True
    return None
def insync(url, verify, apikey):
    """Ask Syncthing whether the running config matches the saved one.

    Returns the decoded JSON answer on HTTP 200, ``None`` otherwise.
    Raises ``salt.exceptions.CommandExecutionError`` when the HTTP
    request itself fails.
    """
    fullurl = "{0}/rest/system/config/insync".format(url)
    try:
        req = requests.request("get", fullurl, verify=verify, headers={"X-API-Key": apikey})
    except requests.exceptions.RequestException as exc:
        # The original raised a plain string, a TypeError on Python 3.
        raise salt.exceptions.CommandExecutionError(
            "Exception {0} occurred".format(exc))
    if req.status_code == 200:
        # Only decode the body on success; error bodies may not be JSON.
        return req.json()
    return None
def restart(url, verify, apikey):
    """Ask the Syncthing daemon to restart itself.

    Returns ``{}`` on HTTP 200, ``None`` otherwise.  Raises
    ``salt.exceptions.CommandExecutionError`` when the HTTP request
    itself fails.
    """
    fullurl = "{0}/rest/system/restart".format(url)
    try:
        req = requests.post(fullurl, verify=verify, headers={"X-API-Key": apikey})
    except requests.exceptions.RequestException as exc:
        # The original raised a plain string, a TypeError on Python 3.
        raise salt.exceptions.CommandExecutionError(
            "Exception {0} occurred".format(exc))
    if req.status_code == 200:
        return {}
    return None

View File

@ -0,0 +1,51 @@
#!/usr/pkg/bin/python2
#-*- coding: utf-8 -*-
import os
import subprocess
import salt.modules.smtp
import json
'''
For use with salt reactor
'''
def email_errors(fromaddr, toaddrs, subject, data_str, smtp_server):
    # Reactor callback: inspect a job-return event and, if any state in
    # the job failed, e-mail a summary of the failed states.
    # NOTE(review): Python 2 only (iteritems(), keys()[0] indexing) —
    # matches the #!/usr/pkg/bin/python2 shebang of this file.
    # SECURITY: eval() executes arbitrary code embedded in data_str;
    # consider ast.literal_eval for untrusted event payloads.
    data = eval(data_str)
    error = False
    changes = False
    try:
        if type(data['return']) is dict:
            # Stop scanning at the first failed OR first changed state;
            # a failure after a change is therefore not detected here.
            # NOTE(review): confirm this early-break behavior is intended.
            for state, result in data['return'].iteritems():
                if not result['result']:
                    error = True
                    break
                if result['changes']:
                    changes = True
                    break
        else:
            # Non-dict return (e.g. a plain command): rely on 'success'.
            if not data['success']:
                error = True
    except KeyError as e:
        # Malformed event: bail out silently, terminating the handler.
        exit()
    #if error or changes:
    if error:
        # Re-fetch the full job result via salt-run to list failures.
        js = subprocess.check_output(["salt-run", "--out=json", "jobs.lookup_jid", data['jid']])
        body = "JobId is %s\n" % (data['jid'])
        outdata = json.loads(js)
        nodename = outdata.keys()[0]
        for i in outdata[nodename]:
            if not outdata[nodename][i]["result"]:
                name = outdata[nodename][i]["name"]
                comment = outdata[nodename][i]["comment"].rstrip('\n')
                # NOTE(review): 'data' is overwritten each iteration, so
                # only the last failed state appears in the message —
                # looks unintended; confirm whether += was meant.
                data = "%s- %s / %s\n" % (body, name, comment)
        salt.modules.smtp.send_msg(recipient=toaddrs, message=data, subject=subject, sender=fromaddr, server=smtp_server, use_ssl=False)
    return True
def email_auth(fromaddr, toaddrs, subject, data_str, smtp_server):
    # Reactor callback: forward a raw auth event payload by e-mail.
    # data_str is the textual repr of the event data dict.
    import ast
    # ast.literal_eval only accepts Python literals, unlike eval(),
    # which would execute arbitrary code embedded in the event payload.
    data = ast.literal_eval(data_str)
    salt.modules.smtp.send_msg(recipient=toaddrs, message=data, subject=subject, sender=fromaddr, server=smtp_server, use_ssl=False)
    return True

74
states/_states/custom.py Executable file
View File

@ -0,0 +1,74 @@
#!/usr/bin/python3
import salt.exceptions
def current_state(name):
    """Placeholder state helper returning a fixed snapshot.

    NOTE(review): the ``name`` argument is ignored and ``'blabla'`` is
    returned instead — presumably leftover example code; confirm.
    """
    return {'name': 'blabla'}
def enforce_custom_thing(name, foo, bar=True):
    '''
    Enforce the state of a custom thing

    This state module does a custom thing. It calls out to the execution module
    ``y_custom_module`` in order to check the current system and perform any
    needed changes.

    name
        The thing to do something to
    foo
        A required argument
    bar : True
        An argument with a default value
    '''
    # Standard Salt state return skeleton.
    ret = {
        'name': name,
        'changes': {},
        'result': False,
        'comment': '',
    }
    # Start with basic error-checking. Do all the passed parameters make sense
    # and agree with each-other?
    if bar == True and foo.startswith('Foo'):
        raise salt.exceptions.SaltInvocationError(
            'Argument "foo" cannot start with "Foo" if argument "bar" is True.')
    # Check the current state of the system. Does anything need to change?
    # NOTE(review): custom.current_state returns a dict (see
    # states/_modules/custom.py), while foo is presumably a scalar, so
    # this equality may never hold — confirm intended semantics.
    current_state = __salt__['custom.current_state'](name)
    if current_state == foo:
        ret['result'] = True
        ret['comment'] = 'System already in the correct state %s' % name
        return ret
    # The state of the system does need to be changed. Check if we're running
    # in ``test=true`` mode.
    if __opts__['test'] == True:
        ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
        ret['changes'] = {
            'old': current_state,
            'new': 'Description, diff, whatever of the new state',
        }
        # Return ``None`` when running with ``test=true``.
        ret['result'] = None
        return ret
    # Finally, make the actual change and return the result.
    new_state = __salt__['custom.change_state'](name, foo)
    ret['comment'] = 'The state of "{0}" was changed!'.format(name)
    ret['changes'] = {
        'old': current_state,
        'new': new_state,
    }
    ret['result'] = True
    return ret

24
states/_states/dkron.py Normal file
View File

@ -0,0 +1,24 @@
#!/usr/bin/python3
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.dictupdate
import salt.utils.dictdiffer
def jobs(name, url="http://localhost:8080", verify=False, jobs=None):
    """Ensure the given dkron jobs are registered on the server.

    name
        State ID (used only for reporting).
    url
        Base URL of the dkron API.
    verify
        Whether to verify the server's TLS certificate.
    jobs
        List of job definition dicts to post to dkron.

    Each job is posted via the ``dkron.set_jobs`` execution module; a
    ``None`` answer marks the state as failed.
    """
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Config is up to date'}
    #dk_jobs = []
    #dk_jobs = __salt__['dkron.get_jobs'](url, verify)
    # Avoid the shared mutable default the original used (jobs=[]).
    for job in jobs or []:
        res = __salt__['dkron.set_jobs'](url, verify, job)
        if res is not None:
            ret['changes'][job['name']] = res
        else:
            ret['result'] = False
            ret['comment'] = "Error occured"
    return ret

65
states/_states/ovhapi.py Normal file
View File

@ -0,0 +1,65 @@
#!/usr/bin/python3
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.dictupdate
import salt.utils.dictdiffer
def _error(ret, err_msg):
ret['result'] = False
ret['comment'] = err_msg
return ret
def _str_split(string):
delim = "\n"
return [e + delim for e in string.split(delim) if e]
def domain_record_present(name,
                          zone=None,
                          recordname=None,
                          recordtype=None,
                          target=None,
                          ttl=0):
    '''
    Ensure a DNS record exists in an OVH zone.

    name
        State ID.
    zone
        DNS zone to operate on.
    recordname
        Sub-domain (record) name.
    recordtype
        Record type (A, AAAA, CNAME, TXT, ...).
    target
        Record target value.
    ttl
        Record TTL in seconds (0 = zone default).
    '''
    # This module's top only imports salt.utils.dictupdate/dictdiffer;
    # import stringutils explicitly so the get_diff() call below cannot
    # fail with AttributeError on a partially-loaded salt.utils package.
    import salt.utils.stringutils

    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Config is up to date'
    }
    if name is None:
        return _error(ret, 'Must provide name to ovhapi.domain_record_present')
    if zone is None:
        return _error(ret, 'Must provide dns zone to ovhapi.domain_record_present')
    if recordname is None:
        return _error(ret, 'Must provide record name to ovhapi.domain_record_present')
    if recordtype is None:
        return _error(ret, 'Must provide record type to ovhapi.domain_record_present')
    if target is None:
        return _error(ret, 'Must provide target to ovhapi.domain_record_present')
    # check if record exists
    # NOTE(review): if ovhapi.domain_get_record reports an API failure as
    # a string, len() > 0 would also be taken as "record exists" — confirm
    # the execution module raises on error rather than returning a string.
    if len(__salt__['ovhapi.domain_get_record'](zone=zone,
                                                fieldType=recordtype,
                                                subDomain=recordname)):
        ret['comment'] = f"Record on {zone} named {recordname} with type {recordtype} already exists"
        return ret
    # Snapshot the zone before and after so changes carry a textual diff.
    cur_zone_state = __salt__['ovhapi.domain_get_zone'](zone=zone)
    res = __salt__['ovhapi.domain_post_record'](
        zone=zone,
        subDomain=recordname,
        fieldType=recordtype,
        target=target,
        ttl=ttl)
    new_zone_state = __salt__['ovhapi.domain_get_zone'](zone=zone)
    ret['changes'] = {
        "diff": salt.utils.stringutils.get_diff(_str_split(cur_zone_state), _str_split(new_zone_state))
    }
    ret['comment'] = f'Result is {res}'
    return ret

View File

@ -0,0 +1,27 @@
#!/usr/bin/python3
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.dictupdate
import salt.utils.dictdiffer
def config(name, verify, url, cfg):
    # State: merge the pillar-provided Syncthing config fragment ``cfg``
    # into the running configuration and push the result back.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'config is up to date'}
    cfg = dict(cfg)
    # Read the API key from the local config.xml and reuse it, so the
    # pushed config never overwrites the key with a pillar value.
    apikey = __salt__['syncthing.get_apikey']()
    st_cfg = __salt__['syncthing.get_config'](url, verify, apikey)
    cfg['gui']['apiKey'] = apikey
    # NOTE(review): dictupdate.update may modify st_cfg in place, in
    # which case deep_diff below compares the object against itself and
    # reports no changes — verify the changes output against a real run.
    res_cfg = salt.utils.dictupdate.update(st_cfg, cfg, recursive_update=True, merge_lists=False)
    ## Return to managed to set result
    # NOTE(review): set_config's return value is ignored, so 'result'
    # stays True even when the POST fails — confirm this is intended.
    __salt__['syncthing.set_config'](url, verify, apikey, res_cfg)
    ret['changes'] = salt.utils.dictdiffer.deep_diff(st_cfg, res_cfg)
    return ret

20
states/acme/defaults.yaml Normal file
View File

@ -0,0 +1,20 @@
---
acme:
enabled: true
directories:
- "/etc/acme/dh/"
- "/etc/acme/keys/"
- "/etc/acme/certs/"
dh:
path: "/etc/acme/dh/dh.pem"
keysize: 1024
keysize: 4096
# NOTE(review): renamed from "domain" — acme/init.sls iterates over
# acme.domains, so the defaults must provide a list under that key.
domains:
- "*.example.com"
dns: "dns_provider"
keyfile: "/etc/acme/keys/private.key"
fullchainfile: "/etc/acme/certs/certificate.crt"
provider:
api:
application_key: "test"
application_secret: "test"
consumer_key: "test"

50
states/acme/init.sls Normal file
View File

@ -0,0 +1,50 @@
# vim:syntax=yaml
---
{%- from "acme/map.jinja" import acme with context %}
acme-install:
cmd.run:
- name: "curl https://get.acme.sh | sh"
- runas: root
- cwd: /root
- env:
- HOME: /root
- unless: /bin/bash -c "[[ -f /root/.acme.sh/acme.sh ]]"
acme-upgrade:
cmd.run:
- name: /root/.acme.sh/acme.sh --upgrade
- runas: root
- cwd: /root
- env:
- HOME: /root
- require:
- cmd: acme-install
{%- for dir in acme.directories %}
acme-directories-{{ dir }}:
file.directory:
- name: {{ dir }}
- makedirs: True
{%- endfor %}
acme-dh-params:
cmd.run:
- name: openssl dhparam -out {{ acme.dh.path }} {{ acme.dh.keysize }}
- creates: {{ acme.dh.path }}
acme-certs:
cmd.run:
- name: /root/.acme.sh/acme.sh --issue {%- for dom in acme.domains %} -d '{{ dom }}' {% endfor -%} --dns dns_ovh --cert-file '' --key-file '{{ acme.keyfile }}' --fullchain-file '{{ acme.fullchainfile }}' -k {{ acme.keysize }}
- env:
- OVH_AK: '{{ acme.provider.api.application_key }}'
- OVH_AS: '{{ acme.provider.api.application_secret }}'
- OVH_CK: '{{ acme.provider.api.consumer_key }}'
- HOME: '/root'
- success_retcodes:
- 0
- 1
- 2
- runas: root
- cwd: /root
- require:
- cmd: acme-install

5
states/acme/map.jinja Normal file
View File

@ -0,0 +1,5 @@
{%- import_yaml "acme/defaults.yaml" as default_settings -%}
{%- set defaults = salt['grains.filter_by'](default_settings, default='acme') -%}
{%- set acme = salt['pillar.get']('acme', default=defaults, merge=True) -%}

View File

@ -0,0 +1,5 @@
---
androidstudio:
enabled: true
install_dir: /usr/local/apps
config:

View File

@ -0,0 +1,24 @@
---
# https://developer.android.com/studio/archive.html
{%- from "androidstudio/map.jinja" import androidstudio with context %}
{%- if salt['file.grep'](androidstudio.install_dir + '/android-studio/build.txt', androidstudio.version_regex)['retcode'] == 1 or not salt['file.file_exists'](androidstudio.install_dir + '/android-studio/build.txt') %}
androidstudio-archive-extract:
archive.extracted:
- name: {{ androidstudio.install_dir }}
- source: {{ androidstudio.mirror }}/{{ androidstudio.version }}/android-studio-ide-{{ androidstudio.tag }}-linux.tar.gz
- skip_verify: True
- archive_format: tar
- overwrite: True
androidstudio-shortcut:
file.managed:
- name: /usr/share/applications/jetbrains-studio.desktop
- source: salt://androidstudio/jetbrains-studio.desktop.j2
- user: root
- group: root
- mode: 644
- template: jinja
- onchanges:
- androidstudio-archive-extract
{%- endif %}

View File

@ -0,0 +1,12 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
{%- from "androidstudio/map.jinja" import androidstudio with context %}
[Desktop Entry]
Version=1.0
Type=Application
Name=Android Studio
Icon={{ androidstudio.install_dir }}/android-studio/bin/studio.png
Exec="{{ androidstudio.install_dir }}/android-studio/bin/studio.sh" %f
Comment=Develop with pleasure on Android!
Categories=Development;IDE;
Terminal=false
StartupWMClass=jetbrains-studio

View File

@ -0,0 +1,5 @@
{%- import_yaml "androidstudio/defaults.yaml" as default_settings -%}
{%- set defaults = salt['grains.filter_by'](default_settings, default='androidstudio') -%}
{%- set androidstudio = salt['pillar.get']('androidstudio', default=defaults, merge=True) -%}

View File

@ -0,0 +1,4 @@
---
apparmor:
enabled: true
config:

21
states/apparmor/init.sls Normal file
View File

@ -0,0 +1,21 @@
---
{%- from "apparmor/map.jinja" import apparmor with context %}
{%- if apparmor.enabled is defined and apparmor.enabled %}
{%- for apparmor_config in ['usr.bin.skype','opt.kingsoft','usr.bin.spotify','opt.sublime_text_3.sublime_text'] %}
apparmor-{{ apparmor_config }}:
file.managed:
- name: "/etc/apparmor.d/{{ apparmor_config }}"
- source: "salt://apparmor/{{ apparmor_config }}.j2"
- user: root
- group: root
- mode: 0644
- template: jinja
- watch_in:
- service: apparmor-reload
{%- endfor %}
apparmor-reload:
service.running:
- name: apparmor
- enable: true
{%- endif %}

View File

@ -0,0 +1,5 @@
{%- import_yaml "apparmor/defaults.yaml" as default_settings %}
{%- set defaults = salt['grains.filter_by'](default_settings, default='apparmor') %}
{%- set apparmor = salt['pillar.get']('apparmor', default=defaults, merge=True) %}

View File

@ -0,0 +1,27 @@
#include <tunables/global>
/opt/kingsoft/** {
#include <abstractions/audio>
#include <abstractions/base>
#include <abstractions/dbus>
#include <abstractions/dbus-session>
#include <abstractions/fonts>
#include <abstractions/freedesktop.org>
#include <abstractions/gnome>
#include <abstractions/ibus>
#include <abstractions/kde>
#include <abstractions/nameservice>
#include <abstractions/nvidia>
#include <abstractions/ssl_certs>
#include <abstractions/user-tmp>
#include <abstractions/X>
/opt/kingsoft/** rwmkl,
owner @{HOME}/.kingsoft/** rw,
owner @{HOME}/.config/Kingsoft/ rwmkl,
owner @{HOME}/.config/Kingsoft/** rwmkl,
owner @{HOME}/ r,
owner @{HOME}/Documents/ rw,
owner @{HOME}/Documents/** rw,
deny network inet,
}

View File

@ -0,0 +1,37 @@
#include <tunables/global>
/opt/sublime_text_3/sublime_text {
#include <abstractions/base>
#include <abstractions/X>
#include <abstractions/ibus>
#include <abstractions/dbus>
#include <abstractions/dbus-session>
#include <abstractions/dbus-accessibility>
#include <abstractions/dbus-session-strict>
#include <abstractions/gnome>
/usr/share/mate/applications/** r,
/usr/bin/caja rwix,
/usr/share/glib-*/schemas/** r,
/dev/null r,
/{dev,run}/{,shm/}** rwmkl,
/opt/sublime_text_3/ rwixmkl,
/opt/sublime_text_3/** rwixmkl,
owner @{HOME}/.config/sublime-text-3/ rwmkl,
owner @{HOME}/.config/sublime-text-3/** rwmkl,
owner @{HOME}/ rwmkl,
owner @{HOME}/** rwmkl,
deny network inet,
deny network inet6,
deny network raw,
}
/opt/sublime_text_3/plugin_host {
#include <abstractions/base>
deny network inet,
deny network inet6,
deny network raw,
}

View File

@ -0,0 +1,77 @@
#include <tunables/global>
/usr/bin/skype {
#include <abstractions/audio>
#include <abstractions/base>
#include <abstractions/dbus-session>
#include <abstractions/fonts>
#include <abstractions/freedesktop.org>
#include <abstractions/gnome>
#include <abstractions/ibus>
#include <abstractions/kde>
#include <abstractions/nameservice>
#include <abstractions/nvidia>
#include <abstractions/ssl_certs>
#include <abstractions/user-tmp>
#include <abstractions/X>
@{PROC}/sys/kernel/{ostype,osrelease} r,
@{PROC}/@{pid}/net/arp r,
owner @{PROC}/@{pid}/auxv r,
owner @{PROC}/@{pid}/cmdline r,
owner @{PROC}/@{pid}/fd/ r,
owner @{PROC}/@{pid}/task/ r,
owner @{PROC}/@{pid}/task/[0-9]*/stat r,
/sys/devices/**/power_supply/**/online r,
/sys/devices/system/cpu/ r,
/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_{cur_freq,max_freq} r,
/dev/ r,
owner /{dev,run}/shm/pulse-shm* m,
/dev/snd/* m,
/dev/video* mrw,
/var/cache/libx11/compose/* r,
# should this be in a separate KDE abstraction?
owner @{HOME}/.kde{,4}/share/config/kioslaverc r,
/usr/bin/skype mr,
/etc/xdg/sni-qt.conf rk,
/etc/xdg/Trolltech.conf rk,
/usr/share/skype/** kr,
/usr/share/skype/**/*.qm mr,
/usr/share/skype/sounds/*.wav kr,
/usr/lib{,32}/pango/** mr,
/usr/lib{,32}/libv4l/* mr,
# For opening links in the browser (still requires explicit access to execute
# the browser)
/usr/bin/xdg-open ixr,
owner @{HOME}/.Skype/ rw,
owner @{HOME}/.Skype/** krw,
owner @{HOME}/.config/ r,
owner @{HOME}/.config/*/ r,
owner @{HOME}/.config/Skype/Skype.conf rw,
owner @{HOME}/.config/Trolltech.conf kr,
# Skype traverses the .mozilla directory and needs access to prefs.js
deny owner @{HOME}/.mozilla/ r,
deny owner @{HOME}/.mozilla/**/ r,
deny owner @{HOME}/.mozilla/*/*/prefs.js r,
# Skype also looks around in these directories
/{,usr/,usr/local/}lib{,32}/ r,
# Recent skype builds have an executable stack, so it tries to mmap certain
# files. Let's deny them for now.
deny /etc/passwd m,
deny /etc/group m,
deny /usr/share/fonts/** m,
# Silence a few non-needed writes
deny /var/cache/fontconfig/ w,
deny owner @{HOME}/.fontconfig/ w,
deny owner @{HOME}/.fontconfig/*.cache-*.TMP* w,
}

View File

@ -0,0 +1,24 @@
#include <tunables/global>
/usr/bin/spotify {
#include <abstractions/base>
#include <abstractions/fonts>
#include <abstractions/kde>
#include <abstractions/nameservice>
/etc/xdg/Trolltech.conf rk,
/etc/xdg/sni-qt.conf r,
/usr/share/icons/*.theme k,
/usr/share/spotify/theme/**.{png,ico} r,
/usr/share/spotify/theme/**.{splang,xml} r,
owner @{PROC}/[0-9]*/task/ r,
owner @{HOME}/.cache/spotify/ rw,
owner @{HOME}/.cache/spotify/** rw,
owner @{HOME}/.config/Trolltech.conf rw,
owner @{HOME}/.config/spotify/ w,
owner @{HOME}/Music/** r,
}

View File

@ -0,0 +1,8 @@
---
appimage-config-dir:
file.directory:
- name: /usr/share/appimagekit/
appimage-config-file:
file.absent:
- name: /usr/share/appimagekit/no_desktopintegration

3
states/apt/10proxy.j2 Normal file
View File

@ -0,0 +1,3 @@
{%- if salt['pillar.get']('apt-proxy:name') != "None" %}
Acquire::http { proxy "http://{{salt['pillar.get']('apt-proxy:name')}}:{{ salt['pillar.get']('apt-proxy:port')}}" }
{%- endif %}

28
states/apt/init.sls Normal file
View File

@ -0,0 +1,28 @@
---
apt-unauth:
file.append:
- name: /etc/apt/apt.conf.d/99-unauth
- text: 'APT::Get::AllowUnauthenticated "true";'
apt-aptitude-install:
pkg.latest:
- pkgs:
- aptitude
- apt-transport-https
#
# apt-upgrade:
# pkg.uptodate:
# - refresh: True
#
#apt-cacher-ng-proxy:
# file.managed:
# - name: /etc/apt/apt.conf.d/10proxy
# - source: salt://apt/10proxy.j2
# - user: root
# - group: root
# - mode: 0644
# - template: jinja
#
# apt-cacher-ng-proxy-delete:
# file.absent:
# - name: /etc/apt/apt.conf.d/10proxy

View File

@ -0,0 +1,14 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
{%- from "arduino/map.jinja" import arduino with context %}
[Desktop Entry]
Type=Application
Name=Arduino IDE
GenericName=Arduino IDE
Comment=Open-source electronics prototyping platform
Exec={{ arduino.install_dir }}/arduino
Icon=arduino-arduinoide
Terminal=false
Categories=Development;IDE;Electronics;
MimeType=text/x-arduino
Keywords=embedded electronics;electronics;avr;microcontroller;
StartupWMClass=processing-app-Base

View File

@ -0,0 +1,4 @@
---
arduino:
mirror: "https://downloads.arduino.cc"
install_dir: "/usr/local/apps"

40
states/arduino/init.sls Normal file
View File

@ -0,0 +1,40 @@
---
{%- from "arduino/map.jinja" import arduino with context %}
{%- if not salt['file.directory_exists']( arduino.install_dir + '/arduino-' + arduino.version ) %}
arduino-archive-extract:
archive.extracted:
- name: {{ arduino.install_dir }}
- source: {{ arduino.mirror }}/arduino-{{ arduino.version }}-{{ arduino.arch }}.tar.xz
- skip_verify: True
- archive_format: tar
- keep: True
- if_missing: {{ arduino.install_dir }}/arduino-{{ arduino.version }}
arduino-symlink:
file.symlink:
- name: {{ arduino.install_dir }}/arduino
- target: {{ arduino.install_dir }}/arduino-{{ arduino.version }}
- force: True
- onchanges:
- arduino-archive-extract
arduino-bin-symlink:
file.symlink:
- name: /usr/bin/arduino
- target: {{ arduino.install_dir }}/arduino-{{ arduino.version }}/arduino
- force: True
- onchanges:
- arduino-archive-extract
arduino-shortcut:
file.managed:
- name: /usr/share/applications/arduino-arduinoide.desktop
- source: salt://arduino/arduino-arduinoide.desktop.j2
- template: jinja
- user: root
- group: root
- mode: 644
- onchanges:
- arduino-archive-extract
- arduino-symlink
{%- endif %}

5
states/arduino/map.jinja Normal file
View File

@ -0,0 +1,5 @@
{%- import_yaml "arduino/defaults.yaml" as default_settings -%}
{%- set defaults = salt['grains.filter_by'](default_settings, default='arduino') -%}
{%- set arduino = salt['pillar.get']('arduino', default=defaults, merge=True) -%}

View File

@ -0,0 +1,20 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
# Bareos file-daemon configuration rendered from pillar.
# NOTE(review): the FileDaemon tuning values below are read from the
# bareos:director:* pillar namespace — confirm that placement is
# intentional and not a copy/paste of the director template.
FileDaemon {
  Name = {{ grains.get('host') }}
  Maximum Concurrent Jobs = {{ salt['pillar.get']('bareos:director:max_concurrent_jobs', default=10) }}
  Compatible = {{ salt['pillar.get']('bareos:director:compatible', default='No') }}
  Working Directory = {{ salt['pillar.get']('bareos:director:working_dir', default='/var/run') }}
}
Director {
  Name = {{ salt['pillar.get']('bareos:director:name', default='bareos-dir') }}
  Address = {{ salt['pillar.get']('bareos:director:addr', default='localhost') }}
  Password = "{{ salt['pillar.get']('bareos:director:password', default='password') }}"
  Connection From Client To Director = {{ salt['pillar.get']('bareos:director:initiated', default='No') }}
}
Messages {
  Name = standard
  director = {{ salt['pillar.get']('bareos:director:name',default='bareos-dir') }} = all, !skipped, !restored
}

View File

@ -0,0 +1,21 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
# systemd unit for the statically installed bareos file daemon.
[Unit]
Description=Bareos File Daemon
Before=multi-user.target
Before=graphical.target
Before=shutdown.target
After=network-online.target
After=remote-fs.target
After=time-sync.target
After=systemd-journald-dev-log.socket
Wants=network-online.target
Conflicts=shutdown.target
[Service]
Type=forking
KillMode=process
ExecStart=/usr/sbin/bareos-fd -c /etc/bareos/bareos-fd.conf
# 15 (SIGTERM-style exit) is treated as a clean stop.
SuccessExitStatus=0 15
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,59 @@
---
# NOTE(review): legacy all-in-one bareos state — the same resources are
# re-defined in install.sls / config.sls / service.sls with map-driven
# values; consider removing this file in favour of the split states.
{%- if salt['grains.get']('kernel') == 'Linux' %}
#bareos-pkg:
#  pkg.purged:
#    - pkgs:
#      - bareos-common
#      - bareos-filedaemon
#      - bareos-bconsole
bareos-fd-bin:
  file.managed:
    - name: /usr/sbin/bareos-fd
    - source: https://paulbsd-pub.s3.fr-par.scw.cloud/bareos/static-bareos-fd-{{ salt['grains.get']('osarch')|lower }}
    # NOTE(review): skip_verify downloads without checksum validation.
    - skip_verify: True
    - user: root
    - group: root
    - mode: 0755
bareos-fd-service-file:
  file.managed:
    - name: /etc/systemd/system/bareos-fd.service
    - source: salt://bareos/bareos-fd.service.j2
    - user: root
    - group: root
    - mode: 0644
    - template: jinja
    - watch_in:
      - service: bareos-fd-service
    - require:
      - file: bareos-fd-bin
bareos-fd-config-dir:
  file.directory:
    - name: /etc/bareos
    - user: root
    - group: root
    - mode: 0755
bareos-fd-config:
  file.managed:
    - name: /etc/bareos/bareos-fd.conf
    - source: salt://bareos/bareos-fd.conf.j2
    - user: root
    - group: root
    - mode: 0644
    - template: jinja
    - watch_in:
      - service: bareos-fd-service
    - require:
      - file: bareos-fd-bin
      - file: bareos-fd-config-dir
bareos-fd-service:
  service.running:
    - name: bareos-fd
    - enable: True
    - require:
      - file: bareos-fd-service-file
{%- endif %}

21
states/bareos/config.sls Normal file
View File

@ -0,0 +1,21 @@
---
{%- from "bareos/map.jinja" import bareos with context %}
# Renders the bareos-fd configuration.  The map import above was
# missing, so every `bareos.config_dir` reference raised an
# undefined-variable error at render time.
bareos-fd-config-dir:
  file.directory:
    - name: {{ bareos.config_dir }}
    - user: root
    - group: root
    - mode: 0755
bareos-fd-config:
  file.managed:
    - name: {{ bareos.config_dir }}/bareos-fd.conf
    - source: salt://bareos/bareos-fd.conf.j2
    - user: root
    - group: root
    - mode: 0644
    - template: jinja
    - watch_in:
      # Defined in service.sls; apply both states together.
      - service: bareos-fd-service
    - require:
      # Defined in install.sls.
      - file: bareos-fd-bin
      - file: bareos-fd-config-dir

View File

@ -0,0 +1,8 @@
---
# Bareos defaults; `os` and `arch` are overridden by kernelmap.yaml /
# osarchmap.yaml, everything by the `bareos` pillar (see map.jinja).
bareos:
  enabled: true
  config_dir: /etc/bareos
  install_dir: /usr/local/bin
  mirror: https://paulbsd-pub.s3.fr-par.scw.cloud/bareos
  os: linux
  arch: amd64

5
states/bareos/init.sls Normal file
View File

@ -0,0 +1,5 @@
---
# Entry point: only the static binary install is included here;
# config.sls and service.sls are applied separately.
{%- if salt['grains.get']('kernel') == 'Linux' %}
include:
  - .install
{%- endif %}

17
states/bareos/install.sls Normal file
View File

@ -0,0 +1,17 @@
---
{%- from "bareos/map.jinja" import bareos with context %}
# Installs the statically linked bareos-fd binary from the mirror
# (distribution packages intentionally unused, see commented purge).
#bareos-pkg:
#  pkg.purged:
#    - pkgs:
#      - bareos-common
#      - bareos-filedaemon
#      - bareos-bconsole
bareos-fd-bin:
  file.managed:
    - name: /usr/sbin/bareos-fd
    - source: {{ bareos.mirror }}/static-bareos-fd-{{ bareos.arch }}
    # NOTE(review): skip_verify disables checksum validation — consider
    # shipping a source_hash next to the binary.
    - skip_verify: True
    - user: root
    - group: root
    - mode: 0755

View File

@ -0,0 +1,3 @@
---
# Maps the `kernel` grain to the `os` key (merged in map.jinja).
Linux:
  os: "linux"

14
states/bareos/map.jinja Normal file
View File

@ -0,0 +1,14 @@
{%- import_yaml "bareos/defaults.yaml" as default_settings -%}
{%- import_yaml "bareos/kernelmap.yaml" as kernelmap -%}
{%- import_yaml "bareos/osarchmap.yaml" as osarchmap -%}
{%- set defaults = salt['grains.filter_by'](default_settings,
default='bareos',
merge=salt['grains.filter_by'](osarchmap, grain='osarch',
merge=salt['grains.filter_by'](kernelmap, grain='kernel')
)
)
-%}
{%- set bareos = salt['pillar.get']('bareos', default=defaults, merge=True) -%}

View File

@ -0,0 +1,21 @@
---
# Maps the `osarch` grain to the architecture component of the static
# bareos-fd download name.  grains.filter_by matches the *string* grain
# value against these keys, so "386" must be quoted — a bare 386 parses
# as a YAML integer key and never matches the "386" grain.
amd64:
  arch: "amd64"
x86_64:
  arch: "amd64"
"386":
  arch: "386"
arm64:
  arch: "arm64"
armv6l:
  arch: "arm"
armv7l:
  arch: "arm"
armhf:
  arch: "arm"

20
states/bareos/service.sls Normal file
View File

@ -0,0 +1,20 @@
---
# Manages the systemd unit and keeps bareos-fd enabled and running;
# unit-file changes restart the service via watch_in.
bareos-fd-service-file:
  file.managed:
    - name: /etc/systemd/system/bareos-fd.service
    - source: salt://bareos/bareos-fd.service.j2
    - user: root
    - group: root
    - mode: 0644
    - template: jinja
    - watch_in:
      - service: bareos-fd-service
    - require:
      # Defined in install.sls; apply both states together.
      - file: bareos-fd-bin
bareos-fd-service:
  service.running:
    - name: bareos-fd
    - enable: True
    - require:
      - file: bareos-fd-service-file

4
states/burp/burp.conf.j2 Normal file
View File

@ -0,0 +1,4 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
{#- This template previously iterated `burp.config`, but no `burp`
    variable is injected into file.managed templates and defaults.yaml
    only defines burp.server.config / burp.client.config.  Import the
    map and pick the section matching this minion (the burp server gets
    the server config, everyone else the client config).  List values
    are emitted as repeated `key = value` lines, which is how burp
    expects multi-valued options such as exclude_fs. -#}
{%- from "burp/map.jinja" import burp with context %}
{%- if salt['grains.get']('fqdn') == burp.server.name %}
{%- set cfg = burp.server.config %}
{%- else %}
{%- set cfg = burp.client.config %}
{%- endif %}
{%- for key, value in cfg.items() %}
{%- if value is string or value is number %}
{{ key }} = {{ value }}
{%- else %}
{%- for item in value %}
{{ key }} = {{ item }}
{%- endfor %}
{%- endif %}
{%- endfor %}

View File

@ -0,0 +1,131 @@
# This is an example config file for the burp client.
# NOTE(review): the server/password lookups below use pillar keys
# burp:name / burp:password with no default — if the pillar is missing
# they render empty, and defaults.yaml stores them under different
# paths (burp:server:name, burp:client:config:password).  Confirm which
# pillar layout is authoritative.
mode = client
port = 4971
# A different port to use for restores - see the man page for more options.
#port_restore = 5971
status_port = 4972
server = {{ salt["pillar.get"]("burp:name") }}
password = {{ salt["pillar.get"]("burp:password") }}
cname = {{ grains.get('fqdn') }}
# Choose the protocol to use.
# 0 to decide automatically, 1 to force protocol1 mode (file level granularity
# with a pseudo mirrored storage on the server and optional rsync). 2 forces
# protocol2 mode (inline deduplication with variable length blocks).
# protocol = 0
pidfile = /var/run/burp.client.pid
syslog = 0
stdout = 1
progress_counter = 1
# Ratelimit throttles the send speed. Specified in Megabits per second (Mb/s).
# ratelimit = 1.5
# Network timeout defaults to 7200 seconds (2 hours).
# network_timeout = 7200
# The directory to which autoupgrade files will be downloaded.
# To never autoupgrade, leave it commented out.
# autoupgrade_dir=/etc/burp/autoupgrade/client
# OS path component for the autoupgrade directory on the server.
# autoupgrade_os=test_os
# Wait a random number of seconds between 0 and the given number before
# contacting the server on a timed backup.
# randomise = 1200
# Set server_can_restore to 0 if you do not want the server to be able to
# initiate a restore.
server_can_restore = 1
# Set server_can_override_includes to 0 if you do not want the server to be
# able to override the local include/exclude list. The default is 1.
# server_can_override_includes = 1
# Set an encryption password if you do not trust the server with your data.
# Note that this will mean that network deltas will not be possible. Each time
# a file changes, the whole file will be transferred on the next backup.
# encryption_password = My^$pAsswIrD%@
# More configuration files can be read, using syntax like the following
# (without the leading '# ').
# . path/to/more/conf
# Run as different user/group.
# user=graham
# group=nogroup
cross_filesystem=/home
cross_all_filesystems=0
# Uncomment the following lines to automatically generate a certificate signing
# request and send it to the server.
ca_burp_ca = /usr/sbin/burp_ca
ca_csr_dir = /etc/burp/CA-client
# SSL certificate authority - same file on both server and client
ssl_cert_ca = /etc/burp/ssl_cert_ca.pem
# Client SSL certificate
ssl_cert = /etc/burp/ssl_cert-client.pem
# Client SSL key
ssl_key = /etc/burp/ssl_cert-client.key
# Client SSL ciphers
#ssl_ciphers =
# Client SSL compression. Default is zlib5. Set to zlib0 to turn it off.
#ssl_compression = zlib5
# SSL key password, for loading a certificate with encryption.
#ssl_key_password = password
# Common name in the certificate that the server gives us
ssl_peer_cn = burpserver
# Example syntax for pre/post scripts
#backup_script_pre=/path/to/a/script
#backup_script_post=/path/to/a/script
#restore_script_pre=/path/to/a/script
#restore_script_post=/path/to/a/script
# The following options specify exactly what to backup.
# The server will override them if there is at least one 'include=' line on
# the server side and 'server_can_override_includes=1'.
#include = /home
#exclude = /home/graham/testdir/librsync-0.9.7/testsuite
#include = /home/graham/testdir/librsync-0.9.7/testsuite/deep
#include = /home/graham/xdir
#exclude = /home/graham/testdir/libr
# Exclude file names ending in '.vdi' or '.vmdk' (case insensitive)
#exclude_ext = vdi
#exclude_ext = vmd
# Exclude file path matching a regular expression
# (note that 'include_regex' is not yet implemented)
#exclude_regex = \.cache
# Exclude various temporary file systems. You may want to add devfs, devpts,
# proc, ramfs, etc.
exclude_fs = sysfs
exclude_fs = tmpfs
# Exclude files based on size. Defaults are 0, which means no limit.
#min_file_size = 0 Mb
#max_file_size = 0 Mb
# The content of directories containing a filesystem entry named like this
# will not be backed up.
nobackup = .nobackup
# By default, burp backups up the fifos themselves, rather than reading from
# them. These two options let you choose a particular fifo to read, or read
# from all fifos.
#read_fifo=/path/to/a/fifo
#read_all_fifos=0
# The same for block device nodes.
#read_blockdev=/path/to/a/blockdev
#read_all_blockdevs=0
# Exclude files from compression by extension.
exclude_comp=bz2
exclude_comp=gz
# When backing up, whether to enable O_NOATIME when opening files and
# directories. The default is atime=0, which enables O_NOATIME.
#atime=1
# When enabled, this causes problems in the phase1 scan (such as an 'include'
# being missing) to be treated as fatal errors. The default is 0.
#scan_problem_raises_error=1

View File

@ -0,0 +1,12 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
# Backup job unit, triggered by burp-backup.timer.
[Unit]
Description=Burp backup task
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
# -a b (combined as -ab): run the client "backup" action.
ExecStart=/usr/sbin/burp -ab
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,12 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
# Fires burp-backup.service daily at 23:30; Persistent=true catches up
# on missed runs after downtime.
[Unit]
Description=Burp backup timer
After=network-online.target
Wants=network-online.target
[Timer]
OnCalendar=Mon-Sun 23:30
Persistent=true
[Install]
WantedBy=timers.target

View File

@ -0,0 +1,11 @@
---
{%- from "burp/map.jinja" import burp with context %}
# Client-side burp configuration file.
burp-client-config:
  file.managed:
    - name: /etc/burp/burp.conf
    - source: salt://burp/burp.conf.j2
    - user: root
    - mode: 0644
    - template: jinja
    - require:
      # Defined in pkg.sls; apply both states together.
      - pkg: burp-pkg

View File

@ -0,0 +1,19 @@
---
{%- from "burp/map.jinja" import burp with context %}
{%- if salt['grains.get']('init') == 'systemd' %}
burp-task:
  file.managed:
    - name: /etc/systemd/system/burp-backup.service
    - source: salt://burp/burp-backup.service.j2
    - user: root
    - mode: 0644
    - template: jinja
burp-timer:
  file.managed:
    - name: /etc/systemd/system/burp-backup.timer
    - source: salt://burp/burp-backup.timer.j2
    - user: root
    - mode: 0644
    - template: jinja
# Enable and start the timer: previously the unit files were written
# but never activated, so scheduled backups never ran.
burp-timer-service:
  service.running:
    - name: burp-backup.timer
    - enable: True
    - watch:
      - file: burp-task
      - file: burp-timer
{%- endif %}

66
states/burp/defaults.yaml Normal file
View File

@ -0,0 +1,66 @@
---
# Burp defaults, overridden by the `burp` pillar (see map.jinja).
# NOTE: YAML forbids duplicate keys; the previous duplicated
# exclude_fs / exclude_comp entries were collapsed to the value a
# last-wins parser actually kept (tmpfs / gz).  To ship several values,
# make the config template list-aware and use a YAML list here.
burp:
  enabled: true
  server:
    name: nuc.paulbsd.com
    config:
      mode: server
      port: 4971
      port_restore: 5971
      status_port: 4972
      # NOTE(review): placeholder secret — override from (encrypted)
      # pillar, never commit real credentials here.
      password: password
      cname: hostname
      pidfile: /var/run/burp.client.pid
      syslog: 0
      stdout: 1
      progress_counter: 1
      server_can_restore: 1
      cross_filesystem: /home
      cross_all_filesystems: 0
      ca_burp_ca: /usr/sbin/burp_ca
      ca_csr_dir: /etc/burp/CA-client
      ssl_cert_ca: /etc/burp/ssl_cert_ca.pem
      ssl_cert: /etc/burp/ssl_cert-client.pem
      ssl_key: /etc/burp/ssl_cert-client.key
      ssl_peer_cn: burpserver
      exclude_fs: tmpfs
      exclude_comp: gz
      nobackup: .nobackup
    clients:
      - name: thinkpad.paulbsd.com
        schedule:
          type: "after"
          value: "600"
      - name: scw01-ams.paulbsd.com
        schedule:
          type: "at"
          value: "22h"
  client:
    config:
      mode: client
      port: 4971
      port_restore: 5971
      status_port: 4972
      server: nuc.paulbsd.com
      # NOTE(review): placeholder secret — override from pillar.
      password: password
      cname: hostname
      pidfile: /var/run/burp.client.pid
      syslog: 0
      stdout: 1
      progress_counter: 1
      server_can_restore: 1
      cross_filesystem: /home
      cross_all_filesystems: 0
      ca_burp_ca: /usr/sbin/burp_ca
      ca_csr_dir: /etc/burp/CA-client
      ssl_cert_ca: /etc/burp/ssl_cert_ca.pem
      ssl_cert: /etc/burp/ssl_cert-client.pem
      ssl_key: /etc/burp/ssl_cert-client.key
      ssl_peer_cn: burpserver
      exclude_fs: tmpfs
      exclude_comp: gz
      nobackup: .nobackup

12
states/burp/init.sls Normal file
View File

@ -0,0 +1,12 @@
---
{%- from "burp/map.jinja" import burp with context %}
# Selects server or client states by fqdn.  The previous version tested
# membership in `burp.hosts.name`, which does not exist in defaults.yaml
# (client entries live under burp.server.clients) and would have raised
# an undefined-variable error; compare against the client name list.
include:
  - .install
  - .pkg
{%- if salt['grains.get']('fqdn') == burp.server.name %}
  - .server.config
  - .server.service
{%- elif salt['grains.get']('fqdn') in burp.server.clients|map(attribute='name')|list %}
  - .client.config
  - .client.service
{%- endif %}

7
states/burp/install.sls Normal file
View File

@ -0,0 +1,7 @@
---
{%- from "burp/map.jinja" import burp with context %}
# Ensures the burp configuration directory exists.
burp-config-dir:
  file.directory:
    - name: /etc/burp
    - user: root
    - mode: 0755

8
states/burp/map.jinja Normal file
View File

@ -0,0 +1,8 @@
{%- import_yaml "burp/defaults.yaml" as defaults -%}
{%- set burp = salt['pillar.get'](
'burp',
default=defaults.burp,
merge=True
)
-%}

4
states/burp/pkg.sls Normal file
View File

@ -0,0 +1,4 @@
---
# Installs the burp package from the distribution repositories.
burp-pkg:
  pkg.installed:
    - name: burp

View File

@ -0,0 +1,10 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
# Variant timer: runs the backup 600 s after boot instead of on the
# (commented-out) calendar schedule.
[Unit]
Description=Burp backup timer
[Timer]
OnStartupSec=600
#OnCalendar=Mon-Sun 22:00
[Install]
WantedBy=timers.target

View File

@ -0,0 +1,11 @@
---
{% from "burp/map.jinja" import burp with context %}
burp-config:
file.managed:
- name: /etc/burp/burp-server.conf
- source: salt://burp/burp.conf.j2
- user: root
- mode: 0644
- template: jinja
- require:
- pkg: burp-pkg

View File

@ -0,0 +1,11 @@
---
{% from "burp/map.jinja" import burp with context %}
{% if salt['grains.get']('init') == 'systemd' %}
# NOTE(review): the unit file is written but no service.running /
# enable state activates it here — confirm the burp server service is
# started elsewhere.
burp-task:
  file.managed:
    - name: /etc/systemd/system/burp.service
    - source: salt://burp/burp.service.j2
    - user: root
    - mode: 0644
    - template: jinja
{% endif %}

17
states/cds/config.sls Normal file
View File

@ -0,0 +1,17 @@
---
{%- from "cds/map.jinja" import cds with context %}
# CDS engine configuration; changes restart the service via watch_in
# (cds-service is defined in service.sls).
cds-config-dir:
  file.directory:
    - name: /etc/cds
    - watch_in:
      - service: cds-service
cds-config-file:
  file.managed:
    - name: /etc/cds/cds.conf
    - source: salt://cds/cds.conf.j2
    - user: root
    - group: root
    - template: jinja
    - watch_in:
      - service: cds-service

7
states/cds/defaults.yaml Normal file
View File

@ -0,0 +1,7 @@
---
# CDS defaults, overridden by the `cds` pillar (see map.jinja).
cds:
  engine:
    enabled: true
  config:
    # No trailing slash — install.sls inserts '/' when building the URL
    # (the previous value produced '.../download//<version>/...').
    mirror: https://github.com/ovh/cds/releases/download
    # Quoted so the version can never be re-typed by a YAML parser.
    version: "0.43.1"

6
states/cds/init.sls Normal file
View File

@ -0,0 +1,6 @@
---
{%- from "cds/map.jinja" import cds with context %}
# Full CDS deployment: binary, configuration, systemd service.
include:
  - .install
  - .config
  - .service

12
states/cds/install.sls Normal file
View File

@ -0,0 +1,12 @@
---
{%- from "cds/map.jinja" import cds with context %}
# Downloads the versioned cds-engine binary and points a stable symlink
# at it.  The previous version mixed direct pillar lookups with two
# different paths ('cds:engine:version' for the file vs 'cds:version'
# for the symlink target — a mismatch) and referenced cds.mirror /
# cds.version, which do not exist in defaults.yaml; everything now goes
# through the map's cds.config.* keys.
cds-engine-install:
  file.managed:
    - name: /usr/bin/cds-engine-{{ cds.config.version }}
    - source: {{ cds.config.mirror }}/{{ cds.config.version }}/cds-engine-{{ salt['grains.get']('kernel')|lower }}-{{ salt['grains.get']('osarch') }}
    # NOTE(review): skip_verify disables checksum validation.
    - skip_verify: True
    # The downloaded file is a binary and must be executable.
    - mode: 0755
cds-bin-symlink:
  file.symlink:
    - name: /usr/bin/cds-engine
    - target: /usr/bin/cds-engine-{{ cds.config.version }}
    - require:
      - file: cds-engine-install

5
states/cds/map.jinja Normal file
View File

@ -0,0 +1,5 @@
{%- import_yaml "cds/defaults.yaml" as default_settings -%}
{%- set defaults = salt['grains.filter_by'](default_settings, default='cds') -%}
{%- set cds = salt['pillar.get']('cds', default=defaults, merge=True) -%}

15
states/cds/service.sls Normal file
View File

@ -0,0 +1,15 @@
---
{%- from "cds/map.jinja" import cds with context %}
# Installs the systemd unit and keeps the cds service enabled/running.
cds-service-file:
  file.managed:
    - name: /etc/systemd/system/cds.service
    - source: salt://cds/cds.service.j2
    - user: root
    - group: root
    - watch_in:
      - service: cds-service
cds-service:
  service.running:
    - name: cds
    - enable: True

View File

@ -0,0 +1,16 @@
[Unit]
Description=The plugin-driven server agent for reporting metrics into InfluxDB
Documentation=https://github.com/influxdata/telegraf
After=network.target
[Service]
# '-' prefix: a missing env file is tolerated.  NOTE(review): if
# /etc/default/telegraf does not define INFLUX_CONFIG, ExecStart expands
# to `telegraf --config` with no argument and the unit fails — confirm
# the env file always sets it.
EnvironmentFile=-/etc/default/telegraf
#User=telegraf
ExecStart=/usr/bin/telegraf --config $INFLUX_CONFIG
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
RestartForceExitStatus=SIGPIPE
KillMode=control-group
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,6 @@
---
# Defaults for the shared INI config file managed by config/init.sls.
config:
  enabled: true
  install_dir: "/usr/local/apps/config"
  filename: "common.ini"
  # Sections/options enforced via ini.options_present; filled from pillar.
  config: {}

19
states/config/init.sls Normal file
View File

@ -0,0 +1,19 @@
---
{%- from "config/map.jinja" import config with context %}
# Writes a shared INI file and enforces pillar-provided options in it.
config-dir:
  file.directory:
    - name: {{ config.install_dir }}
    - user: root
    - mode: "0755"
config-file:
  file.managed:
    - name: {{ config.install_dir }}/{{ config.filename }}
    - user: root
    # A plain INI data file must not be executable; 0755 looked like a
    # copy/paste of the directory mode.
    - mode: "0644"
    - require:
      - file: config-dir
config-file-config:
  ini.options_present:
    - name: {{ config.install_dir }}/{{ config.filename }}
    - separator: '='
    - sections: {{ config.config }}
    - require:
      - file: config-file

3
states/config/map.jinja Normal file
View File

@ -0,0 +1,3 @@
{%- import_yaml "config/defaults.yaml" as defaults %}
{%- set config = salt['pillar.get']('config', default=defaults.config, merge=True) -%}

View File

@ -0,0 +1,9 @@
---
# Coronafana defaults; `os` and `arch` are overridden by the
# kernelmap/osarchmap files in map.jinja, everything by pillar.
coronafana:
  enabled: true
  install_dir: /usr/local/apps
  release_dir: /usr/local/apps/releases
  mirror: https://git.paulbsd.com/paulbsd/coronafana/releases/download
  version: 1.0.3
  os: linux
  arch: amd64

View File

@ -0,0 +1,3 @@
---
# Coronafana deployment entry point.
include:
  - .install

View File

@ -0,0 +1,16 @@
---
{% from "coronafana/map.jinja" import coronafana with context %}
# Extracts the versioned release tarball and points a stable symlink at
# the release directory.
coronafana-archive-extract:
  archive.extracted:
    - name: {{ coronafana.release_dir }}/coronafana-{{ coronafana.version }}
    - source: {{ coronafana.mirror }}/{{ coronafana.version }}/coronafana-{{ coronafana.version }}-{{ coronafana.os }}-{{ coronafana.arch }}.tar.gz
    # NOTE(review): skip_verify disables checksum validation.
    - skip_verify: True
    - enforce_toplevel: False
    - if_missing: {{ coronafana.release_dir }}/coronafana-{{ coronafana.version }}
coronafana-binary-symlink:
  file.symlink:
    - name: {{ coronafana.install_dir }}/coronafana
    - target: {{ coronafana.release_dir }}/coronafana-{{ coronafana.version }}
    - require:
      - archive: coronafana-archive-extract

View File

@ -0,0 +1,3 @@
---
Linux:
os: "linux"

View File

@ -0,0 +1,14 @@
{%- import_yaml "coronafana/defaults.yaml" as default_settings -%}
{%- import_yaml "coronafana/kernelmap.yaml" as kernelmap -%}
{%- import_yaml "coronafana/osarchmap.yaml" as osarchmap -%}
{%- set defaults = salt['grains.filter_by'](default_settings,
default='coronafana',
merge=salt['grains.filter_by'](osarchmap, grain='osarch',
merge=salt['grains.filter_by'](kernelmap, grain='kernel')
)
)
-%}
{%- set coronafana = salt['pillar.get']('coronafana', default=defaults, merge=True) -%}

View File

@ -0,0 +1,21 @@
---
# Maps the `osarch` grain to the release-asset architecture component.
# grains.filter_by matches the *string* grain value against these keys,
# so "386" must be quoted — a bare 386 parses as a YAML integer key and
# never matches the "386" grain.
amd64:
  arch: "amd64"
x86_64:
  arch: "amd64"
"386":
  arch: "386"
arm64:
  arch: "arm64"
armv6l:
  arch: "arm"
armv7l:
  arch: "arm"
armhf:
  arch: "arm"

10
states/cron/defaults.yaml Normal file
View File

@ -0,0 +1,10 @@
---
# Default cron environment, merged with the `cron` pillar in map.jinja.
# This file is rendered through Jinja by import_yaml, so the pillar
# lookup below is evaluated at import time.
cron:
  env:
    SHELL:
      name: SHELL
      command: /bin/bash
    MAILTO:
      name: MAILTO
      # Quoted so a missing/empty pillar value stays a string instead of
      # collapsing to YAML null; fall back to root as the mail target.
      command: "{{ salt['pillar.get']('syscontact', default='root') }}"
  # Explicit empty mapping — the previous bare `tasks:` parsed as null.
  tasks: {}

8
states/cron/init.sls Normal file
View File

@ -0,0 +1,8 @@
---
{%- from "cron/map.jinja" import cron with context %}
# Ensures each configured crontab environment variable is present.
# The state ID previously used {{ '{{ key.lower }}' }} — the *unbound
# method*, which Jinja renders as its repr ("<built-in method lower of
# str object at 0x...>"), producing garbage, run-dependent state IDs.
# The |lower filter actually lowercases the key.
{%- for key, value in cron.env.items() %}
cron-env-{{ key|lower }}:
  cron.env_present:
    - name: {{ value.name }}
    - value: {{ value.command }}
{%- endfor %}

8
states/cron/map.jinja Normal file
View File

@ -0,0 +1,8 @@
{%- import_yaml "cron/defaults.yaml" as defaults %}
{%- set cron = salt['pillar.get'](
'cron',
default=defaults.cron,
merge=True
)
-%}

6
states/custom.sls Normal file
View File

@ -0,0 +1,6 @@
---
# Example invocation of the custom.enforce_custom_thing state module
# (see _states); the values are placeholders.
human_friendly_state_id:
  custom.enforce_custom_thing:
    - name: Nom
    - foo: Valeur
    - bar: False

15
states/dkron/config.sls Normal file
View File

@ -0,0 +1,15 @@
---
{%- from "dkron/map.jinja" import dkron with context -%}
# Dkron configuration; changes restart the service via watch_in
# (dkron-service is defined in service.sls).
dkron-config-dir:
  file.directory:
    - name: /etc/dkron
    - user: {{ dkron.runuser }}
dkron-config:
  file.managed:
    - name: /etc/dkron/dkron.yml
    - source: salt://dkron/dkron.yml.j2
    - user: {{ dkron.runuser }}
    - template: jinja
    - watch_in:
      - service: dkron-service

View File

@ -0,0 +1,20 @@
---
# Dkron defaults; `os` and `arch` are overridden by kernelmap/osarchmap
# in map.jinja, everything by the `dkron` pillar.
dkron:
  enabled: true
  install_dir: "/usr/local/apps"
  release_dir: "/usr/local/apps/releases"
  mirror: "https://github.com/distribworks/dkron/releases/download"
  version: "2.1.1"
  os: "linux"
  arch: "amd64"
  runuser: "dkron"
  # NOTE(review): placeholder API credentials — override from pillar.
  user: "user"
  password: "password"
  url: "http://localhost:8898"
  verify: false
  # Rendered verbatim into dkron.yml by dkron.yml.j2.
  config:
    bootstrap-expect: 1
    server: true
    http-addr: "127.0.0.1:8898"
    data-dir: "/var/lib/dkron"
  # Jobs pushed through the custom dkron.jobs state (jobs.sls).
  jobs: []

View File

@ -0,0 +1,15 @@
{%- from "dkron/map.jinja" import dkron with context -%}
[Unit]
Description=dkron - Open Source task scheduler
Documentation=https://dkron.io/
After=network.target
[Service]
User=%i
ExecStart={{ dkron.install_dir }}/dkron/dkron agent
Restart=on-failure
SuccessExitStatus=3 4
RestartForceExitStatus=3 4
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,4 @@
## {{ salt['pillar.get']('salt_managed', default='Salt Managed') }}
---
{%- from "dkron/map.jinja" import dkron with context %}
{#- Dump the whole dkron.config mapping as YAML (no flow style). -#}
{{ dkron.config|yaml(False) }}

6
states/dkron/init.sls Normal file
View File

@ -0,0 +1,6 @@
---
# Full dkron deployment: binary, config, service, then job definitions.
include:
  - .install
  - .config
  - .service
  - .jobs

30
states/dkron/install.sls Normal file
View File

@ -0,0 +1,30 @@
---
{%- from "dkron/map.jinja" import dkron with context %}
dkron-user:
user.present:
- name: {{ dkron.runuser }}
dkron-archive-extract:
archive.extracted:
- name: {{ dkron.release_dir }}/dkron_{{ dkron.version }}
- source: {{ dkron.mirror }}/v{{ dkron.version }}/dkron_{{ dkron.version }}_{{ dkron.os }}_{{ dkron.arch }}.tar.gz
- user: {{ dkron.runuser }}
- enforce_toplevel: False
- skip_verify: True
- archive_format: tar
- if_missing: {{ dkron.release_dir }}/dkron_{{ dkron.version }}
dkron-bin-symlink:
file.symlink:
- name: {{ dkron.install_dir }}/dkron
- target: {{ dkron.release_dir }}/dkron_{{ dkron.version }}
- user: {{ dkron.runuser }}
- watch_in:
- service: dkron-service
dkron-data-dir:
file.directory:
- name: {{ dkron.config.get('data-dir') }}
- user: {{ dkron.runuser }}
- watch_in:
- service: dkron-service

8
states/dkron/jobs.sls Normal file
View File

@ -0,0 +1,8 @@
---
{%- from "dkron/map.jinja" import dkron with context %}
# Pushes job definitions to the dkron HTTP API via the custom
# dkron.jobs state module (see _states).
dkron-jobs:
  dkron.jobs:
    - name: dkron-jobs
    - url: {{ dkron.url }}
    - verify: {{ dkron.verify }}
    - jobs: {{ dkron.jobs }}

View File

@ -0,0 +1,3 @@
---
Linux:
os: "linux"

14
states/dkron/map.jinja Normal file
View File

@ -0,0 +1,14 @@
{%- import_yaml "dkron/defaults.yaml" as default_settings -%}
{%- import_yaml "dkron/kernelmap.yaml" as kernelmap %}
{%- import_yaml "dkron/osarchmap.yaml" as osarchmap %}
{%- set defaults = salt['grains.filter_by'](default_settings,
default='dkron',
merge=salt['grains.filter_by'](osarchmap, grain='osarch',
merge=salt['grains.filter_by'](kernelmap, grain='kernel')
)
)
-%}
{%- set dkron = salt['pillar.get']('dkron', default=defaults, merge=True) -%}

Some files were not shown because too many files have changed in this diff Show More