Commit 64b608c3 authored by Bernd Zeimetz's avatar Bernd Zeimetz
Browse files

Merge branch 'master' into devel

parents f61dd538 55d1af73
# The good, old params.pp pattern :)
class percona::params {
class percona::params(
$buffersize_factor = 0.8 # Recommended for 32GB and up
) {
$bind_address = $::ipaddress
$wsrep_node_address = $::ipaddress
$buffersize = floor( $::memorysize_mb * 0.8 )
$buffersize = floor( $::memorysize_mb * $buffersize_factor )
$pool_instances = ceiling( $buffersize / 1024 )
case $::osfamily {
......
......@@ -111,7 +111,7 @@ class percona::server(
mysql_grant { '[email protected]/*.*':
ensure => 'present',
options => ['GRANT'],
privileges => ['RELOAD', 'LOCK TABLES', 'REPLICATION CLIENT'],
privileges => ['RELOAD', 'LOCK TABLES', 'REPLICATION CLIENT', 'PROCESS'],
table => '*.*',
user => '[email protected]',
require => Mysql_user['[email protected]'],
......
......@@ -10,50 +10,50 @@ class percona::server::config {
'max-allowed-packet' => '16M',
'max-connect-errors' => '1000000',
### sane modes, but disabled as too much stuff is not sane... :(
## 'sql-mode' => 'STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_AUTO_VALUE_ON_ZERO,NO_ENGINE_SUBSTITUTION,ONLY_FULL_GROUP_BY',
## 'innodb-strict-mode' => '1',
## 'sql-mode' => 'STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_AUTO_VALUE_ON_ZERO,NO_ENGINE_SUBSTITUTION,ONLY_FULL_GROUP_BY',
## 'innodb-strict-mode' => '1',
'sysdate-is-now' => '1',
'innodb' => 'FORCE',
'explicit_defaults_for_timestamp' => '0',
# BINARY LOGGING
'log-bin' => '/var/lib/mysql_binlog/binary_log',
'expire-logs-days' => '10',
'log-bin' => '/var/lib/mysql_binlog/binary_log',
'expire-logs-days' => '10',
# MyISAM - just in case....
'key-buffer-size' => '32M',
'myisam-recover' => 'FORCE,BACKUP',
'key-buffer-size' => '32M',
'myisam-recover' => 'FORCE,BACKUP',
# INNODB
'innodb-flush-method' => 'O_DIRECT',
'innodb-log-files-in-group' => '2',
'innodb-log-file-size' => '512M',
'innodb-file-per-table' => '1',
'innodb-buffer-pool-size' => "${buffersize}M",
'innodb_buffer_pool_instances' => $pool_instances,
'innodb_io_capacity' => '2000',
'innodb_read_io_threads' => '16',
'innodb_write_io_threads' => '16',
'innodb-flush-method' => 'O_DIRECT',
'innodb-log-files-in-group' => '2',
'innodb-log-file-size' => '512M',
'innodb-file-per-table' => '1',
'innodb-buffer-pool-size' => "${buffersize}M",
'innodb_buffer_pool_instances' => $pool_instances,
'innodb_io_capacity' => '2000',
'innodb_read_io_threads' => '16',
'innodb_write_io_threads' => '16',
'thread_pool_size' => '36',
# CACHES AND LIMITS
'tmp-table-size' => '32M',
'max-heap-table-size' => '32M',
'query_cache_size' => '0',
'query_cache_type' => '0',
'max-connections' => '1600',
'thread-cache-size' => '3200',
'open-files-limit' => '65535',
'table-definition-cache' => '4096',
'table-open-cache' => '8000',
'join_buffer_size' => '1048576',
'sort_buffer_size' => '1048576',
'tmp-table-size' => '32M',
'max-heap-table-size' => '32M',
'query_cache_size' => '0',
'query_cache_type' => '0',
'max-connections' => '1600',
'thread-cache-size' => '3200',
'open-files-limit' => '65535',
'table-definition-cache' => '4096',
'table-open-cache' => '8000',
'join_buffer_size' => '1048576',
'sort_buffer_size' => '1048576',
# LOGGING
'long_query_time' => '0.75',
'log_queries_not_using_indexes' => '0',
'slow_query_log' => '1',
'slow_query_log_file' => "/var/log/mysql/${::hostname}-slow.log",
'long_query_time' => '0.75',
'log_queries_not_using_indexes' => '0',
'slow_query_log' => '1',
'slow_query_log_file' => "/var/log/mysql/${::hostname}-slow.log",
},
}
......@@ -83,5 +83,5 @@ class percona::server::config {
},
}
$default_options = mysql_deepmerge($mysql_options, $percona_options)
$default_options = mysql_deepmerge(mysql_deepmerge({}, $mysql_options), $percona_options)
}
# Setup HAProxy in front of a Percona/Galera cluster.
#
# Exposes two TCP frontends per cluster node:
#   - "${clustername}-ro" on port 3307 (read-only, all nodes balanced)
#   - "${clustername}-rw" on port 3308 (read/write, one primary node;
#     all other nodes are marked 'backup')
# Balancermembers are exported with tag 'bzed-percona_cluster' and
# collected on every node, so each HAProxy knows about the full cluster.
#
# Parameters:
#   $clustername                     - name of the cluster; also selects the
#                                      "::percona_cluster_${clustername}" fact.
#   $wsrep_node_address              - this node's cluster/bind address.
#   $haproxy_global_options          - hash deep-merged over the defaults below.
#   $haproxy_defaults_options        - hash deep-merged over the defaults below.
#   $haproxy_backend_options         - hash deep-merged over the defaults below.
#   $haproxy_socket                  - admin stats socket path.
#   $haproxy_readonly_frontend_bind  - bind hash for the read-only frontend.
#   $haproxy_readwrite_frontend_bind - bind hash for the read/write frontend.
#   $haproxy_balancermember_options  - server-line options; port 9200 is the
#                                      clustercheck health-check service.
class percona::server::haproxy(
  $clustername,
  $wsrep_node_address,
  $haproxy_global_options = {},
  $haproxy_defaults_options = {},
  $haproxy_backend_options = {},
  $haproxy_socket = '/run/haproxy/admin.sock',
  $haproxy_readonly_frontend_bind = { "${wsrep_node_address}:3307" => [] },
  $haproxy_readwrite_frontend_bind = { "${wsrep_node_address}:3308" => [] },
  $haproxy_balancermember_options = 'check port 9200 inter 12000 rise 3 fall 3 weight 100',
){
  if $::osfamily == 'Debian' {
    ensure_packages('hatop')
  }
  # The cluster membership is published via a custom fact named after the
  # cluster; it holds a comma-separated node list.
  $clusternodes = getvar("::percona_cluster_${clustername}")
  $clusternodes_array = split($clusternodes, ',')
  # The first node in the fact's list is elected as the read/write backend;
  # everybody else only serves as 'backup' on the -rw listener.
  if ($clusternodes_array
      and $wsrep_node_address
      and !empty($clusternodes_array)
      and $clusternodes_array[0] == $wsrep_node_address
  ) {
    $rw_backend = true
  } else {
    $rw_backend = false
  }
  $haproxy_default_global_options = {
    'log' => [
      '/var/lib/haproxy/dev/log local0',
    ],
    'chroot' => '/var/lib/haproxy',
    'stats' => "socket ${haproxy_socket} level admin mode 0660",
    'user' => 'haproxy',
    'group' => 'haproxy',
    'ulimit-n' => '65536',
    'maxconn' => '32000',
  }
  $haproxy_default_defaults_options = {
    'log' => 'global',
    'mode' => 'http',
    # Long client/server timeouts: MySQL connections are long-lived.
    'timeout' => [
      'connect 5s',
      'client 28800s',
      'server 28800s',
    ],
    'option' => [
      'log-health-checks',
      'dontlognull',
      'dontlog-normal',
      'tcplog',
      'redispatch',
    ],
    'retries' => 3,
    'maxconn' => '32000',
  }
  $haproxy_default_backend_options = {
    'mode' => 'tcp',
    'balance' => 'leastconn',
    'option' => [
      'tcplog',
      'httpchk',
    ],
    'fullconn' => '2048',
  }
  # restart_command '/bin/true' avoids bouncing haproxy (and killing live
  # MySQL connections) on every config change; reloads are handled elsewhere.
  class { '::haproxy' :
    global_options => deep_merge(
      $haproxy_default_global_options,
      $haproxy_global_options
    ),
    defaults_options => deep_merge(
      $haproxy_default_defaults_options,
      $haproxy_defaults_options
    ),
    restart_command => '/bin/true',
  }
  # Resource defaults for all Haproxy::Backend resources in this scope.
  # Backends depend on the clustercheck socket that serves the httpchk.
  Haproxy::Backend{
    options => deep_merge(
      $haproxy_default_backend_options,
      $haproxy_backend_options
    ),
    collect_exported => false,
    require => Service['clustercheck.socket'],
  }
  ::haproxy::frontend{"${clustername}-ro":
    bind => $haproxy_readonly_frontend_bind,
    mode => 'tcp',
    options => {
      'option' => [
        'log-health-checks',
        'dontlognull',
        'dontlog-normal',
        'tcplog',
      ],
      'default_backend' => "${clustername}-ro",
    },
  }
  ::haproxy::frontend{"${clustername}-rw":
    bind => $haproxy_readwrite_frontend_bind,
    mode => 'tcp',
    options => {
      'option' => [
        'tcplog',
      ],
      'default_backend' => "${clustername}-rw",
    },
  }
  ::haproxy::backend{"${clustername}-ro":
  }
  ::haproxy::backend{"${clustername}-rw":
  }
  # Collect the balancermembers exported by all cluster nodes.
  Haproxy::Balancermember<<| listening_service == "${clustername}-ro" and tag == 'bzed-percona_cluster' |>>
  Haproxy::Balancermember<<| listening_service == "${clustername}-rw" and tag == 'bzed-percona_cluster' |>>
  # Export this node as a read-only member for every node's -ro backend.
  @@::haproxy::balancermember{"${::hostname}-ro":
    listening_service => "${clustername}-ro",
    ports => 3306,
    ipaddresses => $wsrep_node_address,
    server_names => $::hostname,
    options => $haproxy_balancermember_options,
    tag => 'bzed-percona_cluster',
  }
  if $rw_backend {
    #lint:ignore:empty_string_assignment
    $rw_backup = ''
    #lint:endignore
  } else {
    $rw_backup = 'backup'
  }
  # Export this node for the -rw backend; non-primary nodes carry 'backup'
  # so only the elected node receives writes while it is healthy.
  @@::haproxy::balancermember{"${::hostname}-rw":
    listening_service => "${clustername}-rw",
    ports => 3306,
    ipaddresses => $wsrep_node_address,
    server_names => $::hostname,
    options => "${haproxy_balancermember_options} ${rw_backup}",
    tag => 'bzed-percona_cluster',
  }
}
......@@ -11,5 +11,5 @@ class percona::server::nodes(
order => '01'
}
Concat::Fragment <<| target == $target and tag == 'bzed-percona_cluster' |>>
Concat::Fragment<<| target == $target and tag == 'bzed-percona_cluster' |>>
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment