Compare commits

...

42 Commits

Author SHA1 Message Date
1998e6e77a feat: bump 1.7.0
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2024-10-06 10:28:15 +02:00
f0cb50e797 Merge pull request 'add systemd notify support' (#14) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #14
2024-10-02 19:32:47 +02:00
3c4d6fb2cf update errors and variable names
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-10-02 19:11:44 +02:00
bdf41fa605 add systemd notify statuses
Some checks failed
continuous-integration/drone/push Build is failing
2024-10-02 19:07:27 +02:00
ebb6e5ec6d added sd-notify dependency
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-02 17:45:26 +02:00
46eaf6017f updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-28 14:50:07 +02:00
9b456d403f updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-24 12:44:32 +02:00
71d640f393 updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-24 12:43:24 +02:00
0a82d46bf1 updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-11 18:01:32 +02:00
29472b4d7f updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-11 17:59:36 +02:00
22214b8d55 Merge pull request 'updated dependencies - fosdem 2024 commit' (#13) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone Build is passing
Reviewed-on: #13
2024-02-03 10:21:22 +01:00
9bae2248df updated dependencies - fosdem 2024 commit
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-02-03 10:13:00 +01:00
ecd35fd37a update to 1.6.8
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2024-01-04 11:05:47 +01:00
129a7e9ada updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-03 21:47:13 +01:00
1e2f047824 add ips in chunks to nftables
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-03 21:44:00 +01:00
a60ec90608 update to 1.6.7
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-26 13:14:08 +01:00
ce6ca78087 added safety in ipblc
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-26 13:13:30 +01:00
2e6e7efdbf hotfix on ws connections
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-26 11:11:38 +01:00
bae5443ca4 update to version 1.6.6
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/tag Build is passing
2023-12-26 10:49:21 +01:00
f29ccd3f0b updated ipevent with Option<IpData>
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is failing
2023-12-26 10:42:39 +01:00
6c43635c92 update to version 1.6.6
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-24 07:44:21 +01:00
1067566e9d updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-23 13:19:14 +01:00
d47a4e218d update to version 1.6.5
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-23 13:18:12 +01:00
0b67bbdab3 update to version 1.6.5
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-23 13:13:52 +01:00
809b252df7 added error handling for monitoring
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-12 22:41:21 +01:00
5d132c6380 Merge branch 'develop'
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-04 12:31:08 +01:00
80c3faec58 fix exception handling on fw.rs
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 12:18:59 +01:00
103f8ea411 Merge pull request 'update to version 1.6.4' (#11) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #11
2023-11-27 13:49:42 +01:00
104d1558b1 update to version 1.6.4
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-11-27 13:44:44 +01:00
ad8744a92c Merge pull request 'fix of websocket error' (#10) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #10
2023-11-25 18:01:12 +01:00
1313296acf updated dependencies
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is passing
2023-11-25 17:46:24 +01:00
46a01efeea fix return in websocket.rs/send_to_ipbl_websocket
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-25 17:35:48 +01:00
c681825efe fix error handling in websocket
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-24 21:16:07 +01:00
0806e66671 Merge pull request 'fix on monitoring' (#9) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #9
2023-11-18 13:16:32 +01:00
9187642172 fix on monitoring
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-11-18 13:13:25 +01:00
77ee68c081 fix on firewall rule building
All checks were successful
continuous-integration/drone/tag Build is passing
continuous-integration/drone/push Build is passing
2023-11-12 17:13:47 +01:00
cd67b0d602 fix on monitoring socket
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-11-12 11:00:42 +01:00
b50a2d44d7 Merge pull request 'update to 1.6.0' (#8) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #8
2023-11-10 23:46:42 +01:00
7d45f708c3 update to 1.6.0
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-11-10 23:43:09 +01:00
a654889263 more simple code
* use of some simple macros
* simplified code blocks in ctx read/write access
2023-11-10 23:43:09 +01:00
05ef0cd339 feat: update monitoring and config reload
* monitoring: added read of current config
* config: get config by single url
2023-11-10 23:43:09 +01:00
3fb83f7f77 updated .drone.yml for sccache to use webdav
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-07 19:04:01 +01:00
13 changed files with 885 additions and 699 deletions


@ -20,7 +20,8 @@ steps:
- cargo test --verbose --all
environment:
RUSTC_WRAPPER: /usr/bin/sccache
SCCACHE_REDIS: redis://sys01.paulbsd.com:6379/1
SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
SCCACHE_WEBDAV_KEY_PREFIX: sccache
volumes:
- name: cargo
path: /usr/local/cargo/registry
@ -43,7 +44,8 @@ steps:
- tar -czvf ipblc-${DRONE_TAG}-${DRONE_STAGE_OS}-${DRONE_STAGE_ARCH}.tar.gz ipblc
environment:
RUSTC_WRAPPER: /usr/bin/sccache
SCCACHE_REDIS: redis://sys01.paulbsd.com:6379/1
SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
SCCACHE_WEBDAV_KEY_PREFIX: sccache
volumes:
- name: cargo
path: /usr/local/cargo/registry
@ -97,7 +99,8 @@ steps:
- cargo test --verbose --all
environment:
RUSTC_WRAPPER: /usr/bin/sccache
SCCACHE_REDIS: redis://sys01.paulbsd.com:6379/1
SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
SCCACHE_WEBDAV_KEY_PREFIX: sccache
volumes:
- name: cargo
path: /usr/local/cargo/registry
@ -120,7 +123,8 @@ steps:
- tar -czvf ipblc-${DRONE_TAG}-${DRONE_STAGE_OS}-${DRONE_STAGE_ARCH}.tar.gz ipblc
environment:
RUSTC_WRAPPER: /usr/bin/sccache
SCCACHE_REDIS: redis://sys01.paulbsd.com:6379/1
SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
SCCACHE_WEBDAV_KEY_PREFIX: sccache
volumes:
- name: cargo
path: /usr/local/cargo/registry

Cargo.lock (generated, 835 changed lines)

File diff suppressed because it is too large.


@ -1,6 +1,6 @@
[package]
name = "ipblc"
version = "1.5.0"
version = "1.7.0"
edition = "2021"
authors = ["PaulBSD <paul@paulbsd.com>"]
description = "ipblc is a tool that search and send attacking ip addresses to ipbl"
@ -21,8 +21,9 @@ regex = "1.10"
reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.33", features = ["full", "sync"] }
tungstenite = { version = "0.20", features = ["handshake", "rustls-tls-native-roots"] }
sd-notify = { version = "0.4" }
tokio = { version = "1.35", features = ["full", "sync"] }
tungstenite = { version = "0.21", features = ["handshake", "rustls-tls-native-roots"] }
## to optimize binary size (slow compile time)
#[profile.release]


@ -48,6 +48,7 @@ Options:
- ✅ Local bound tcp api socket
- ✅ ZMQ -> Websocket
- ✅ Bug in RwLocks (agent often give up)
- ❌ Create memory friendly structs for ipdata
### Notes


@ -17,7 +17,7 @@ use std::path::Path;
pub const GIT_VERSION: &str = git_version!(args = ["--always", "--dirty="]);
const MASTERSERVER: &str = "ipbl.paulbsd.com";
const WSSUBSCRIPTION: &str = "ipbl";
const CONFIG_RETRY: u64 = 2;
const CONFIG_RETRY_INTERVAL: u64 = 2;
const WEB_CLIENT_TIMEOUT: i64 = 5;
#[derive(Debug)]
@ -28,6 +28,7 @@ pub struct Context {
pub flags: Flags,
pub sas: HashMap<String, SetMap>,
pub hashwd: HashMap<String, WatchDescriptor>,
pub reloadinterval: isize,
}
#[derive(Debug, Clone)]
@ -35,7 +36,7 @@ pub struct SetMap {
pub filename: String,
pub fullpath: String,
pub regex: Regex,
pub set: Set,
pub set: SetCfg,
pub watchedfiles: HashMap<String, u64>,
pub wd: WatchDescriptor,
}
@ -64,6 +65,7 @@ impl Context {
sas: HashMap::new(),
blocklist: HashMap::new(),
hashwd: HashMap::new(),
reloadinterval: 5,
};
print!("Loading config ... ");
@ -101,12 +103,12 @@ impl Context {
.send()
.await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
Ok(o) => o,
Err(e) => return Err(e),
};
let data: Discovery = match req.json().await {
Ok(res) => res,
Err(err) => return Err(err),
Ok(o) => o,
Err(e) => return Err(e),
};
Ok(data)
}
@ -125,10 +127,10 @@ impl Context {
}
break;
}
Err(err) => {
println!("error loading config: {err}, retrying in {CONFIG_RETRY}s");
Err(e) => {
println!("error loading config: {e}, retrying in {CONFIG_RETRY_INTERVAL}s");
last_in_err = true;
sleep_s(CONFIG_RETRY).await;
sleep_s(CONFIG_RETRY_INTERVAL).await;
}
};
}
@ -167,38 +169,41 @@ impl Context {
}
pub async fn update_blocklist(&mut self, ipevent: &IpEvent) -> Option<IpEvent> {
match self.cfg.sets.get(&ipevent.ipdata.src) {
Some(set) => {
let starttime = DateTime::parse_from_rfc3339(ipevent.ipdata.date.as_str())
.unwrap()
.with_timezone(&chrono::Local);
let blocktime = set.blocktime;
if ipevent.mode == "file".to_string() && gethostname(true) == ipevent.hostname {
let block = self
.blocklist
.entry(ipevent.ipdata.ip.to_string())
.or_insert(BlockIpData {
ipdata: ipevent.ipdata.clone(),
tryfail: 0,
starttime,
blocktime,
});
block.tryfail += 1;
block.blocktime = blocktime;
if block.tryfail >= set.tryfail {
return Some(ipevent.clone());
match &ipevent.ipdata {
Some(ipdata) => match self.cfg.sets.get(&ipdata.src) {
Some(set) => {
let starttime = DateTime::parse_from_rfc3339(ipdata.date.as_str())
.unwrap()
.with_timezone(&chrono::Local);
let blocktime = set.blocktime;
if ipevent.mode == "file".to_string() && gethostname(true) == ipevent.hostname {
let block =
self.blocklist
.entry(ipdata.ip.to_string())
.or_insert(BlockIpData {
ipdata: ipdata.clone(),
tryfail: 0,
starttime,
blocktime,
});
block.tryfail += 1;
block.blocktime = blocktime;
if block.tryfail >= set.tryfail {
return Some(ipevent.clone());
}
} else {
self.blocklist
.entry(ipdata.ip.to_string())
.or_insert(BlockIpData {
ipdata: ipdata.clone(),
tryfail: set.tryfail,
starttime,
blocktime,
});
}
} else {
self.blocklist
.entry(ipevent.ipdata.ip.to_string())
.or_insert(BlockIpData {
ipdata: ipevent.ipdata.clone(),
tryfail: set.tryfail,
starttime,
blocktime,
});
}
}
None => {}
},
None => {}
}
None
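
Note on the reworked update_blocklist above: with ipdata now an Option, the event is unwrapped first, and an event coming from local file parsing (mode "file" on the local hostname) is only forwarded once the per-IP tryfail counter reaches the set's threshold. A minimal sketch of that counting idea, using a plain HashMap and made-up names instead of the real IpEvent/BlockIpData types:

use std::collections::HashMap;

// Illustrative only: increment a per-IP counter and report whether the
// configured tryfail threshold has been reached.
fn should_block(counters: &mut HashMap<String, u32>, ip: &str, tryfail: u32) -> bool {
    let c = counters.entry(ip.to_string()).or_insert(0);
    *c += 1;
    *c >= tryfail
}

fn main() {
    let mut counters = HashMap::new();
    // The same IP has to show up tryfail times before it is propagated.
    for attempt in 1..=5 {
        let block = should_block(&mut counters, "192.0.2.10", 5);
        println!("attempt {attempt}: block = {block}");
    }
}
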
@ -282,7 +287,7 @@ impl Context {
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Config {
pub sets: HashMap<String, Set>,
pub sets: HashMap<String, SetCfg>,
#[serde(skip_serializing)]
pub trustnets: Vec<String>,
pub ws: HashMap<String, WebSocketCfg>,
@ -294,7 +299,7 @@ impl Config {
Self {
sets: HashMap::from([
("smtp".to_string(),
Set {
SetCfg {
src: "smtp".to_string(),
filename: "mail.log".to_string(),
regex: "(SASL LOGIN authentication failed)".to_string(),
@ -303,7 +308,7 @@ impl Config {
tryfail: 5,
}),
("ssh".to_string(),
Set {
SetCfg {
src: "ssh".to_string(),
filename: "auth.log".to_string(),
regex: "(Invalid user|BREAK|not allowed because|no matching key exchange method found)".to_string(),
@ -312,7 +317,7 @@ impl Config {
tryfail: 5,
},),
("http".to_string(),
Set {
SetCfg {
src: "http".to_string(),
filename: "".to_string(),
regex: "(anonymousfox.co)".to_string(),
@ -321,7 +326,7 @@ impl Config {
tryfail: 5,
},),
("openvpn".to_string(),
Set {
SetCfg {
src: "openvpn".to_string(),
filename: "status".to_string(),
regex: "(UNDEF)".to_string(),
@ -350,96 +355,40 @@ impl Config {
}
pub async fn load(&mut self, server: &String) -> Result<(), ReqError> {
self.get_global_config(server).await?;
self.get_trustnets(server).await?;
self.get_sets(server).await?;
self.get_ws_config(server).await?;
self.get_config(server).await?;
Ok(())
}
async fn get_global_config(&mut self, server: &String) -> Result<(), ReqError> {
let resp: Result<Response, ReqError> =
httpclient().get(format!("{server}/config")).send().await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
};
let data: HashMap<String, GlobalConfig> = match req.json::<Vec<GlobalConfig>>().await {
Ok(res) => {
let mut out: HashMap<String, GlobalConfig> = HashMap::new();
res.into_iter().map(|x| x).for_each(|x| {
out.insert(x.key.to_string(), x);
});
out
}
Err(err) => return Err(err),
};
let key = "".to_string();
self.api = data
.get(&key.to_string())
.unwrap_or(&GlobalConfig {
key: "api".to_string(),
value: "127.0.0.1:8060".to_string(),
})
.value
.clone();
Ok(())
}
async fn get_trustnets(&mut self, server: &String) -> Result<(), ReqError> {
async fn get_config(&mut self, server: &String) -> Result<(), ReqError> {
let resp: Result<Response, ReqError> = httpclient()
.get(format!("{server}/config/trustlist"))
.get(format!("{server}/config?v=2"))
.send()
.await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
Ok(o) => o,
Err(e) => return Err(e),
};
let data: Vec<String> = match req.json::<Vec<String>>().await {
Ok(res) => res,
Err(err) => return Err(err),
let data: GlobalConfigV2 = match req.json::<GlobalConfigV2>().await {
Ok(o) => o,
Err(e) => return Err(e),
};
self.trustnets = data;
Ok(())
}
async fn get_sets(&mut self, server: &String) -> Result<(), ReqError> {
let resp: Result<Response, ReqError> = httpclient()
.get(format!("{server}/config/sets"))
.send()
.await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
};
let data: Vec<Set> = match req.json::<Vec<Set>>().await {
Ok(res) => res,
Err(err) => return Err(err),
};
for d in data {
for d in data.sets {
self.sets.insert(d.src.clone(), d);
}
Ok(())
}
async fn get_ws_config(&mut self, server: &String) -> Result<(), ReqError> {
let resp: Result<Response, ReqError> =
httpclient().get(format!("{server}/config/ws")).send().await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
};
let data: HashMap<String, WebSocketCfg> = match req.json::<Vec<WebSocketCfg>>().await {
Ok(res) => {
let mut out: HashMap<String, WebSocketCfg> = HashMap::new();
res.into_iter().map(|x| x).for_each(|x| {
out.insert(x.t.to_string(), x);
});
out
}
Err(err) => return Err(err),
};
self.ws = data;
self.trustnets = data.trustlists;
data.ws.into_iter().map(|x| x).for_each(|x| {
self.ws.insert(x.t.to_string(), x);
});
self.api = data
.cfg
.get(&"api".to_string())
.unwrap_or(&self.api)
.clone();
Ok(())
}
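
The four separate fetches (get_global_config, get_trustnets, get_sets, get_ws_config) are folded into this single GET on {server}/config?v=2, deserialized into the new GlobalConfigV2 struct shown further down. A hedged sketch of that deserialization step, with a hand-written payload and only the cfg and trustlists fields, just to show the serde shape:

use serde::Deserialize;
use std::collections::HashMap;

// Cut-down stand-in for GlobalConfigV2; the real struct also carries sets and ws.
#[derive(Debug, Deserialize)]
struct GlobalConfigV2Example {
    cfg: HashMap<String, String>,
    trustlists: Vec<String>,
}

fn main() {
    // Example payload only; the actual field contents come from the ipbl server.
    let body = r#"{
        "cfg": { "api": "127.0.0.1:8060" },
        "trustlists": ["127.0.0.0/8", "::1/128"]
    }"#;
    let data: GlobalConfigV2Example = serde_json::from_str(body).unwrap();
    println!("api = {:?}", data.cfg.get("api"));
    println!("trustnets = {:?}", data.trustlists);
}
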
@ -451,13 +400,13 @@ impl Config {
.await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
Ok(o) => o,
Err(e) => return Err(e),
};
let data: Vec<IpData> = match req.json::<Vec<IpData>>().await {
Ok(res) => res,
Err(err) => return Err(err),
Ok(o) => o,
Err(e) => return Err(e),
};
Ok(data)
@ -468,8 +417,8 @@ impl Config {
for trustnet in &self.trustnets {
match trustnet.parse() {
Ok(net) => trustnets.push(net),
Err(err) => {
println!("error parsing {trustnet}, error: {err}");
Err(e) => {
println!("error parsing {trustnet}, error: {e}");
}
};
}
@ -481,13 +430,7 @@ impl Config {
msgtype: String::from("bootstrap"),
mode: String::from("ws"),
hostname: gethostname(true),
ipdata: IpData {
t: 4,
ip: "".to_string(),
src: "".to_string(),
date: "".to_string(),
hostname: "".to_string(),
},
ipdata: None,
}
}
}
@ -508,13 +451,15 @@ pub fn httpclient() -> Client {
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GlobalConfig {
pub key: String,
pub value: String,
pub struct GlobalConfigV2 {
pub cfg: HashMap<String, String>,
pub sets: Vec<SetCfg>,
pub trustlists: Vec<String>,
pub ws: Vec<WebSocketCfg>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Set {
pub struct SetCfg {
pub src: String,
pub filename: String,
pub regex: String,
@ -543,13 +488,13 @@ pub struct URL {
pub path: String,
}
impl PartialEq for Set {
impl PartialEq for SetCfg {
fn eq(&self, other: &Self) -> bool {
self.src == other.src
}
}
impl Hash for Set {
impl Hash for SetCfg {
fn hash<H: Hasher>(&self, state: &mut H) {
self.src.hash(state);
}
@ -573,13 +518,13 @@ mod test {
msgtype: String::from("add"),
mode: String::from("ws"),
hostname: String::from("localhost"),
ipdata: IpData {
ipdata: Some(IpData {
t: 4,
ip: "1.1.1.1".to_string(),
hostname: "test1".to_string(),
date: now.to_rfc3339().to_string(),
src: "ssh".to_string(),
},
}),
})
.await;
}
@ -589,13 +534,13 @@ mod test {
msgtype: String::from("add"),
mode: String::from("ws"),
hostname: String::from("localhost"),
ipdata: IpData {
ipdata: Some(IpData {
t: 4,
ip: "1.1.1.2".to_string(),
hostname: "test2".to_string(),
date: now.to_rfc3339().to_string(),
src: "http".to_string(),
},
}),
})
.await;
}
@ -604,13 +549,13 @@ mod test {
msgtype: String::from("add"),
mode: String::from("ws"),
hostname: String::from("localhost"),
ipdata: IpData {
ipdata: Some(IpData {
t: 4,
ip: "1.1.1.3".to_string(),
hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(),
src: "http".to_string(),
},
}),
})
.await;
@ -618,13 +563,13 @@ mod test {
msgtype: String::from("add"),
mode: String::from("ws"),
hostname: String::from("localhost"),
ipdata: IpData {
ipdata: Some(IpData {
t: 4,
ip: "1.1.1.4".to_string(),
hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(),
src: "http".to_string(),
},
}),
})
.await;
@ -632,26 +577,26 @@ mod test {
msgtype: String::from("add"),
mode: String::from("ws"),
hostname: String::from("localhost"),
ipdata: IpData {
ipdata: Some(IpData {
t: 4,
ip: "1.1.1.4".to_string(),
hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(),
src: "http".to_string(),
},
}),
})
.await;
ctx.update_blocklist(&mut IpEvent {
msgtype: String::from("add"),
mode: String::from("ws"),
hostname: String::from("localhost"),
ipdata: IpData {
ipdata: Some(IpData {
t: 6,
ip: "2a00:1450:4007:805::2003".to_string(),
hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(),
src: "http".to_string(),
},
}),
})
.await;

src/fw.rs (133 changed lines)

@ -19,6 +19,41 @@ pub fn fwglobalinit<'a>() -> ((Batch, Table), (Batch, Table)) {
((batch4, table4), (batch6, table6))
}
macro_rules! initrules {
($batch:expr, $table:expr, $chain:ident) => {
$chain.set_hook(nftnl::Hook::In, 1);
$chain.set_policy(nftnl::Policy::Accept);
$batch.add(&$chain, nftnl::MsgType::Add);
$batch.add(&Rule::new(&$chain), nftnl::MsgType::Del);
let mut rule = Rule::new(&$chain);
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 4u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict accept));
$batch.add(&rule, nftnl::MsgType::Add);
};
}
macro_rules! createrules {
($ipdata:ident, $chain:ident, $batch:ident, $t:ty, $ip_t:ident) => {
let mut rule = Rule::new(&$chain);
let ip = $ipdata.ip.parse::<$t>().unwrap();
rule.add_expr(&nft_expr!(payload $ip_t saddr));
rule.add_expr(&nft_expr!(cmp == ip));
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 10u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict drop));
$batch.add(&rule, nftnl::MsgType::Add);
}
}
fn fwinit(t: FwTableType) -> (Batch, Table) {
let table_name: String;
let table: Table;
@ -48,77 +83,41 @@ fn fwinit(t: FwTableType) -> (Batch, Table) {
}
pub fn fwblock(
ips_add: &Vec<IpData>,
ips_add_all: &Vec<IpData>,
ret: &mut Vec<String>,
fwlen: &mut usize,
) -> std::result::Result<(), Error> {
let ((mut batch4, table4), (mut batch6, table6)) = fwglobalinit();
// build chain for ipv4
let mut chain4 = Chain::new(&CString::new(PKG_NAME).unwrap(), &table4);
chain4.set_hook(nftnl::Hook::In, 1);
chain4.set_policy(nftnl::Policy::Accept);
// add chain
batch4.add(&chain4, nftnl::MsgType::Add);
batch4.add(&Rule::new(&chain4), nftnl::MsgType::Del);
let mut rule4 = Rule::new(&chain4);
rule4.add_expr(&nft_expr!(ct state));
rule4.add_expr(&nft_expr!(bitwise mask 4u32, xor 0u32));
rule4.add_expr(&nft_expr!(cmp != 0u32));
rule4.add_expr(&nft_expr!(counter));
rule4.add_expr(&nft_expr!(verdict accept));
batch4.add(&rule4, nftnl::MsgType::Add);
// build chain for ipv6
let mut chain6 = Chain::new(&CString::new(PKG_NAME).unwrap(), &table6);
chain6.set_hook(nftnl::Hook::In, 1);
chain6.set_policy(nftnl::Policy::Accept);
// add chain
batch6.add(&chain6, nftnl::MsgType::Add);
initrules!(batch4, table4, chain4);
initrules!(batch6, table6, chain6);
batch6.add(&Rule::new(&chain6), nftnl::MsgType::Del);
let mut factor = 1;
if ips_add_all.len() > 100 {
factor = (ips_add_all.len() / 10) as usize
}
let mut rule6 = Rule::new(&chain6);
rule6.add_expr(&nft_expr!(ct state));
rule6.add_expr(&nft_expr!(bitwise mask 4u32, xor 0u32));
rule6.add_expr(&nft_expr!(cmp != 0u32));
rule6.add_expr(&nft_expr!(counter));
rule6.add_expr(&nft_expr!(verdict accept));
batch6.add(&rule6, nftnl::MsgType::Add);
let ips_add_tmp: Vec<IpData> = ips_add_all.clone().iter().map(|x| x.clone()).collect();
let mut ips_add_iter = ips_add_tmp.chunks(factor);
let mut ips_add: Vec<&[IpData]> = vec![];
while let Some(x) = ips_add_iter.next() {
ips_add.push(x);
}
// build and add rules
for ipdata in ips_add.clone() {
match ipdata.t {
4 => {
let ip = ipdata.ip.parse::<Ipv4Addr>().unwrap();
let mut rule = Rule::new(&chain4);
rule.add_expr(&nft_expr!(payload ipv4 saddr));
rule.add_expr(&nft_expr!(cmp == ip));
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 10u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict drop));
batch4.add(&rule, nftnl::MsgType::Add);
}
6 => {
let ip = ipdata.ip.parse::<Ipv6Addr>().unwrap();
let mut rule = Rule::new(&chain6);
rule.add_expr(&nft_expr!(payload ipv6 saddr));
rule.add_expr(&nft_expr!(cmp == ip));
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 10u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict drop));
batch6.add(&rule, nftnl::MsgType::Add);
}
_ => {
todo!()
for ipdata_group in ips_add.clone() {
for ipdata in ipdata_group {
match ipdata.t {
4 => {
createrules!(ipdata, chain4, batch4, Ipv4Addr, ipv4);
}
6 => {
createrules!(ipdata, chain6, batch6, Ipv6Addr, ipv6);
}
_ => {}
}
}
}
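
The chunking above is what the "add ips in chunks to nftables" commit introduced: once the list holds more than 100 addresses it is split into batches of len/10 entries (roughly ten batches), otherwise one address per batch. A standalone sketch of the same arithmetic, with plain strings standing in for IpData:

fn chunk_ips(ips: Vec<String>) -> Vec<Vec<String>> {
    // Same factor computation as in fwblock: len/10 once the list exceeds 100.
    let mut factor = 1;
    if ips.len() > 100 {
        factor = ips.len() / 10;
    }
    ips.chunks(factor).map(|c| c.to_vec()).collect()
}

fn main() {
    let ips: Vec<String> = (0..250).map(|i| format!("192.0.2.{}", i % 254)).collect();
    let groups = chunk_ips(ips);
    println!("{} groups, first group holds {} entries", groups.len(), groups[0].len());
}
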
@ -126,12 +125,20 @@ pub fn fwblock(
// validate and send batch
for b in [batch4, batch6] {
let bf = b.finalize();
send_and_process(&bf).unwrap();
match send_and_process(&bf) {
Ok(_) => {}
Err(e) => {
println!("error sending batch: {e}");
}
};
}
if fwlen != &mut ips_add.len() {
ret.push(format!("{length} ip in firewall", length = ips_add.len()));
if fwlen != &mut ips_add_all.len() {
ret.push(format!(
"{length} ip in firewall",
length = ips_add_all.len()
));
}
*fwlen = ips_add.len();
*fwlen = ips_add_all.len();
Ok(())
}


@ -22,7 +22,7 @@ pub struct IpEvent {
pub msgtype: String,
pub mode: String,
pub hostname: String,
pub ipdata: IpData,
pub ipdata: Option<IpData>,
}
#[macro_export]
@ -35,6 +35,14 @@ macro_rules! ipevent {
ipdata: $ipdata,
}
};
($msgtype:expr,$mode:expr,$hostname:expr) => {
IpEvent {
msgtype: String::from($msgtype),
mode: String::from($mode),
hostname: $hostname,
ipdata: None,
}
};
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -54,6 +62,19 @@ pub struct IpData {
pub hostname: String,
}
#[macro_export]
macro_rules! ipdata {
($t:expr,$ip:expr,$src:expr,$date:expr,$hostname:expr) => {
IpData {
t: $t.clone(),
ip: $ip.clone(),
src: $src.clone(),
date: $date.clone(),
hostname: $hostname.clone(),
}
};
}
impl PartialEq for IpData {
fn eq(&self, other: &IpData) -> bool {
self.ip.as_bytes() == other.ip.as_bytes() && self.src == other.src
@ -123,6 +144,14 @@ pub fn filter(
}
};
let ipaddr: IpAddr = match s_ipaddr.parse() {
Ok(ip) => ip,
Err(err) => {
println!("unparseable IP: {err} {s_ipaddr}");
continue;
}
};
let s_date: DateTime<Local>;
match R_DATE.captures(l.as_str()) {
Some(sdt) => {
@ -136,22 +165,8 @@ pub fn filter(
}
};
let ipaddr: IpAddr = match s_ipaddr.parse() {
Ok(ip) => ip,
Err(err) => {
println!("unparseable IP: {err} {s_ipaddr}");
continue;
}
};
if !is_trusted(&ipaddr, &trustnets) {
iplist.push(IpData {
ip: s_ipaddr,
t: t,
src: src.to_owned(),
date: s_date.to_rfc3339().to_owned(),
hostname: hostname.to_owned(),
});
iplist.push(ipdata!(t, s_ipaddr, src, s_date.to_rfc3339(), hostname));
ips += 1;
};
}
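
The two macros added in this file, ipdata! and the extra arm of ipevent!, are what filter() and the main loop now use to build events; the arm without an ipdata argument is what the new ping keepalive relies on. A self-contained sketch with cut-down types (the real structs carry more fields, a mode field, and serde derives):

#[derive(Debug, Clone)]
struct IpData { ip: String, src: String }

#[derive(Debug)]
struct IpEvent { msgtype: String, hostname: String, ipdata: Option<IpData> }

macro_rules! ipdata {
    ($ip:expr, $src:expr) => {
        IpData { ip: $ip.clone(), src: $src.clone() }
    };
}

macro_rules! ipevent {
    ($msgtype:expr, $hostname:expr, $ipdata:expr) => {
        IpEvent { msgtype: String::from($msgtype), hostname: $hostname, ipdata: $ipdata }
    };
    // No-payload arm, mirroring the new ipdata: None variant used for pings.
    ($msgtype:expr, $hostname:expr) => {
        IpEvent { msgtype: String::from($msgtype), hostname: $hostname, ipdata: None }
    };
}

fn main() {
    let ip = String::from("192.0.2.7");
    let src = String::from("ssh");
    let add = ipevent!("add", String::from("host1"), Some(ipdata!(ip, src)));
    let ping = ipevent!("ping", String::from("host1"));
    println!("{add:?}\n{ping:?}");
}
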


@ -11,6 +11,7 @@ use chrono::prelude::*;
use chrono::prelude::{DateTime, Local};
use chrono::Duration;
use nix::sys::inotify::{InitFlags, Inotify, InotifyEvent};
use sd_notify::*;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::{channel, Receiver, Sender};
@ -21,6 +22,13 @@ const BL_CHAN_SIZE: usize = 32;
const WS_CHAN_SIZE: usize = 64;
const LOOP_MAX_WAIT: u64 = 5;
macro_rules! log_with_systemd {
($msg:expr) => {
println!("{}", $msg);
notify(false, &[NotifyState::Status(format!("{}", $msg).as_str())]).unwrap();
};
}
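
The log_with_systemd! macro above wraps the sd-notify calls added by the systemd support commits: print the message and push it as the unit's status text, with a one-off NotifyState::Ready sent by run() once the startup tasks are spawned. A minimal sketch of those calls; outside systemd, where NOTIFY_SOCKET is unset, they are effectively no-ops:

use sd_notify::{notify, NotifyState};

fn main() {
    // Tell systemd the service is ready (Type=notify units wait for this).
    let _ = notify(false, &[NotifyState::Ready]);
    // Publish free-form status text, visible in `systemctl status`.
    let _ = notify(false, &[NotifyState::Status("processing events")]);
    println!("notify messages sent (if NOTIFY_SOCKET was set)");
}
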
pub async fn run() {
let inotify = Inotify::init(InitFlags::empty()).unwrap();
let globalctx = Context::new(&inotify).await;
@ -31,7 +39,7 @@ pub async fn run() {
let pkgversion = format!("{}@{}", env!("CARGO_PKG_VERSION"), GIT_VERSION);
let mut last_cfg_reload: DateTime<Local> = Local::now().trunc_subsecs(0);
println!("Launching {}, version {}", PKG_NAME, pkgversion);
log_with_systemd!(format!("Launching {}, version {}", PKG_NAME, pkgversion));
fwglobalinit();
let ctxapi = Arc::clone(&ctxarc);
@ -60,6 +68,8 @@ pub async fn run() {
compare_files_changes(&ctxclone, &mut blrx, &ipeventclone).await;
});
notify(false, &[NotifyState::Ready]).unwrap();
loop {
let mut ret: Vec<String> = Vec::new();
@ -69,17 +79,16 @@ pub async fn run() {
ipevent = ipeventrx.recv() => {
let received_ip = ipevent.unwrap();
let (toblock,server);
{
let (toblock,server) = {
let ctx = ctxclone.read().await;
toblock = ctx.get_blocklist_toblock().await;
server = ctx.flags.server.clone();
}
(ctx.get_blocklist_toblock().await,ctx.flags.server.clone())
};
if received_ip.msgtype == "bootstrap".to_string() {
for ip_to_send in toblock {
let ipe = ipevent!("init","ws",gethostname(true),ip_to_send);
let ipe = ipevent!("init","ws",gethostname(true),Some(ip_to_send));
if !send_to_ipbl_websocket(&mut wssocketrr, &ipe).await {
wssocketrr.close(None).unwrap();
wssocketrr = websocketreqrep(&ctxwsrr).await;
break;
}
@ -88,27 +97,31 @@ pub async fn run() {
}
// refresh context blocklist
let filtered_ipevent;
{
let mut ctx = ctxarc.write().await;
filtered_ipevent = ctx.update_blocklist(&received_ip).await;
}
let filtered_ipevent = {
ctxarc.write().await.update_blocklist(&received_ip).await
};
// send ip list to api and ws sockets
if let Some(ipevent) = filtered_ipevent {
if received_ip.msgtype != "init" {
println!("sending {} to api and ws", ipevent.ipdata.ip);
log_with_systemd!(format!("sending {} to api and ws", ipevent.ipdata.clone().unwrap().ip));
let ipe = ipevent!("add","ws",gethostname(true),ipevent.ipdata);
send_to_ipbl_api(&server.clone(), &ipe).await;
let status = send_to_ipbl_websocket(&mut wssocketrr, &ipe).await;
if !status {
if !send_to_ipbl_websocket(&mut wssocketrr, &ipe).await {
wssocketrr.close(None).unwrap();
wssocketrr = websocketreqrep(&ctxwsrr).await;
continue;
}
}
}
}
_val = sleep_s(LOOP_MAX_WAIT) => {}
_val = sleep_s(LOOP_MAX_WAIT) => {
let ipe = ipevent!("ping", "ws", gethostname(true));
if !send_to_ipbl_websocket(&mut wssocketrr, &ipe).await {
wssocketrr.close(None).unwrap();
wssocketrr = websocketreqrep(&ctxwsrr).await;
}
}
};
let ctxclone = Arc::clone(&ctxarc);
@ -116,7 +129,8 @@ pub async fn run() {
// log lines
if ret.len() > 0 {
println!("{ret}", ret = ret.join(", "));
let result = ret.join(", ");
log_with_systemd!(format!("{result}"));
}
let ctxclone = Arc::clone(&ctxarc);
@ -132,9 +146,26 @@ async fn handle_cfg_reload(
) {
let now_cfg_reload = Local::now().trunc_subsecs(0);
if (now_cfg_reload - *last_cfg_reload) > Duration::seconds(LOOP_MAX_WAIT as i64) {
let mut ctx = ctxclone.write().await;
let inotify = inoarc.read().await;
match ctx.load(&inotify).await {
let inotify;
loop {
inotify = match inoarc.try_read() {
Ok(o) => o,
Err(e) => {
println!("{e}");
sleep_s(1).await;
continue;
}
};
break;
}
let mut ctxtest = match ctxclone.try_write() {
Ok(o) => o,
Err(e) => {
println!("{e}");
return;
}
};
match ctxtest.load(&inotify).await {
Ok(_) => {
*last_cfg_reload = Local::now().trunc_subsecs(0);
}
@ -158,8 +189,8 @@ async fn handle_fwblock(ctxclone: Arc<RwLock<Context>>, ret: &mut Vec<String>, f
// apply firewall blocking
match fwblock(&toblock, ret, fwlen) {
Ok(_) => {}
Err(err) => {
println!("Err: {err}, unable to push firewall rules, use super user")
Err(e) => {
println!("err: {e}, unable to push firewall rules, use super user")
}
};
}
@ -201,20 +232,16 @@ async fn compare_files_changes(
let modfiles = inrx.recv().await.unwrap();
let mut iplist: Vec<IpData> = vec![];
let sask;
let sas;
{
let sas = {
let ctx = ctxarc.read().await;
sas = ctx.sas.clone();
sask = sas.keys();
tnets = ctx.cfg.build_trustnets();
}
ctx.sas.clone()
};
match modfiles.inevent.name {
Some(name) => {
let filename = name.to_str().unwrap();
for sak in sask {
let sa = sas.get(sak).unwrap();
for (sak, sa) in sas.clone().iter_mut() {
if modfiles.inevent.wd == sa.wd {
let handle: String;
if sa.filename.as_str() == "" {
@ -225,13 +252,11 @@ async fn compare_files_changes(
continue;
}
let (filesize, sizechanged);
{
let (filesize, sizechanged) = {
let mut ctx = ctxarc.write().await;
let sa = ctx.sas.get_mut(sak).unwrap();
(filesize, sizechanged) =
get_last_file_size(&mut sa.watchedfiles, &handle).await;
}
get_last_file_size(&mut sa.watchedfiles, &handle).await
};
if !sizechanged {
continue;
@ -254,7 +279,7 @@ async fn compare_files_changes(
}
}
for ip in iplist {
let ipe = ipevent!("add", "file", gethostname(true), ip);
let ipe = ipevent!("add", "file", gethostname(true), Some(ip));
let ipetx = ipeventtx.read().await;
ipetx.send(ipe).await.unwrap();
}


@ -4,53 +4,72 @@ use serde_json;
use std::io;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpSocket;
use tokio::net::TcpListener;
use tokio::sync::RwLock;
pub async fn apiserver(ctxarc: &Arc<RwLock<Context>>) -> io::Result<()> {
let ctxarc = ctxarc.clone();
let addr;
{
let ctx = ctxarc.read().await;
addr = ctx.cfg.api.parse().unwrap();
}
let socket = TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let listener = socket.listen(1024).unwrap();
let addr: String = { ctxarc.read().await.cfg.api.parse().unwrap() };
let listener = match TcpListener::bind(addr).await {
Ok(o) => o,
Err(e) => {
println!("error: {e}");
std::process::exit(1);
}
};
tokio::spawn(async move {
loop {
match listener.accept().await {
Ok((stream, _addr)) => {
//let mut buf = [0; 1024];
let data;
{
let ctx = ctxarc.read().await;
data = serde_json::to_string(&ctx.blocklist);
Ok((mut socket, _addr)) => {
let mut buf = vec![0; 1024];
match socket.readable().await {
Ok(_) => {
match socket.try_read(&mut buf) {
Ok(_) => {}
Err(e) => {
println!("error: {e}");
continue;
}
};
}
Err(e) => {
println!("error: {e}");
continue;
}
}
match data {
Ok(dt) => {
let (_reader, mut writer) = stream.into_split();
match writer.write_all(format!("{dt}").as_bytes()).await {
Ok(_) => {}
Err(err) => {
println!("{err}");
}
}
}
Err(err) => {
println!("unable to serialize data: {err}");
let msg = match String::from_utf8(buf.to_vec()) {
Ok(o) => o.trim_matches(char::from(0)).trim().to_string(),
Err(_) => "".to_string(),
};
let res = format_result(&ctxarc, msg.as_str()).await;
match socket.write_all(res.as_bytes()).await {
Ok(_) => {}
Err(e) => {
println!("error: {e}");
}
}
}
Err(err) => {
println!("couldn't get client: {}", err)
println!("error: {err}");
}
}
}
});
Ok(())
}
async fn format_result(ctxarc: &Arc<RwLock<Context>>, mode: &str) -> String {
let data;
let ctx = ctxarc.read().await;
match mode {
"cfg" => data = serde_json::to_string(&ctx.cfg).unwrap(),
"blocklist" => data = serde_json::to_string(&ctx.blocklist).unwrap(),
_ => data = serde_json::to_string(&ctx.blocklist).unwrap(),
};
data
}
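
Usage sketch for the reworked API socket: the server now reads a short command from the client and answers with JSON built by format_result ("cfg" returns the running config, anything else falls back to the blocklist). A small tokio client, assuming the default bind address 127.0.0.1:8060 from the config:

use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut stream = TcpStream::connect("127.0.0.1:8060").await?;
    // Ask for the current blocklist; send "cfg" to dump the running config instead.
    stream.write_all(b"blocklist").await?;
    let mut buf = Vec::new();
    stream.read_to_end(&mut buf).await?;
    println!("{}", String::from_utf8_lossy(&buf));
    Ok(())
}
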

src/old.rs (new file, 28 lines)

@ -0,0 +1,28 @@
pub fn _search_subfolders(path: &Path) -> Vec<String> {
let dirs = std::fs::read_dir(path).unwrap();
let mut folders: Vec<String> = vec![];
for dir in dirs {
let dirpath = dir.unwrap().path();
let path = Path::new(dirpath.as_path());
if path.is_dir() {
folders.push(dirpath.to_str().unwrap().to_string());
for f in _search_subfolders(path) {
folders.push(f);
}
}
}
folders
}
pub fn _dedup<T: Ord + PartialOrd>(list: &mut Vec<T>) -> usize {
// Begin with sorting entries
list.sort();
// Then deduplicate
list.dedup();
// Return the length
list.len()
}
pub async fn _sleep_ms(ms: u64) {
sleep(Duration::from_millis(ms)).await;
}


@ -1,21 +1,14 @@
use lazy_static::lazy_static;
use nix::unistd;
use regex::Regex;
use std::boxed::Box;
use std::fs::File;
use std::io::*;
use std::path::Path;
use tokio::time::{sleep, Duration};
lazy_static! {
static ref R_FILE_GZIP: Regex = Regex::new(r".*\.gz.*").unwrap();
}
pub fn read_lines(filename: &String, offset: u64) -> Option<Box<dyn Read>> {
let mut file = match File::open(filename) {
Ok(f) => f,
Err(err) => {
println!("{err}");
Ok(o) => o,
Err(e) => {
println!("error: {e}");
return None;
}
};
@ -24,23 +17,15 @@ pub fn read_lines(filename: &String, offset: u64) -> Option<Box<dyn Read>> {
Some(lines)
}
pub fn _dedup<T: Ord + PartialOrd>(list: &mut Vec<T>) -> usize {
// Begin with sorting entries
list.sort();
// Then deduplicate
list.dedup();
// Return the length
list.len()
}
pub async fn _sleep_ms(ms: u64) {
sleep(Duration::from_millis(ms)).await;
}
pub async fn sleep_s(s: u64) {
sleep(Duration::from_secs(s)).await;
}
#[allow(dead_code)]
pub async fn sleep_ms(m: u64) {
sleep(Duration::from_millis(m)).await;
}
pub fn gethostname(show_fqdn: bool) -> String {
let hostname_cstr = unistd::gethostname().expect("Failed getting hostname");
let fqdn = hostname_cstr
@ -53,19 +38,3 @@ pub fn gethostname(show_fqdn: bool) -> String {
}
hostname[0].to_string()
}
pub fn _search_subfolders(path: &Path) -> Vec<String> {
let dirs = std::fs::read_dir(path).unwrap();
let mut folders: Vec<String> = vec![];
for dir in dirs {
let dirpath = dir.unwrap().path();
let path = Path::new(dirpath.as_path());
if path.is_dir() {
folders.push(dirpath.to_str().unwrap().to_string());
for f in _search_subfolders(path) {
folders.push(f);
}
}
}
folders
}


@ -11,12 +11,12 @@ pub async fn send_to_ipbl_api(server: &str, ip: &IpEvent) {
let mut try_req = 0;
let client = httpclient();
loop {
match push_ip(&client, &server, &ip.ipdata).await {
match push_ip(&client, &server, &ip.ipdata.clone().unwrap()).await {
Ok(_) => {
break;
}
Err(err) => {
println!("{err}");
Err(e) => {
println!("error: {e}");
sleep_s(1).await;
if try_req == MAX_FAILED_API_RATE {
break;


@ -14,13 +14,13 @@ use tungstenite::*;
pub async fn websocketreqrep(
ctxarc: &Arc<RwLock<Context>>,
) -> WebSocket<MaybeTlsStream<TcpStream>> {
let (mut wssocketrr, bootstrap_event, cfg);
let (mut wssocketrr, bootstrap_event, wscfg);
{
let ctx = ctxarc.read().await;
bootstrap_event = ctx.cfg.bootstrap_event().clone();
cfg = ctx.cfg.ws.get("reqrep").unwrap().clone();
wscfg = ctx.cfg.ws.get("reqrep").unwrap().clone();
}
wssocketrr = websocketconnect(&cfg, &gethostname(true)).await.unwrap();
wssocketrr = websocketconnect(&wscfg, &gethostname(true)).await.unwrap();
send_to_ipbl_websocket(&mut wssocketrr, &bootstrap_event).await;
return wssocketrr;
@ -45,15 +45,24 @@ pub async fn websocketpubsub(
Ok(msg) => {
let tosend: IpEvent = match serde_json::from_str(msg.to_string().as_str()) {
Ok(o) => o,
Err(_e) => {
Err(e) => {
println!("error in pubsub: {e:?}");
continue;
}
};
if tosend.ipdata.hostname != gethostname(true)
|| tosend.msgtype == "init".to_string()
{
let txps = txpubsub.read().await;
txps.send(tosend).await.unwrap();
match tosend.ipdata.clone() {
Some(o) => {
if o.hostname != gethostname(true)
|| tosend.msgtype == "init".to_string()
{
let txps = txpubsub.read().await;
txps.send(tosend).await.unwrap();
}
}
None => {
let txps = txpubsub.read().await;
txps.send(tosend.clone()).await.unwrap();
}
}
}
Err(e) => {
@ -105,11 +114,12 @@ pub async fn send_to_ipbl_websocket(
Ok(_) => {}
Err(e) => {
println!("err send read: {e:?}");
return handle_websocket_error(ws);
return false;
}
};
} else {
return handle_websocket_error(ws);
println!("can't write to socket");
return false;
};
if ws.can_read() {
@ -117,16 +127,13 @@ pub async fn send_to_ipbl_websocket(
Ok(_) => {}
Err(e) => {
println!("err send read: {e:?}");
return handle_websocket_error(ws);
return false;
}
};
} else {
return handle_websocket_error(ws);
println!("can't read from socket");
return false;
};
true
}
fn handle_websocket_error(ws: &mut WebSocket<MaybeTlsStream<TcpStream>>) -> bool {
ws.close(None).unwrap();
return false;
}
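
With handle_websocket_error removed, send_to_ipbl_websocket only reports failure and reconnection moves to the callers: the run() loop closes the socket and calls websocketreqrep again whenever a send returns false. A rough caller-side sketch of that pattern, with stand-ins for the real connection and send function:

struct Conn {
    healthy: bool,
}

// Stand-in for websocketreqrep: build a fresh, healthy connection.
fn reconnect() -> Conn {
    Conn { healthy: true }
}

// Stand-in for send_to_ipbl_websocket: returns false when the send fails.
fn try_send(conn: &Conn, msg: &str) -> bool {
    println!("sending: {msg}");
    conn.healthy
}

fn main() {
    let mut conn = Conn { healthy: false };
    for msg in ["ping", "add 192.0.2.1"] {
        if !try_send(&conn, msg) {
            println!("send failed, reconnecting");
            conn = reconnect();
        }
    }
}
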