Compare commits


46 Commits

Author SHA1 Message Date
1998e6e77a feat: bump 1.7.0
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2024-10-06 10:28:15 +02:00
f0cb50e797 Merge pull request 'add systemd notify support' (#14) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #14
2024-10-02 19:32:47 +02:00
3c4d6fb2cf update errors and variable names
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-10-02 19:11:44 +02:00
bdf41fa605 add systemd notify statuses
Some checks failed
continuous-integration/drone/push Build is failing
2024-10-02 19:07:27 +02:00
ebb6e5ec6d added sd-notify dependency
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-02 17:45:26 +02:00
46eaf6017f updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-28 14:50:07 +02:00
9b456d403f updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-24 12:44:32 +02:00
71d640f393 updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-24 12:43:24 +02:00
0a82d46bf1 updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-11 18:01:32 +02:00
29472b4d7f updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-11 17:59:36 +02:00
22214b8d55 Merge pull request 'updated dependencies - fosdem 2024 commit' (#13) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone Build is passing
Reviewed-on: #13
2024-02-03 10:21:22 +01:00
9bae2248df updated dependencies - fosdem 2024 commit
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-02-03 10:13:00 +01:00
ecd35fd37a update to 1.6.8
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2024-01-04 11:05:47 +01:00
129a7e9ada updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-03 21:47:13 +01:00
1e2f047824 add ips in chunks to nftables
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-03 21:44:00 +01:00
a60ec90608 update to 1.6.7
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-26 13:14:08 +01:00
ce6ca78087 added safety in ipblc
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-26 13:13:30 +01:00
2e6e7efdbf hotfix on ws connections
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-26 11:11:38 +01:00
bae5443ca4 update to version 1.6.6
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/tag Build is passing
2023-12-26 10:49:21 +01:00
f29ccd3f0b updated ipevent with Option<IpData>
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is failing
2023-12-26 10:42:39 +01:00
6c43635c92 update to version 1.6.6
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-24 07:44:21 +01:00
1067566e9d updated dependencies
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-23 13:19:14 +01:00
d47a4e218d update to version 1.6.5
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-23 13:18:12 +01:00
0b67bbdab3 update to version 1.6.5
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-23 13:13:52 +01:00
809b252df7 added error handling for monitoring
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-12 22:41:21 +01:00
5d132c6380 Merge branch 'develop'
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-12-04 12:31:08 +01:00
80c3faec58 fix exception handling on fw.rs
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 12:18:59 +01:00
103f8ea411 Merge pull request 'update to version 1.6.4' (#11) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #11
2023-11-27 13:49:42 +01:00
104d1558b1 update to version 1.6.4
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-11-27 13:44:44 +01:00
ad8744a92c Merge pull request 'fix of websocket error' (#10) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #10
2023-11-25 18:01:12 +01:00
1313296acf updated dependencies
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is passing
2023-11-25 17:46:24 +01:00
46a01efeea fix return in websocket.rs/send_to_ipbl_websocket
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-25 17:35:48 +01:00
c681825efe fix error handling in websocket
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-24 21:16:07 +01:00
0806e66671 Merge pull request 'fix on monitoring' (#9) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #9
2023-11-18 13:16:32 +01:00
9187642172 fix on monitoring
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-11-18 13:13:25 +01:00
77ee68c081 fix on firewall rule building
All checks were successful
continuous-integration/drone/tag Build is passing
continuous-integration/drone/push Build is passing
2023-11-12 17:13:47 +01:00
cd67b0d602 fix on monitoring socket
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-11-12 11:00:42 +01:00
b50a2d44d7 Merge pull request 'update to 1.6.0' (#8) from develop into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #8
2023-11-10 23:46:42 +01:00
7d45f708c3 update to 1.6.0
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-11-10 23:43:09 +01:00
a654889263 more simple code
* use of some simple macros
* simplified code blocks in ctx read/write access
2023-11-10 23:43:09 +01:00
05ef0cd339 feat: update monitoring and config reload
* monitoring: added read of current config
* config: get config by single url
2023-11-10 23:43:09 +01:00
3fb83f7f77 updated .drone.yml for sccache to use webdav
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-07 19:04:01 +01:00
59ad4a6624 updated .drone.yml
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-11-04 12:48:02 +01:00
db7001c749 fix typo in .drone.yml 2023-11-04 12:47:34 +01:00
af7f1a24a7 Merge pull request 'fix on filter' (#7) from ipblc-filter-fix into master
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #7
2023-11-04 12:47:04 +01:00
4c697c2e0c fix on filter
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-11-04 12:42:07 +01:00
13 changed files with 908 additions and 701 deletions

.drone.yml

@@ -8,14 +8,20 @@ platform:
   arch: amd64

 steps:
-- name: test and build
+- name: build and test
   image: rust:1
   pull: always
   commands:
   - apt-get update -y
   - apt-get install -y libnftnl-dev libmnl-dev
+  - curl -o /usr/bin/sccache https://assets.paulbsd.com/sccache_linux_amd64
+  - chmod +x /usr/bin/sccache
   - cargo build --verbose --all
   - cargo test --verbose --all
+  environment:
+    RUSTC_WRAPPER: /usr/bin/sccache
+    SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
+    SCCACHE_WEBDAV_KEY_PREFIX: sccache
   volumes:
   - name: cargo
     path: /usr/local/cargo/registry
@@ -31,9 +37,15 @@ steps:
   commands:
   - apt-get update -y
   - apt-get install -y libnftnl-dev libmnl-dev
+  - curl -o /usr/bin/sccache https://assets.paulbsd.com/sccache_linux_amd64
+  - chmod +x /usr/bin/sccache
   - cargo build --release --verbose --all
   - cd target/release
   - tar -czvf ipblc-${DRONE_TAG}-${DRONE_STAGE_OS}-${DRONE_STAGE_ARCH}.tar.gz ipblc
+  environment:
+    RUSTC_WRAPPER: /usr/bin/sccache
+    SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
+    SCCACHE_WEBDAV_KEY_PREFIX: sccache
   volumes:
   - name: cargo
     path: /usr/local/cargo/registry
@@ -75,14 +87,20 @@ platform:
   arch: arm64

 steps:
-- name: test and build
+- name: build and test
   image: rust:1
   pull: always
   commands:
   - apt-get update -y
   - apt-get install -y libnftnl-dev libmnl-dev
+  - curl -o /usr/bin/sccache https://assets.paulbsd.com/sccache_linux_arm64
+  - chmod +x /usr/bin/sccache
   - cargo build --verbose --all
   - cargo test --verbose --all
+  environment:
+    RUSTC_WRAPPER: /usr/bin/sccache
+    SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
+    SCCACHE_WEBDAV_KEY_PREFIX: sccache
   volumes:
   - name: cargo
     path: /usr/local/cargo/registry
@@ -98,9 +116,15 @@ steps:
   commands:
   - apt-get update -y
   - apt-get install -y libnftnl-dev libmnl-dev
+  - curl -o /usr/bin/sccache https://assets.paulbsd.com/sccache_linux_arm64
+  - chmod +x /usr/bin/sccache
   - cargo build --release --verbose --all
   - cd target/release
   - tar -czvf ipblc-${DRONE_TAG}-${DRONE_STAGE_OS}-${DRONE_STAGE_ARCH}.tar.gz ipblc
+  environment:
+    RUSTC_WRAPPER: /usr/bin/sccache
+    SCCACHE_WEBDAV_ENDPOINT: https://sccache.paulbsd.com
+    SCCACHE_WEBDAV_KEY_PREFIX: sccache
   volumes:
   - name: cargo
     path: /usr/local/cargo/registry

Cargo.lock (generated, 835 lines changed): file diff suppressed because it is too large

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "ipblc"
-version = "1.5.0"
+version = "1.7.0"
 edition = "2021"
 authors = ["PaulBSD <paul@paulbsd.com>"]
 description = "ipblc is a tool that search and send attacking ip addresses to ipbl"
@@ -21,8 +21,9 @@ regex = "1.10"
 reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-tokio = { version = "1.33", features = ["full", "sync"] }
-tungstenite = { version = "0.20", features = ["handshake", "rustls-tls-native-roots"] }
+sd-notify = { version = "0.4" }
+tokio = { version = "1.35", features = ["full", "sync"] }
+tungstenite = { version = "0.21", features = ["handshake", "rustls-tls-native-roots"] }

 ## to optimize binary size (slow compile time)
 #[profile.release]
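The new sd-notify dependency is what later drives the systemd integration. As a minimal sketch of the 0.4 API used in this release (readiness plus a status string), separate from ipblc's own wiring:

use sd_notify::NotifyState;

fn main() {
    // Tell systemd (Type=notify units) that startup is complete.
    // Outside of such a unit there is nothing to notify, so the result is ignored.
    let _ = sd_notify::notify(false, &[NotifyState::Ready]);

    // Publish a human-readable status line visible in `systemctl status`.
    let _ = sd_notify::notify(false, &[NotifyState::Status("watching log files")]);
}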


@@ -48,6 +48,7 @@ Options:
 - ✅ Local bound tcp api socket
 - ✅ ZMQ -> Websocket
 - ✅ Bug in RwLocks (agent often give up)
+- ❌ Create memory friendly structs for ipdata

 ### Notes


@ -17,7 +17,7 @@ use std::path::Path;
pub const GIT_VERSION: &str = git_version!(args = ["--always", "--dirty="]); pub const GIT_VERSION: &str = git_version!(args = ["--always", "--dirty="]);
const MASTERSERVER: &str = "ipbl.paulbsd.com"; const MASTERSERVER: &str = "ipbl.paulbsd.com";
const WSSUBSCRIPTION: &str = "ipbl"; const WSSUBSCRIPTION: &str = "ipbl";
const CONFIG_RETRY: u64 = 2; const CONFIG_RETRY_INTERVAL: u64 = 2;
const WEB_CLIENT_TIMEOUT: i64 = 5; const WEB_CLIENT_TIMEOUT: i64 = 5;
#[derive(Debug)] #[derive(Debug)]
@ -28,6 +28,7 @@ pub struct Context {
pub flags: Flags, pub flags: Flags,
pub sas: HashMap<String, SetMap>, pub sas: HashMap<String, SetMap>,
pub hashwd: HashMap<String, WatchDescriptor>, pub hashwd: HashMap<String, WatchDescriptor>,
pub reloadinterval: isize,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -35,7 +36,7 @@ pub struct SetMap {
pub filename: String, pub filename: String,
pub fullpath: String, pub fullpath: String,
pub regex: Regex, pub regex: Regex,
pub set: Set, pub set: SetCfg,
pub watchedfiles: HashMap<String, u64>, pub watchedfiles: HashMap<String, u64>,
pub wd: WatchDescriptor, pub wd: WatchDescriptor,
} }
@ -64,6 +65,7 @@ impl Context {
sas: HashMap::new(), sas: HashMap::new(),
blocklist: HashMap::new(), blocklist: HashMap::new(),
hashwd: HashMap::new(), hashwd: HashMap::new(),
reloadinterval: 5,
}; };
print!("Loading config ... "); print!("Loading config ... ");
@ -101,12 +103,12 @@ impl Context {
.send() .send()
.await; .await;
let req = match resp { let req = match resp {
Ok(re) => re, Ok(o) => o,
Err(err) => return Err(err), Err(e) => return Err(e),
}; };
let data: Discovery = match req.json().await { let data: Discovery = match req.json().await {
Ok(res) => res, Ok(o) => o,
Err(err) => return Err(err), Err(e) => return Err(e),
}; };
Ok(data) Ok(data)
} }
@ -125,10 +127,10 @@ impl Context {
} }
break; break;
} }
Err(err) => { Err(e) => {
println!("error loading config: {err}, retrying in {CONFIG_RETRY}s"); println!("error loading config: {e}, retrying in {CONFIG_RETRY_INTERVAL}s");
last_in_err = true; last_in_err = true;
sleep_s(CONFIG_RETRY).await; sleep_s(CONFIG_RETRY_INTERVAL).await;
} }
}; };
} }
@ -167,18 +169,19 @@ impl Context {
} }
pub async fn update_blocklist(&mut self, ipevent: &IpEvent) -> Option<IpEvent> { pub async fn update_blocklist(&mut self, ipevent: &IpEvent) -> Option<IpEvent> {
match self.cfg.sets.get(&ipevent.ipdata.src) { match &ipevent.ipdata {
Some(ipdata) => match self.cfg.sets.get(&ipdata.src) {
Some(set) => { Some(set) => {
let starttime = DateTime::parse_from_rfc3339(ipevent.ipdata.date.as_str()) let starttime = DateTime::parse_from_rfc3339(ipdata.date.as_str())
.unwrap() .unwrap()
.with_timezone(&chrono::Local); .with_timezone(&chrono::Local);
let blocktime = set.blocktime; let blocktime = set.blocktime;
if ipevent.mode == "file".to_string() && gethostname(true) == ipevent.hostname { if ipevent.mode == "file".to_string() && gethostname(true) == ipevent.hostname {
let block = self let block =
.blocklist self.blocklist
.entry(ipevent.ipdata.ip.to_string()) .entry(ipdata.ip.to_string())
.or_insert(BlockIpData { .or_insert(BlockIpData {
ipdata: ipevent.ipdata.clone(), ipdata: ipdata.clone(),
tryfail: 0, tryfail: 0,
starttime, starttime,
blocktime, blocktime,
@ -190,9 +193,9 @@ impl Context {
} }
} else { } else {
self.blocklist self.blocklist
.entry(ipevent.ipdata.ip.to_string()) .entry(ipdata.ip.to_string())
.or_insert(BlockIpData { .or_insert(BlockIpData {
ipdata: ipevent.ipdata.clone(), ipdata: ipdata.clone(),
tryfail: set.tryfail, tryfail: set.tryfail,
starttime, starttime,
blocktime, blocktime,
@ -200,6 +203,8 @@ impl Context {
} }
} }
None => {} None => {}
},
None => {}
} }
None None
} }
@ -282,7 +287,7 @@ impl Context {
#[derive(Debug, Deserialize, Serialize, Clone)] #[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Config { pub struct Config {
pub sets: HashMap<String, Set>, pub sets: HashMap<String, SetCfg>,
#[serde(skip_serializing)] #[serde(skip_serializing)]
pub trustnets: Vec<String>, pub trustnets: Vec<String>,
pub ws: HashMap<String, WebSocketCfg>, pub ws: HashMap<String, WebSocketCfg>,
@ -294,7 +299,7 @@ impl Config {
Self { Self {
sets: HashMap::from([ sets: HashMap::from([
("smtp".to_string(), ("smtp".to_string(),
Set { SetCfg {
src: "smtp".to_string(), src: "smtp".to_string(),
filename: "mail.log".to_string(), filename: "mail.log".to_string(),
regex: "(SASL LOGIN authentication failed)".to_string(), regex: "(SASL LOGIN authentication failed)".to_string(),
@ -303,7 +308,7 @@ impl Config {
tryfail: 5, tryfail: 5,
}), }),
("ssh".to_string(), ("ssh".to_string(),
Set { SetCfg {
src: "ssh".to_string(), src: "ssh".to_string(),
filename: "auth.log".to_string(), filename: "auth.log".to_string(),
regex: "(Invalid user|BREAK|not allowed because|no matching key exchange method found)".to_string(), regex: "(Invalid user|BREAK|not allowed because|no matching key exchange method found)".to_string(),
@ -312,7 +317,7 @@ impl Config {
tryfail: 5, tryfail: 5,
},), },),
("http".to_string(), ("http".to_string(),
Set { SetCfg {
src: "http".to_string(), src: "http".to_string(),
filename: "".to_string(), filename: "".to_string(),
regex: "(anonymousfox.co)".to_string(), regex: "(anonymousfox.co)".to_string(),
@ -321,7 +326,7 @@ impl Config {
tryfail: 5, tryfail: 5,
},), },),
("openvpn".to_string(), ("openvpn".to_string(),
Set { SetCfg {
src: "openvpn".to_string(), src: "openvpn".to_string(),
filename: "status".to_string(), filename: "status".to_string(),
regex: "(UNDEF)".to_string(), regex: "(UNDEF)".to_string(),
@ -350,96 +355,40 @@ impl Config {
} }
pub async fn load(&mut self, server: &String) -> Result<(), ReqError> { pub async fn load(&mut self, server: &String) -> Result<(), ReqError> {
self.get_global_config(server).await?; self.get_config(server).await?;
self.get_trustnets(server).await?;
self.get_sets(server).await?;
self.get_ws_config(server).await?;
Ok(()) Ok(())
} }
async fn get_global_config(&mut self, server: &String) -> Result<(), ReqError> { async fn get_config(&mut self, server: &String) -> Result<(), ReqError> {
let resp: Result<Response, ReqError> =
httpclient().get(format!("{server}/config")).send().await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
};
let data: HashMap<String, GlobalConfig> = match req.json::<Vec<GlobalConfig>>().await {
Ok(res) => {
let mut out: HashMap<String, GlobalConfig> = HashMap::new();
res.into_iter().map(|x| x).for_each(|x| {
out.insert(x.key.to_string(), x);
});
out
}
Err(err) => return Err(err),
};
let key = "".to_string();
self.api = data
.get(&key.to_string())
.unwrap_or(&GlobalConfig {
key: "api".to_string(),
value: "127.0.0.1:8060".to_string(),
})
.value
.clone();
Ok(())
}
async fn get_trustnets(&mut self, server: &String) -> Result<(), ReqError> {
let resp: Result<Response, ReqError> = httpclient() let resp: Result<Response, ReqError> = httpclient()
.get(format!("{server}/config/trustlist")) .get(format!("{server}/config?v=2"))
.send() .send()
.await; .await;
let req = match resp { let req = match resp {
Ok(re) => re, Ok(o) => o,
Err(err) => return Err(err), Err(e) => return Err(e),
}; };
let data: Vec<String> = match req.json::<Vec<String>>().await { let data: GlobalConfigV2 = match req.json::<GlobalConfigV2>().await {
Ok(res) => res, Ok(o) => o,
Err(err) => return Err(err), Err(e) => return Err(e),
}; };
self.trustnets = data;
Ok(())
}
async fn get_sets(&mut self, server: &String) -> Result<(), ReqError> { for d in data.sets {
let resp: Result<Response, ReqError> = httpclient()
.get(format!("{server}/config/sets"))
.send()
.await;
let req = match resp {
Ok(re) => re,
Err(err) => return Err(err),
};
let data: Vec<Set> = match req.json::<Vec<Set>>().await {
Ok(res) => res,
Err(err) => return Err(err),
};
for d in data {
self.sets.insert(d.src.clone(), d); self.sets.insert(d.src.clone(), d);
} }
Ok(())
}
async fn get_ws_config(&mut self, server: &String) -> Result<(), ReqError> { self.trustnets = data.trustlists;
let resp: Result<Response, ReqError> =
httpclient().get(format!("{server}/config/ws")).send().await; data.ws.into_iter().map(|x| x).for_each(|x| {
let req = match resp { self.ws.insert(x.t.to_string(), x);
Ok(re) => re,
Err(err) => return Err(err),
};
let data: HashMap<String, WebSocketCfg> = match req.json::<Vec<WebSocketCfg>>().await {
Ok(res) => {
let mut out: HashMap<String, WebSocketCfg> = HashMap::new();
res.into_iter().map(|x| x).for_each(|x| {
out.insert(x.t.to_string(), x);
}); });
out
} self.api = data
Err(err) => return Err(err), .cfg
}; .get(&"api".to_string())
self.ws = data; .unwrap_or(&self.api)
.clone();
Ok(()) Ok(())
} }
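The refactor above collapses the separate /config, /config/trustlist, /config/sets and /config/ws calls into a single /config?v=2 request deserialized into one struct. A rough, self-contained sketch of that pattern, assuming reqwest with the json feature (as in Cargo.toml); the struct is trimmed to two of the fields shown in the diff and the URL is a placeholder:

use serde::Deserialize;
use std::collections::HashMap;

#[derive(Debug, Deserialize)]
struct GlobalConfigV2 {
    cfg: HashMap<String, String>,
    trustlists: Vec<String>,
    // unknown fields (sets, ws, ...) are ignored by serde by default
}

// Fetch and deserialize the whole configuration in one request.
async fn get_config(server: &str) -> Result<GlobalConfigV2, reqwest::Error> {
    reqwest::get(format!("{server}/config?v=2"))
        .await?
        .json::<GlobalConfigV2>()
        .await
}

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let cfg = get_config("https://ipbl.example.org").await?; // placeholder server
    println!("{} trusted networks, {} keys", cfg.trustlists.len(), cfg.cfg.len());
    Ok(())
}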
@ -451,13 +400,13 @@ impl Config {
.await; .await;
let req = match resp { let req = match resp {
Ok(re) => re, Ok(o) => o,
Err(err) => return Err(err), Err(e) => return Err(e),
}; };
let data: Vec<IpData> = match req.json::<Vec<IpData>>().await { let data: Vec<IpData> = match req.json::<Vec<IpData>>().await {
Ok(res) => res, Ok(o) => o,
Err(err) => return Err(err), Err(e) => return Err(e),
}; };
Ok(data) Ok(data)
@ -468,8 +417,8 @@ impl Config {
for trustnet in &self.trustnets { for trustnet in &self.trustnets {
match trustnet.parse() { match trustnet.parse() {
Ok(net) => trustnets.push(net), Ok(net) => trustnets.push(net),
Err(err) => { Err(e) => {
println!("error parsing {trustnet}, error: {err}"); println!("error parsing {trustnet}, error: {e}");
} }
}; };
} }
@ -481,13 +430,7 @@ impl Config {
msgtype: String::from("bootstrap"), msgtype: String::from("bootstrap"),
mode: String::from("ws"), mode: String::from("ws"),
hostname: gethostname(true), hostname: gethostname(true),
ipdata: IpData { ipdata: None,
t: 4,
ip: "".to_string(),
src: "".to_string(),
date: "".to_string(),
hostname: "".to_string(),
},
} }
} }
} }
@ -508,13 +451,15 @@ pub fn httpclient() -> Client {
} }
#[derive(Debug, Deserialize, Serialize, Clone)] #[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GlobalConfig { pub struct GlobalConfigV2 {
pub key: String, pub cfg: HashMap<String, String>,
pub value: String, pub sets: Vec<SetCfg>,
pub trustlists: Vec<String>,
pub ws: Vec<WebSocketCfg>,
} }
#[derive(Debug, Deserialize, Serialize, Clone)] #[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Set { pub struct SetCfg {
pub src: String, pub src: String,
pub filename: String, pub filename: String,
pub regex: String, pub regex: String,
@ -543,13 +488,13 @@ pub struct URL {
pub path: String, pub path: String,
} }
impl PartialEq for Set { impl PartialEq for SetCfg {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.src == other.src self.src == other.src
} }
} }
impl Hash for Set { impl Hash for SetCfg {
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
self.src.hash(state); self.src.hash(state);
} }
@ -573,13 +518,13 @@ mod test {
msgtype: String::from("add"), msgtype: String::from("add"),
mode: String::from("ws"), mode: String::from("ws"),
hostname: String::from("localhost"), hostname: String::from("localhost"),
ipdata: IpData { ipdata: Some(IpData {
t: 4, t: 4,
ip: "1.1.1.1".to_string(), ip: "1.1.1.1".to_string(),
hostname: "test1".to_string(), hostname: "test1".to_string(),
date: now.to_rfc3339().to_string(), date: now.to_rfc3339().to_string(),
src: "ssh".to_string(), src: "ssh".to_string(),
}, }),
}) })
.await; .await;
} }
@ -589,13 +534,13 @@ mod test {
msgtype: String::from("add"), msgtype: String::from("add"),
mode: String::from("ws"), mode: String::from("ws"),
hostname: String::from("localhost"), hostname: String::from("localhost"),
ipdata: IpData { ipdata: Some(IpData {
t: 4, t: 4,
ip: "1.1.1.2".to_string(), ip: "1.1.1.2".to_string(),
hostname: "test2".to_string(), hostname: "test2".to_string(),
date: now.to_rfc3339().to_string(), date: now.to_rfc3339().to_string(),
src: "http".to_string(), src: "http".to_string(),
}, }),
}) })
.await; .await;
} }
@ -604,13 +549,13 @@ mod test {
msgtype: String::from("add"), msgtype: String::from("add"),
mode: String::from("ws"), mode: String::from("ws"),
hostname: String::from("localhost"), hostname: String::from("localhost"),
ipdata: IpData { ipdata: Some(IpData {
t: 4, t: 4,
ip: "1.1.1.3".to_string(), ip: "1.1.1.3".to_string(),
hostname: "testgood".to_string(), hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(), date: now.to_rfc3339().to_string(),
src: "http".to_string(), src: "http".to_string(),
}, }),
}) })
.await; .await;
@ -618,13 +563,13 @@ mod test {
msgtype: String::from("add"), msgtype: String::from("add"),
mode: String::from("ws"), mode: String::from("ws"),
hostname: String::from("localhost"), hostname: String::from("localhost"),
ipdata: IpData { ipdata: Some(IpData {
t: 4, t: 4,
ip: "1.1.1.4".to_string(), ip: "1.1.1.4".to_string(),
hostname: "testgood".to_string(), hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(), date: now.to_rfc3339().to_string(),
src: "http".to_string(), src: "http".to_string(),
}, }),
}) })
.await; .await;
@ -632,26 +577,26 @@ mod test {
msgtype: String::from("add"), msgtype: String::from("add"),
mode: String::from("ws"), mode: String::from("ws"),
hostname: String::from("localhost"), hostname: String::from("localhost"),
ipdata: IpData { ipdata: Some(IpData {
t: 4, t: 4,
ip: "1.1.1.4".to_string(), ip: "1.1.1.4".to_string(),
hostname: "testgood".to_string(), hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(), date: now.to_rfc3339().to_string(),
src: "http".to_string(), src: "http".to_string(),
}, }),
}) })
.await; .await;
ctx.update_blocklist(&mut IpEvent { ctx.update_blocklist(&mut IpEvent {
msgtype: String::from("add"), msgtype: String::from("add"),
mode: String::from("ws"), mode: String::from("ws"),
hostname: String::from("localhost"), hostname: String::from("localhost"),
ipdata: IpData { ipdata: Some(IpData {
t: 6, t: 6,
ip: "2a00:1450:4007:805::2003".to_string(), ip: "2a00:1450:4007:805::2003".to_string(),
hostname: "testgood".to_string(), hostname: "testgood".to_string(),
date: now.to_rfc3339().to_string(), date: now.to_rfc3339().to_string(),
src: "http".to_string(), src: "http".to_string(),
}, }),
}) })
.await; .await;
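update_blocklist now only touches the map when ipevent.ipdata is Some, then relies on entry().or_insert() to create or reuse the per-IP entry. A simplified sketch of that shape, written here with let-else and stripped-down stand-in types rather than ipblc's own:

use std::collections::HashMap;

#[derive(Clone, Debug)]
struct IpData { ip: String, src: String }

struct IpEvent { ipdata: Option<IpData> }

#[derive(Debug)]
struct BlockIpData { ipdata: IpData, tryfail: u32 }

fn update_blocklist(blocklist: &mut HashMap<String, BlockIpData>, event: &IpEvent) {
    // Events such as "bootstrap" or "ping" carry no address: nothing to do.
    let Some(ipdata) = &event.ipdata else { return };

    // Insert the entry once; repeated events for the same IP bump the counter.
    let entry = blocklist
        .entry(ipdata.ip.clone())
        .or_insert(BlockIpData { ipdata: ipdata.clone(), tryfail: 0 });
    entry.tryfail += 1;
}

fn main() {
    let mut blocklist = HashMap::new();
    let ev = IpEvent {
        ipdata: Some(IpData { ip: "192.0.2.1".into(), src: "ssh".into() }),
    };
    update_blocklist(&mut blocklist, &ev);
    update_blocklist(&mut blocklist, &ev);
    println!("{:?}", blocklist.get("192.0.2.1"));
}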

src/fw.rs (123 lines changed)

@ -19,6 +19,41 @@ pub fn fwglobalinit<'a>() -> ((Batch, Table), (Batch, Table)) {
((batch4, table4), (batch6, table6)) ((batch4, table4), (batch6, table6))
} }
macro_rules! initrules {
($batch:expr, $table:expr, $chain:ident) => {
$chain.set_hook(nftnl::Hook::In, 1);
$chain.set_policy(nftnl::Policy::Accept);
$batch.add(&$chain, nftnl::MsgType::Add);
$batch.add(&Rule::new(&$chain), nftnl::MsgType::Del);
let mut rule = Rule::new(&$chain);
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 4u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict accept));
$batch.add(&rule, nftnl::MsgType::Add);
};
}
macro_rules! createrules {
($ipdata:ident, $chain:ident, $batch:ident, $t:ty, $ip_t:ident) => {
let mut rule = Rule::new(&$chain);
let ip = $ipdata.ip.parse::<$t>().unwrap();
rule.add_expr(&nft_expr!(payload $ip_t saddr));
rule.add_expr(&nft_expr!(cmp == ip));
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 10u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict drop));
$batch.add(&rule, nftnl::MsgType::Add);
}
}
fn fwinit(t: FwTableType) -> (Batch, Table) { fn fwinit(t: FwTableType) -> (Batch, Table) {
let table_name: String; let table_name: String;
let table: Table; let table: Table;
@ -48,77 +83,41 @@ fn fwinit(t: FwTableType) -> (Batch, Table) {
} }
pub fn fwblock( pub fn fwblock(
ips_add: &Vec<IpData>, ips_add_all: &Vec<IpData>,
ret: &mut Vec<String>, ret: &mut Vec<String>,
fwlen: &mut usize, fwlen: &mut usize,
) -> std::result::Result<(), Error> { ) -> std::result::Result<(), Error> {
let ((mut batch4, table4), (mut batch6, table6)) = fwglobalinit(); let ((mut batch4, table4), (mut batch6, table6)) = fwglobalinit();
// build chain for ipv4
let mut chain4 = Chain::new(&CString::new(PKG_NAME).unwrap(), &table4); let mut chain4 = Chain::new(&CString::new(PKG_NAME).unwrap(), &table4);
chain4.set_hook(nftnl::Hook::In, 1);
chain4.set_policy(nftnl::Policy::Accept);
// add chain
batch4.add(&chain4, nftnl::MsgType::Add);
batch4.add(&Rule::new(&chain4), nftnl::MsgType::Del);
let mut rule4 = Rule::new(&chain4);
rule4.add_expr(&nft_expr!(ct state));
rule4.add_expr(&nft_expr!(bitwise mask 4u32, xor 0u32));
rule4.add_expr(&nft_expr!(cmp != 0u32));
rule4.add_expr(&nft_expr!(counter));
rule4.add_expr(&nft_expr!(verdict accept));
batch4.add(&rule4, nftnl::MsgType::Add);
// build chain for ipv6
let mut chain6 = Chain::new(&CString::new(PKG_NAME).unwrap(), &table6); let mut chain6 = Chain::new(&CString::new(PKG_NAME).unwrap(), &table6);
chain6.set_hook(nftnl::Hook::In, 1);
chain6.set_policy(nftnl::Policy::Accept);
// add chain initrules!(batch4, table4, chain4);
batch6.add(&chain6, nftnl::MsgType::Add); initrules!(batch6, table6, chain6);
batch6.add(&Rule::new(&chain6), nftnl::MsgType::Del); let mut factor = 1;
if ips_add_all.len() > 100 {
factor = (ips_add_all.len() / 10) as usize
}
let mut rule6 = Rule::new(&chain6); let ips_add_tmp: Vec<IpData> = ips_add_all.clone().iter().map(|x| x.clone()).collect();
rule6.add_expr(&nft_expr!(ct state)); let mut ips_add_iter = ips_add_tmp.chunks(factor);
rule6.add_expr(&nft_expr!(bitwise mask 4u32, xor 0u32)); let mut ips_add: Vec<&[IpData]> = vec![];
rule6.add_expr(&nft_expr!(cmp != 0u32)); while let Some(x) = ips_add_iter.next() {
rule6.add_expr(&nft_expr!(counter)); ips_add.push(x);
rule6.add_expr(&nft_expr!(verdict accept)); }
batch6.add(&rule6, nftnl::MsgType::Add);
// build and add rules // build and add rules
for ipdata in ips_add.clone() { for ipdata_group in ips_add.clone() {
for ipdata in ipdata_group {
match ipdata.t { match ipdata.t {
4 => { 4 => {
let ip = ipdata.ip.parse::<Ipv4Addr>().unwrap(); createrules!(ipdata, chain4, batch4, Ipv4Addr, ipv4);
let mut rule = Rule::new(&chain4);
rule.add_expr(&nft_expr!(payload ipv4 saddr));
rule.add_expr(&nft_expr!(cmp == ip));
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 10u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict drop));
batch4.add(&rule, nftnl::MsgType::Add);
} }
6 => { 6 => {
let ip = ipdata.ip.parse::<Ipv6Addr>().unwrap(); createrules!(ipdata, chain6, batch6, Ipv6Addr, ipv6);
let mut rule = Rule::new(&chain6);
rule.add_expr(&nft_expr!(payload ipv6 saddr));
rule.add_expr(&nft_expr!(cmp == ip));
rule.add_expr(&nft_expr!(ct state));
rule.add_expr(&nft_expr!(bitwise mask 10u32, xor 0u32));
rule.add_expr(&nft_expr!(cmp != 0u32));
rule.add_expr(&nft_expr!(counter));
rule.add_expr(&nft_expr!(verdict drop));
batch6.add(&rule, nftnl::MsgType::Add);
} }
_ => { _ => {}
todo!()
} }
} }
} }
@ -126,12 +125,20 @@ pub fn fwblock(
// validate and send batch // validate and send batch
for b in [batch4, batch6] { for b in [batch4, batch6] {
let bf = b.finalize(); let bf = b.finalize();
send_and_process(&bf).unwrap(); match send_and_process(&bf) {
Ok(_) => {}
Err(e) => {
println!("error sending batch: {e}");
} }
if fwlen != &mut ips_add.len() { };
ret.push(format!("{length} ip in firewall", length = ips_add.len()));
} }
*fwlen = ips_add.len(); if fwlen != &mut ips_add_all.len() {
ret.push(format!(
"{length} ip in firewall",
length = ips_add_all.len()
));
}
*fwlen = ips_add_all.len();
Ok(()) Ok(())
} }
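The "add ips in chunks to nftables" change splits the rule list into roughly ten batches once it grows past 100 entries. A small sketch of just that chunking heuristic on plain strings, leaving the nftnl batch plumbing out:

fn chunk_ips(ips: &[String]) -> Vec<&[String]> {
    // Mirror the heuristic from fwblock: one chunk per ~10% of the list,
    // but never a chunk size of zero for small lists (chunks(0) would panic).
    let factor = if ips.len() > 100 { ips.len() / 10 } else { 1 };
    ips.chunks(factor).collect()
}

fn main() {
    let ips: Vec<String> = (0..250).map(|i| format!("192.0.2.{}", i % 256)).collect();
    let batches = chunk_ips(&ips);
    println!("{} batches of up to {} addresses", batches.len(), batches[0].len());
}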


@ -22,7 +22,7 @@ pub struct IpEvent {
pub msgtype: String, pub msgtype: String,
pub mode: String, pub mode: String,
pub hostname: String, pub hostname: String,
pub ipdata: IpData, pub ipdata: Option<IpData>,
} }
#[macro_export] #[macro_export]
@ -35,6 +35,14 @@ macro_rules! ipevent {
ipdata: $ipdata, ipdata: $ipdata,
} }
}; };
($msgtype:expr,$mode:expr,$hostname:expr) => {
IpEvent {
msgtype: String::from($msgtype),
mode: String::from($mode),
hostname: $hostname,
ipdata: None,
}
};
} }
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
@ -54,6 +62,19 @@ pub struct IpData {
pub hostname: String, pub hostname: String,
} }
#[macro_export]
macro_rules! ipdata {
($t:expr,$ip:expr,$src:expr,$date:expr,$hostname:expr) => {
IpData {
t: $t.clone(),
ip: $ip.clone(),
src: $src.clone(),
date: $date.clone(),
hostname: $hostname.clone(),
}
};
}
impl PartialEq for IpData { impl PartialEq for IpData {
fn eq(&self, other: &IpData) -> bool { fn eq(&self, other: &IpData) -> bool {
self.ip.as_bytes() == other.ip.as_bytes() && self.src == other.src self.ip.as_bytes() == other.ip.as_bytes() && self.src == other.src
@ -89,7 +110,7 @@ impl Display for IpData {
} }
pub fn filter( pub fn filter(
lines: Box<dyn Read>, reader: Box<dyn Read>,
iplist: &mut Vec<IpData>, iplist: &mut Vec<IpData>,
trustnets: &Vec<IpNet>, trustnets: &Vec<IpNet>,
regex: &Regex, regex: &Regex,
@ -98,7 +119,8 @@ pub fn filter(
) -> isize { ) -> isize {
let mut ips = 0; let mut ips = 0;
let hostname = gethostname(true); let hostname = gethostname(true);
for line in BufReader::new(lines).lines() { let lines = BufReader::new(reader).lines();
for line in lines.into_iter() {
if let Ok(l) = line { if let Ok(l) = line {
if regex.is_match(l.as_str()) { if regex.is_match(l.as_str()) {
let s_ipaddr: String; let s_ipaddr: String;
@ -122,6 +144,14 @@ pub fn filter(
} }
}; };
let ipaddr: IpAddr = match s_ipaddr.parse() {
Ok(ip) => ip,
Err(err) => {
println!("unparseable IP: {err} {s_ipaddr}");
continue;
}
};
let s_date: DateTime<Local>; let s_date: DateTime<Local>;
match R_DATE.captures(l.as_str()) { match R_DATE.captures(l.as_str()) {
Some(sdt) => { Some(sdt) => {
@ -135,22 +165,8 @@ pub fn filter(
} }
}; };
let ipaddr: IpAddr = match s_ipaddr.parse() {
Ok(ip) => ip,
Err(err) => {
println!("unparseable IP: {err} {s_ipaddr}");
continue;
}
};
if !is_trusted(&ipaddr, &trustnets) { if !is_trusted(&ipaddr, &trustnets) {
iplist.push(IpData { iplist.push(ipdata!(t, s_ipaddr, src, s_date.to_rfc3339(), hostname));
ip: s_ipaddr,
t: t,
src: src.to_owned(),
date: s_date.to_rfc3339().to_owned(),
hostname: hostname.to_owned(),
});
ips += 1; ips += 1;
}; };
} }
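filter() now parses the source address before the date and skips trusted networks early. A condensed sketch of that flow (BufReader lines, regex match, IpAddr parse, containment check), assuming the regex and ipnet crates already used by the project; the regex, field name and sample log are illustrative only:

use ipnet::IpNet;
use regex::Regex;
use std::io::{BufRead, BufReader, Read};
use std::net::IpAddr;

fn is_trusted(ip: &IpAddr, trustnets: &[IpNet]) -> bool {
    trustnets.iter().any(|net| net.contains(ip))
}

// Collect source addresses of matching log lines, skipping trusted networks
// and lines whose address does not parse.
fn filter(reader: Box<dyn Read>, pattern: &Regex, trustnets: &[IpNet]) -> Vec<IpAddr> {
    let mut hits = Vec::new();
    for line in BufReader::new(reader).lines().flatten() {
        if let Some(caps) = pattern.captures(&line) {
            if let Ok(ip) = caps["ip"].parse::<IpAddr>() {
                if !is_trusted(&ip, trustnets) {
                    hits.push(ip);
                }
            }
        }
    }
    hits
}

fn main() {
    let log = "Invalid user admin from 203.0.113.7\nInvalid user root from 10.0.0.4\n";
    let re = Regex::new(r"Invalid user \S+ from (?P<ip>\S+)").unwrap();
    let trust: Vec<IpNet> = vec!["10.0.0.0/8".parse().unwrap()];
    let found = filter(Box::new(log.as_bytes()), &re, &trust);
    println!("{found:?}"); // only 203.0.113.7 survives
}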


@ -11,6 +11,7 @@ use chrono::prelude::*;
use chrono::prelude::{DateTime, Local}; use chrono::prelude::{DateTime, Local};
use chrono::Duration; use chrono::Duration;
use nix::sys::inotify::{InitFlags, Inotify, InotifyEvent}; use nix::sys::inotify::{InitFlags, Inotify, InotifyEvent};
use sd_notify::*;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::mpsc::{channel, Receiver, Sender}; use tokio::sync::mpsc::{channel, Receiver, Sender};
@ -21,6 +22,13 @@ const BL_CHAN_SIZE: usize = 32;
const WS_CHAN_SIZE: usize = 64; const WS_CHAN_SIZE: usize = 64;
const LOOP_MAX_WAIT: u64 = 5; const LOOP_MAX_WAIT: u64 = 5;
macro_rules! log_with_systemd {
($msg:expr) => {
println!("{}", $msg);
notify(false, &[NotifyState::Status(format!("{}", $msg).as_str())]).unwrap();
};
}
pub async fn run() { pub async fn run() {
let inotify = Inotify::init(InitFlags::empty()).unwrap(); let inotify = Inotify::init(InitFlags::empty()).unwrap();
let globalctx = Context::new(&inotify).await; let globalctx = Context::new(&inotify).await;
@ -31,7 +39,7 @@ pub async fn run() {
let pkgversion = format!("{}@{}", env!("CARGO_PKG_VERSION"), GIT_VERSION); let pkgversion = format!("{}@{}", env!("CARGO_PKG_VERSION"), GIT_VERSION);
let mut last_cfg_reload: DateTime<Local> = Local::now().trunc_subsecs(0); let mut last_cfg_reload: DateTime<Local> = Local::now().trunc_subsecs(0);
println!("Launching {}, version {}", PKG_NAME, pkgversion); log_with_systemd!(format!("Launching {}, version {}", PKG_NAME, pkgversion));
fwglobalinit(); fwglobalinit();
let ctxapi = Arc::clone(&ctxarc); let ctxapi = Arc::clone(&ctxarc);
@ -60,6 +68,8 @@ pub async fn run() {
compare_files_changes(&ctxclone, &mut blrx, &ipeventclone).await; compare_files_changes(&ctxclone, &mut blrx, &ipeventclone).await;
}); });
notify(false, &[NotifyState::Ready]).unwrap();
loop { loop {
let mut ret: Vec<String> = Vec::new(); let mut ret: Vec<String> = Vec::new();
@ -69,17 +79,16 @@ pub async fn run() {
ipevent = ipeventrx.recv() => { ipevent = ipeventrx.recv() => {
let received_ip = ipevent.unwrap(); let received_ip = ipevent.unwrap();
let (toblock,server); let (toblock,server) = {
{
let ctx = ctxclone.read().await; let ctx = ctxclone.read().await;
toblock = ctx.get_blocklist_toblock().await; (ctx.get_blocklist_toblock().await,ctx.flags.server.clone())
server = ctx.flags.server.clone(); };
}
if received_ip.msgtype == "bootstrap".to_string() { if received_ip.msgtype == "bootstrap".to_string() {
for ip_to_send in toblock { for ip_to_send in toblock {
let ipe = ipevent!("init","ws",gethostname(true),ip_to_send); let ipe = ipevent!("init","ws",gethostname(true),Some(ip_to_send));
if !send_to_ipbl_websocket(&mut wssocketrr, &ipe).await { if !send_to_ipbl_websocket(&mut wssocketrr, &ipe).await {
wssocketrr.close(None).unwrap();
wssocketrr = websocketreqrep(&ctxwsrr).await; wssocketrr = websocketreqrep(&ctxwsrr).await;
break; break;
} }
@ -88,27 +97,31 @@ pub async fn run() {
} }
// refresh context blocklist // refresh context blocklist
let filtered_ipevent; let filtered_ipevent = {
{ ctxarc.write().await.update_blocklist(&received_ip).await
let mut ctx = ctxarc.write().await; };
filtered_ipevent = ctx.update_blocklist(&received_ip).await;
}
// send ip list to api and ws sockets // send ip list to api and ws sockets
if let Some(ipevent) = filtered_ipevent { if let Some(ipevent) = filtered_ipevent {
if received_ip.msgtype != "init" { if received_ip.msgtype != "init" {
println!("sending {} to api and ws", ipevent.ipdata.ip); log_with_systemd!(format!("sending {} to api and ws", ipevent.ipdata.clone().unwrap().ip));
let ipe = ipevent!("add","ws",gethostname(true),ipevent.ipdata); let ipe = ipevent!("add","ws",gethostname(true),ipevent.ipdata);
send_to_ipbl_api(&server.clone(), &ipe).await; send_to_ipbl_api(&server.clone(), &ipe).await;
let status = send_to_ipbl_websocket(&mut wssocketrr, &ipe).await; if !send_to_ipbl_websocket(&mut wssocketrr, &ipe).await {
if !status { wssocketrr.close(None).unwrap();
wssocketrr = websocketreqrep(&ctxwsrr).await; wssocketrr = websocketreqrep(&ctxwsrr).await;
continue; continue;
} }
} }
} }
} }
_val = sleep_s(LOOP_MAX_WAIT) => {} _val = sleep_s(LOOP_MAX_WAIT) => {
let ipe = ipevent!("ping", "ws", gethostname(true));
if !send_to_ipbl_websocket(&mut wssocketrr, &ipe).await {
wssocketrr.close(None).unwrap();
wssocketrr = websocketreqrep(&ctxwsrr).await;
}
}
}; };
let ctxclone = Arc::clone(&ctxarc); let ctxclone = Arc::clone(&ctxarc);
@ -116,7 +129,8 @@ pub async fn run() {
// log lines // log lines
if ret.len() > 0 { if ret.len() > 0 {
println!("{ret}", ret = ret.join(", ")); let result = ret.join(", ");
log_with_systemd!(format!("{result}"));
} }
let ctxclone = Arc::clone(&ctxarc); let ctxclone = Arc::clone(&ctxarc);
@ -132,9 +146,26 @@ async fn handle_cfg_reload(
) { ) {
let now_cfg_reload = Local::now().trunc_subsecs(0); let now_cfg_reload = Local::now().trunc_subsecs(0);
if (now_cfg_reload - *last_cfg_reload) > Duration::seconds(LOOP_MAX_WAIT as i64) { if (now_cfg_reload - *last_cfg_reload) > Duration::seconds(LOOP_MAX_WAIT as i64) {
let mut ctx = ctxclone.write().await; let inotify;
let inotify = inoarc.read().await; loop {
match ctx.load(&inotify).await { inotify = match inoarc.try_read() {
Ok(o) => o,
Err(e) => {
println!("{e}");
sleep_s(1).await;
continue;
}
};
break;
}
let mut ctxtest = match ctxclone.try_write() {
Ok(o) => o,
Err(e) => {
println!("{e}");
return;
}
};
match ctxtest.load(&inotify).await {
Ok(_) => { Ok(_) => {
*last_cfg_reload = Local::now().trunc_subsecs(0); *last_cfg_reload = Local::now().trunc_subsecs(0);
} }
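handle_cfg_reload now acquires the locks with try_read/try_write instead of awaiting them, so a busy holder no longer stalls the reload path. A small sketch of that pattern with tokio's RwLock; the config type is a stand-in:

use std::sync::Arc;
use tokio::sync::RwLock;

#[derive(Debug, Default)]
struct Config { reloads: u64 }

// Try to take the write lock without blocking; skip this reload cycle if
// another task currently holds the lock.
async fn try_reload(cfg: &Arc<RwLock<Config>>) -> bool {
    match cfg.try_write() {
        Ok(mut guard) => {
            guard.reloads += 1;
            true
        }
        Err(e) => {
            println!("config busy, skipping reload: {e}");
            false
        }
    }
}

#[tokio::main]
async fn main() {
    let cfg = Arc::new(RwLock::new(Config::default()));
    let held = cfg.read().await;        // simulate a long-lived reader
    assert!(!try_reload(&cfg).await);   // write attempt is refused, not blocked
    drop(held);
    assert!(try_reload(&cfg).await);
}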
@ -158,8 +189,8 @@ async fn handle_fwblock(ctxclone: Arc<RwLock<Context>>, ret: &mut Vec<String>, f
// apply firewall blocking // apply firewall blocking
match fwblock(&toblock, ret, fwlen) { match fwblock(&toblock, ret, fwlen) {
Ok(_) => {} Ok(_) => {}
Err(err) => { Err(e) => {
println!("Err: {err}, unable to push firewall rules, use super user") println!("err: {e}, unable to push firewall rules, use super user")
} }
}; };
} }
@ -181,12 +212,12 @@ async fn watchfiles(inoarc: Arc<RwLock<Inotify>>) -> Receiver<FileEvent> {
async fn get_last_file_size(w: &mut HashMap<String, u64>, path: &str) -> (u64, bool) { async fn get_last_file_size(w: &mut HashMap<String, u64>, path: &str) -> (u64, bool) {
let currentlen = match std::fs::metadata(&path.to_string()) { let currentlen = match std::fs::metadata(&path.to_string()) {
Ok(u) => u.len().clone(), Ok(u) => u.len(),
Err(_) => 0u64, Err(_) => 0u64,
}; };
let lastlen = match w.insert(path.to_string(), currentlen) { let lastlen = match w.insert(path.to_string(), currentlen) {
Some(u) => u, Some(u) => u,
None => 0u64, None => currentlen,
}; };
(lastlen, lastlen != currentlen) (lastlen, lastlen != currentlen)
} }
@ -201,20 +232,16 @@ async fn compare_files_changes(
let modfiles = inrx.recv().await.unwrap(); let modfiles = inrx.recv().await.unwrap();
let mut iplist: Vec<IpData> = vec![]; let mut iplist: Vec<IpData> = vec![];
let sask; let sas = {
let sas;
{
let ctx = ctxarc.read().await; let ctx = ctxarc.read().await;
sas = ctx.sas.clone();
sask = sas.keys();
tnets = ctx.cfg.build_trustnets(); tnets = ctx.cfg.build_trustnets();
} ctx.sas.clone()
};
match modfiles.inevent.name { match modfiles.inevent.name {
Some(name) => { Some(name) => {
let filename = name.to_str().unwrap(); let filename = name.to_str().unwrap();
for sak in sask { for (sak, sa) in sas.clone().iter_mut() {
let sa = sas.get(sak).unwrap();
if modfiles.inevent.wd == sa.wd { if modfiles.inevent.wd == sa.wd {
let handle: String; let handle: String;
if sa.filename.as_str() == "" { if sa.filename.as_str() == "" {
@ -225,13 +252,11 @@ async fn compare_files_changes(
continue; continue;
} }
let (filesize, sizechanged); let (filesize, sizechanged) = {
{
let mut ctx = ctxarc.write().await; let mut ctx = ctxarc.write().await;
let sa = ctx.sas.get_mut(sak).unwrap(); let sa = ctx.sas.get_mut(sak).unwrap();
(filesize, sizechanged) = get_last_file_size(&mut sa.watchedfiles, &handle).await
get_last_file_size(&mut sa.watchedfiles, &handle).await; };
}
if !sizechanged { if !sizechanged {
continue; continue;
@ -254,7 +279,7 @@ async fn compare_files_changes(
} }
} }
for ip in iplist { for ip in iplist {
let ipe = ipevent!("add", "file", gethostname(true), ip); let ipe = ipevent!("add", "file", gethostname(true), Some(ip));
let ipetx = ipeventtx.read().await; let ipetx = ipeventtx.read().await;
ipetx.send(ipe).await.unwrap(); ipetx.send(ipe).await.unwrap();
} }
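The main loop's timeout branch now doubles as a websocket keepalive. A reduced sketch of a tokio::select! loop that either handles a received event or sends a ping after LOOP_MAX_WAIT seconds of silence; the transport is faked with a channel and print statements:

use tokio::sync::mpsc;
use tokio::time::{sleep, Duration};

const LOOP_MAX_WAIT: u64 = 5;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<String>(16);

    tokio::spawn(async move {
        tx.send("event: 192.0.2.1".to_string()).await.unwrap();
        // After this the sender goes quiet and only pings are emitted.
    });

    for _ in 0..3 {
        tokio::select! {
            Some(ev) = rx.recv() => {
                println!("received {ev}");
            }
            _ = sleep(Duration::from_secs(LOOP_MAX_WAIT)) => {
                // Nothing arrived within the window: send a keepalive instead.
                println!("sending ping to websocket");
            }
        }
    }
}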


@ -4,53 +4,72 @@ use serde_json;
use std::io; use std::io;
use std::sync::Arc; use std::sync::Arc;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use tokio::net::TcpSocket; use tokio::net::TcpListener;
use tokio::sync::RwLock; use tokio::sync::RwLock;
pub async fn apiserver(ctxarc: &Arc<RwLock<Context>>) -> io::Result<()> { pub async fn apiserver(ctxarc: &Arc<RwLock<Context>>) -> io::Result<()> {
let ctxarc = ctxarc.clone(); let ctxarc = ctxarc.clone();
let addr; let addr: String = { ctxarc.read().await.cfg.api.parse().unwrap() };
{ let listener = match TcpListener::bind(addr).await {
let ctx = ctxarc.read().await; Ok(o) => o,
addr = ctx.cfg.api.parse().unwrap(); Err(e) => {
println!("error: {e}");
std::process::exit(1);
} }
};
let socket = TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let listener = socket.listen(1024).unwrap();
tokio::spawn(async move { tokio::spawn(async move {
loop { loop {
match listener.accept().await { match listener.accept().await {
Ok((stream, _addr)) => { Ok((mut socket, _addr)) => {
//let mut buf = [0; 1024]; let mut buf = vec![0; 1024];
let data;
{ match socket.readable().await {
let ctx = ctxarc.read().await; Ok(_) => {
data = serde_json::to_string(&ctx.blocklist); match socket.try_read(&mut buf) {
Ok(_) => {}
Err(e) => {
println!("error: {e}");
continue;
}
};
}
Err(e) => {
println!("error: {e}");
continue;
}
} }
match data { let msg = match String::from_utf8(buf.to_vec()) {
Ok(dt) => { Ok(o) => o.trim_matches(char::from(0)).trim().to_string(),
let (_reader, mut writer) = stream.into_split(); Err(_) => "".to_string(),
match writer.write_all(format!("{dt}").as_bytes()).await { };
let res = format_result(&ctxarc, msg.as_str()).await;
match socket.write_all(res.as_bytes()).await {
Ok(_) => {} Ok(_) => {}
Err(err) => { Err(e) => {
println!("{err}"); println!("error: {e}");
} }
} }
} }
Err(err) => { Err(err) => {
println!("unable to serialize data: {err}"); println!("error: {err}");
}
}
}
Err(err) => {
println!("couldn't get client: {}", err)
} }
} }
} }
}); });
Ok(()) Ok(())
} }
async fn format_result(ctxarc: &Arc<RwLock<Context>>, mode: &str) -> String {
let data;
let ctx = ctxarc.read().await;
match mode {
"cfg" => data = serde_json::to_string(&ctx.cfg).unwrap(),
"blocklist" => data = serde_json::to_string(&ctx.blocklist).unwrap(),
_ => data = serde_json::to_string(&ctx.blocklist).unwrap(),
};
data
}
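The API socket moves from TcpSocket to a plain TcpListener and now reads a one-word command ("cfg" or "blocklist") before answering. A trimmed sketch of that accept, read, respond loop, reusing the default 127.0.0.1:8060 address from the diff and a static string in place of the serialized context:

use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;

fn format_result(mode: &str) -> String {
    match mode {
        "cfg" => r#"{"kind":"config"}"#.to_string(),
        _ => r#"{"kind":"blocklist"}"#.to_string(),
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:8060").await?;
    loop {
        let (mut socket, _addr) = listener.accept().await?;
        let mut buf = vec![0u8; 1024];
        // Read the client's command, trim it, then write the matching answer.
        let n = socket.read(&mut buf).await?;
        let msg = String::from_utf8_lossy(&buf[..n]).trim().to_string();
        let res = format_result(&msg);
        socket.write_all(res.as_bytes()).await?;
    }
}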

src/old.rs (new file, 28 lines)

@ -0,0 +1,28 @@
pub fn _search_subfolders(path: &Path) -> Vec<String> {
let dirs = std::fs::read_dir(path).unwrap();
let mut folders: Vec<String> = vec![];
for dir in dirs {
let dirpath = dir.unwrap().path();
let path = Path::new(dirpath.as_path());
if path.is_dir() {
folders.push(dirpath.to_str().unwrap().to_string());
for f in _search_subfolders(path) {
folders.push(f);
}
}
}
folders
}
pub fn _dedup<T: Ord + PartialOrd>(list: &mut Vec<T>) -> usize {
// Begin with sorting entries
list.sort();
// Then deduplicate
list.dedup();
// Return the length
list.len()
}
pub async fn _sleep_ms(ms: u64) {
sleep(Duration::from_millis(ms)).await;
}
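These helpers are parked here rather than deleted. For reference, the sort-then-dedup trick that _dedup relies on, in a tiny usage sketch:

fn main() {
    let mut list = vec![3, 1, 2, 3, 1];
    list.sort();   // Vec::dedup only removes consecutive duplicates,
    list.dedup();  // so the list has to be sorted first
    assert_eq!(list, vec![1, 2, 3]);
    println!("{} unique entries", list.len());
}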


@ -1,21 +1,14 @@
use lazy_static::lazy_static;
use nix::unistd; use nix::unistd;
use regex::Regex;
use std::boxed::Box; use std::boxed::Box;
use std::fs::File; use std::fs::File;
use std::io::*; use std::io::*;
use std::path::Path;
use tokio::time::{sleep, Duration}; use tokio::time::{sleep, Duration};
lazy_static! {
static ref R_FILE_GZIP: Regex = Regex::new(r".*\.gz.*").unwrap();
}
pub fn read_lines(filename: &String, offset: u64) -> Option<Box<dyn Read>> { pub fn read_lines(filename: &String, offset: u64) -> Option<Box<dyn Read>> {
let mut file = match File::open(filename) { let mut file = match File::open(filename) {
Ok(f) => f, Ok(o) => o,
Err(err) => { Err(e) => {
println!("{err}"); println!("error: {e}");
return None; return None;
} }
}; };
@ -24,23 +17,15 @@ pub fn read_lines(filename: &String, offset: u64) -> Option<Box<dyn Read>> {
Some(lines) Some(lines)
} }
pub fn _dedup<T: Ord + PartialOrd>(list: &mut Vec<T>) -> usize {
// Begin with sorting entries
list.sort();
// Then deduplicate
list.dedup();
// Return the length
list.len()
}
pub async fn _sleep_ms(ms: u64) {
sleep(Duration::from_millis(ms)).await;
}
pub async fn sleep_s(s: u64) { pub async fn sleep_s(s: u64) {
sleep(Duration::from_secs(s)).await; sleep(Duration::from_secs(s)).await;
} }
#[allow(dead_code)]
pub async fn sleep_ms(m: u64) {
sleep(Duration::from_millis(m)).await;
}
pub fn gethostname(show_fqdn: bool) -> String { pub fn gethostname(show_fqdn: bool) -> String {
let hostname_cstr = unistd::gethostname().expect("Failed getting hostname"); let hostname_cstr = unistd::gethostname().expect("Failed getting hostname");
let fqdn = hostname_cstr let fqdn = hostname_cstr
@ -53,19 +38,3 @@ pub fn gethostname(show_fqdn: bool) -> String {
} }
hostname[0].to_string() hostname[0].to_string()
} }
pub fn _search_subfolders(path: &Path) -> Vec<String> {
let dirs = std::fs::read_dir(path).unwrap();
let mut folders: Vec<String> = vec![];
for dir in dirs {
let dirpath = dir.unwrap().path();
let path = Path::new(dirpath.as_path());
if path.is_dir() {
folders.push(dirpath.to_str().unwrap().to_string());
for f in _search_subfolders(path) {
folders.push(f);
}
}
}
folders
}
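read_lines hands back a reader positioned at the previously recorded file size, so only appended data is scanned. A compact sketch of that open, seek, return pattern, with error handling reduced to Option as in the diff; the path is hypothetical:

use std::fs::File;
use std::io::{Read, Seek, SeekFrom};

// Open a file and skip everything before `offset`, returning a reader
// that yields only the newly appended bytes.
fn read_from_offset(filename: &str, offset: u64) -> Option<Box<dyn Read>> {
    let mut file = match File::open(filename) {
        Ok(f) => f,
        Err(e) => {
            println!("error: {e}");
            return None;
        }
    };
    if file.seek(SeekFrom::Start(offset)).is_err() {
        return None;
    }
    Some(Box::new(file))
}

fn main() {
    // Read whatever was appended after the first 1024 bytes.
    if let Some(mut reader) = read_from_offset("/var/log/auth.log", 1024) {
        let mut tail = String::new();
        let _ = reader.read_to_string(&mut tail);
        println!("{} new bytes", tail.len());
    }
}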


@@ -11,12 +11,12 @@ pub async fn send_to_ipbl_api(server: &str, ip: &IpEvent) {
     let mut try_req = 0;
     let client = httpclient();
     loop {
-        match push_ip(&client, &server, &ip.ipdata).await {
+        match push_ip(&client, &server, &ip.ipdata.clone().unwrap()).await {
             Ok(_) => {
                 break;
             }
-            Err(err) => {
-                println!("{err}");
+            Err(e) => {
+                println!("error: {e}");
                 sleep_s(1).await;
                 if try_req == MAX_FAILED_API_RATE {
                     break;
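send_to_ipbl_api keeps retrying a failed push with a one-second pause and gives up after MAX_FAILED_API_RATE attempts. The same bounded-retry shape in isolation, with a stubbed fallible call in place of push_ip and an assumed value for the constant, which the diff does not show:

use tokio::time::{sleep, Duration};

const MAX_FAILED_API_RATE: u32 = 10; // assumed value for illustration

// Stand-in for push_ip(): pretend the API fails on every attempt.
async fn push_ip() -> Result<(), String> {
    Err("connection refused".to_string())
}

#[tokio::main]
async fn main() {
    let mut try_req = 0;
    loop {
        match push_ip().await {
            Ok(_) => break,
            Err(e) => {
                println!("error: {e}");
                sleep(Duration::from_secs(1)).await;
                if try_req == MAX_FAILED_API_RATE {
                    break;
                }
                try_req += 1;
            }
        }
    }
}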


@ -14,13 +14,13 @@ use tungstenite::*;
pub async fn websocketreqrep( pub async fn websocketreqrep(
ctxarc: &Arc<RwLock<Context>>, ctxarc: &Arc<RwLock<Context>>,
) -> WebSocket<MaybeTlsStream<TcpStream>> { ) -> WebSocket<MaybeTlsStream<TcpStream>> {
let (mut wssocketrr, bootstrap_event, cfg); let (mut wssocketrr, bootstrap_event, wscfg);
{ {
let ctx = ctxarc.read().await; let ctx = ctxarc.read().await;
bootstrap_event = ctx.cfg.bootstrap_event().clone(); bootstrap_event = ctx.cfg.bootstrap_event().clone();
cfg = ctx.cfg.ws.get("reqrep").unwrap().clone(); wscfg = ctx.cfg.ws.get("reqrep").unwrap().clone();
} }
wssocketrr = websocketconnect(&cfg, &gethostname(true)).await.unwrap(); wssocketrr = websocketconnect(&wscfg, &gethostname(true)).await.unwrap();
send_to_ipbl_websocket(&mut wssocketrr, &bootstrap_event).await; send_to_ipbl_websocket(&mut wssocketrr, &bootstrap_event).await;
return wssocketrr; return wssocketrr;
@ -45,17 +45,26 @@ pub async fn websocketpubsub(
Ok(msg) => { Ok(msg) => {
let tosend: IpEvent = match serde_json::from_str(msg.to_string().as_str()) { let tosend: IpEvent = match serde_json::from_str(msg.to_string().as_str()) {
Ok(o) => o, Ok(o) => o,
Err(_e) => { Err(e) => {
println!("error in pubsub: {e:?}");
continue; continue;
} }
}; };
if tosend.ipdata.hostname != gethostname(true) match tosend.ipdata.clone() {
Some(o) => {
if o.hostname != gethostname(true)
|| tosend.msgtype == "init".to_string() || tosend.msgtype == "init".to_string()
{ {
let txps = txpubsub.read().await; let txps = txpubsub.read().await;
txps.send(tosend).await.unwrap(); txps.send(tosend).await.unwrap();
} }
} }
None => {
let txps = txpubsub.read().await;
txps.send(tosend.clone()).await.unwrap();
}
}
}
Err(e) => { Err(e) => {
println!("error in pubsub: {e:?}"); println!("error in pubsub: {e:?}");
ws.close(None).unwrap(); ws.close(None).unwrap();
@ -105,11 +114,12 @@ pub async fn send_to_ipbl_websocket(
Ok(_) => {} Ok(_) => {}
Err(e) => { Err(e) => {
println!("err send read: {e:?}"); println!("err send read: {e:?}");
return handle_websocket_error(ws); return false;
} }
}; };
} else { } else {
return handle_websocket_error(ws); println!("can't write to socket");
return false;
}; };
if ws.can_read() { if ws.can_read() {
@ -117,16 +127,13 @@ pub async fn send_to_ipbl_websocket(
Ok(_) => {} Ok(_) => {}
Err(e) => { Err(e) => {
println!("err send read: {e:?}"); println!("err send read: {e:?}");
return handle_websocket_error(ws); return false;
} }
}; };
} else { } else {
return handle_websocket_error(ws); println!("can't read from socket");
return false;
}; };
true true
} }
fn handle_websocket_error(ws: &mut WebSocket<MaybeTlsStream<TcpStream>>) -> bool {
ws.close(None).unwrap();
return false;
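The websocket changes drop handle_websocket_error in favour of returning false and letting the caller close and rebuild the connection. A schematic sketch of that pattern, assuming tungstenite 0.21's connect/send API with a TLS feature enabled; the URL and payload are placeholders:

use std::net::TcpStream;
use tungstenite::stream::MaybeTlsStream;
use tungstenite::{connect, Message, WebSocket};

type Ws = WebSocket<MaybeTlsStream<TcpStream>>;

fn ws_connect(url: &str) -> Ws {
    let (socket, _response) = connect(url).expect("websocket connect failed");
    socket
}

// Send one message; report failure instead of handling it here, so the
// caller can decide to rebuild the connection.
fn send_event(ws: &mut Ws, payload: &str) -> bool {
    if !ws.can_write() {
        println!("can't write to socket");
        return false;
    }
    match ws.send(Message::Text(payload.to_string())) {
        Ok(_) => true,
        Err(e) => {
            println!("err send: {e:?}");
            false
        }
    }
}

fn main() {
    let url = "wss://ipbl.example.org/ws"; // placeholder endpoint
    let mut ws = ws_connect(url);
    if !send_event(&mut ws, r#"{"msgtype":"ping"}"#) {
        let _ = ws.close(None);
        ws = ws_connect(url); // reconnect and retry once
        send_event(&mut ws, r#"{"msgtype":"ping"}"#);
    }
}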
}