Compare commits

...

2 commits

Author SHA1 Message Date
35806a2058
Update README
Signed-off-by: eternal-flame-AD <yume@yumechi.jp>
2024-11-23 00:48:39 -06:00
8f1853e773
restart process when too many runaway process happened
Signed-off-by: eternal-flame-AD <yume@yumechi.jp>
2024-11-22 20:18:22 -06:00
4 changed files with 50 additions and 21 deletions

View file

@ -2,12 +2,13 @@
This is a misskey proxy worker for ゆめちのくに (Yumechi-no-kuni) instance. Runs natively on both local and Cloudflare Workers environments! This is a misskey proxy worker for ゆめちのくに (Yumechi-no-kuni) instance. Runs natively on both local and Cloudflare Workers environments!
It has been deployed on my instance since 11/14 under the AppArmor deployment profile.
Work in progress! Currently to do: Currently to do:
- [X] Content-Type sniffing - [X] Content-Type sniffing
- [X] SVG rendering - [X] SVG rendering
- [ ] Font rendering (likely will not run on Cloudflare Workers Free plan) - [ ] Font rendering (will not run on Cloudflare Workers Free plan)
- [X] Preset image resizing - [X] Preset image resizing
- [X] Opportunistic Redirection on large video files - [X] Opportunistic Redirection on large video files
- [X] RFC9110 compliant proxy loop detection with defensive programming against known vulnerable proxies - [X] RFC9110 compliant proxy loop detection with defensive programming against known vulnerable proxies
@ -105,4 +106,8 @@ All major distros should have an easy-to-follow guide on how to do this. Typical
This will create a highly restrictive environment: try it yourself with `aa-exec -p yumechi-no-kuni-proxy-worker [initial_foothold]` and see if you can break out :). And that is just the first layer of defense, try the more restrictive subprofiles: This will create a highly restrictive environment: try it yourself with `aa-exec -p yumechi-no-kuni-proxy-worker [initial_foothold]` and see if you can break out :). And that is just the first layer of defense, try the more restrictive subprofiles:
- `yumechi-no-kuni-proxy-worker//serve`: irreversibly dropped into before listening on the network begins. Restrict loading additional code and access to configuration files. - `yumechi-no-kuni-proxy-worker//serve`: irreversibly dropped into before listening on the network begins. Restrict loading additional code and access to configuration files.
- `yumechi-no-kuni-proxy-worker//serve//image`: absolutely no file, network or capability access. - `yumechi-no-kuni-proxy-worker//serve//image`: absolutely no file, network or capability access.
## Docker
If, for some reason, you still want to use Docker, you can use the provided `Dockerfile`.

View file

@ -74,7 +74,8 @@ profile yumechi-no-kuni-proxy-worker @{prog_path} {
/{,usr/}{,local/}{,s}bin/@{prog} ixr, /{,usr/}{,local/}{,s}bin/@{prog} ixr,
owner /var/lib/@{prog}/{,bin}/@{prog} ixr, owner /var/lib/@{prog}/{,bin}/@{prog} ixr,
signal (send) peer=yume-proxy-workers//serve//image, signal (send, receive) set=int,term,kill peer=yume-proxy-workers//serve,
signal (send) set=int,term,kill,usr1 peer=yume-proxy-workers//serve//image,
^image { ^image {

View file

@ -828,19 +828,42 @@ pub struct App<C: UpstreamClient, S: Sandboxing> {
/// without triggering the resource limits. /// without triggering the resource limits.
#[allow(unsafe_code)] #[allow(unsafe_code)]
pub fn register_cancel_handler() { pub fn register_cancel_handler() {
static STRIKES: AtomicU64 = AtomicU64::new(0);
#[cfg(target_family = "unix")] #[cfg(target_family = "unix")]
unsafe { unsafe {
fn estop() {
unsafe {
let pid = libc::getpid();
libc::kill(pid, libc::SIGTERM);
libc::sleep(5);
libc::kill(pid, libc::SIGTERM);
libc::sleep(2);
libc::kill(pid, libc::SIGKILL);
}
}
unsafe extern "C" fn cancel_handler(_: libc::c_int) { unsafe extern "C" fn cancel_handler(_: libc::c_int) {
log::error!("Received cancel signal, stopping thread"); let strikes = STRIKES.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
log::error!(
"Thread blocked for too long, force pthread_exit() (strikes: {}/{})",
strikes,
5
);
if strikes == 5 {
log::error!("Too many strikes, exiting");
estop();
}
libc::pthread_exit(&sandbox::EXIT_TIMEOUT as *const _ as *mut _); libc::pthread_exit(&sandbox::EXIT_TIMEOUT as *const _ as *mut _);
} }
if libc::signal(libc::SIGUSR1, cancel_handler as usize) == libc::SIG_ERR { if libc::signal(libc::SIGUSR1, cancel_handler as usize) == libc::SIG_ERR {
log::error!( log::error!(
"Failed to register cancel handler: {}", "Failed to register worker cancel handler: {}",
std::io::Error::last_os_error() std::io::Error::last_os_error()
); );
} }
log::info!("Registered worker cancel handler.");
} }
} }

View file

@ -227,6 +227,8 @@ fn main() {
} }
} }
yumechi_no_kuni_proxy_worker::register_cancel_handler();
let runtime = tokio::runtime::Builder::new_multi_thread() let runtime = tokio::runtime::Builder::new_multi_thread()
.max_blocking_threads(512) .max_blocking_threads(512)
.enable_all() .enable_all()
@ -236,23 +238,8 @@ fn main() {
log::info!("Spawned Tokio reactor."); log::info!("Spawned Tokio reactor.");
yumechi_no_kuni_proxy_worker::register_cancel_handler();
runtime runtime
.block_on(async move { .block_on(async move {
#[cfg(not(feature = "axum-server"))]
{
let listener = tokio::net::TcpListener::from_std(listener)
.expect("Failed to pass socket to Tokio");
log::warn!(
"Built without axum-server feature, using hyper without graceful shutdown"
);
log::info!("Ready for connections.");
axum::serve(listener, ms).await.expect("Failed to serve");
return;
}
#[cfg(feature = "metrics")] #[cfg(feature = "metrics")]
{ {
let reg = prometheus::default_registry(); let reg = prometheus::default_registry();
@ -298,6 +285,19 @@ fn main() {
}); });
} }
#[cfg(not(feature = "axum-server"))]
{
let listener = tokio::net::TcpListener::from_std(listener)
.expect("Failed to pass socket to Tokio");
log::warn!(
"Built without axum-server feature, using hyper without graceful shutdown"
);
log::info!("Ready for connections.");
axum::serve(listener, ms).await.expect("Failed to serve");
return;
}
#[cfg(feature = "axum-server")] #[cfg(feature = "axum-server")]
{ {
let handle = axum_server::Handle::new(); let handle = axum_server::Handle::new();