Compare commits


No commits in common. "35806a205895f43ebb2dd9b095f269338e5f2c2c" and "5c0d12cafe32822d26a2e00332c12523d4a0773e" have entirely different histories.

4 changed files with 21 additions and 50 deletions

View file

@@ -2,13 +2,12 @@
 This is a Misskey proxy worker for the ゆめちのくに (Yumechi-no-kuni) instance. Runs natively on both local and Cloudflare Workers environments!
+It has been deployed on my instance since 11/14 under the AppArmor deployment profile.
 
-Currently to do:
+Work in progress! Currently to do:
 - [X] Content-Type sniffing
 - [X] SVG rendering
-- [ ] Font rendering (will not run on Cloudflare Workers Free plan)
+- [ ] Font rendering (likely will not run on Cloudflare Workers Free plan)
 - [X] Preset image resizing
 - [X] Opportunistic Redirection on large video files
 - [X] RFC9110 compliant proxy loop detection with defensive programming against known vulnerable proxies
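
For context on the loop-detection item above: RFC 9110 §7.6.3 has each intermediary append its pseudonym to the `Via` header, so a proxy can detect that a request has looped back to itself. A minimal sketch of that check, assuming a hypothetical pseudonym constant and hop cap (this is not the crate's actual implementation):

```rust
/// Pseudonym this proxy would append to `Via` (hypothetical value).
const VIA_PSEUDONYM: &str = "yumechi-no-kuni-proxy-worker";

/// Defensive cap on hop count, for upstream proxies that rewrite or
/// drop pseudonyms instead of appending honestly.
const MAX_HOPS: usize = 8;

fn is_proxy_loop(via: Option<&str>) -> bool {
    let Some(via) = via else { return false };
    let mut hops = 0;
    for entry in via.split(',') {
        hops += 1;
        // Each entry is "<protocol> <received-by> [comment]"; the
        // received-by pseudonym is the second whitespace-separated token.
        let mut parts = entry.trim().split_whitespace();
        let _protocol = parts.next();
        if parts.next() == Some(VIA_PSEUDONYM) {
            return true; // our own pseudonym came back: a loop
        }
    }
    hops > MAX_HOPS
}

fn main() {
    assert!(!is_proxy_loop(None));
    assert!(!is_proxy_loop(Some("1.1 upstream.example")));
    assert!(is_proxy_loop(Some(
        "1.1 upstream.example, 1.1 yumechi-no-kuni-proxy-worker"
    )));
}
```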
@@ -106,8 +105,4 @@ All major distros should have an easy-to-follow guide on how to do this. Typical
 This will create a highly restrictive environment: try it yourself with `aa-exec -p yumechi-no-kuni-proxy-worker [initial_foothold]` and see if you can break out :). And that is just the first layer of defense, try the more restrictive subprofiles:
 - `yumechi-no-kuni-proxy-worker//serve`: irreversibly dropped into before listening on the network begins. Restricts loading additional code and access to configuration files.
-- `yumechi-no-kuni-proxy-worker//serve//image`: absolutely no file, network or capability access.
-
-## Docker
-If you still want to use Docker for some reason, you can use the `Dockerfile` provided.
+- `yumechi-no-kuni-proxy-worker//serve//image`: absolutely no file, network or capability access.
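
The "irreversibly dropped into" transition can be done in-process. A minimal sketch, assuming the standard AppArmor procfs interface that libapparmor's `aa_change_profile()` wraps; the helper and its error handling are illustrative, not the project's code:

```rust
use std::io::Write;

/// Hypothetical helper: transition the calling task into `profile`.
/// `changeprofile` is one-way; there is no transition back out, and the
/// current profile's `change_profile` rules must permit the target.
fn drop_into_subprofile(profile: &str) -> std::io::Result<()> {
    // Newer kernels expose /proc/self/attr/apparmor/current; fall back
    // to the legacy /proc/self/attr/current path.
    let mut attr = std::fs::OpenOptions::new()
        .write(true)
        .open("/proc/self/attr/apparmor/current")
        .or_else(|_| {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/proc/self/attr/current")
        })?;
    attr.write_all(format!("changeprofile {profile}").as_bytes())?;
    Ok(())
}

fn main() {
    // Only succeeds when already confined under the parent profile.
    if let Err(e) = drop_into_subprofile("yumechi-no-kuni-proxy-worker//serve") {
        eprintln!("failed to change AppArmor profile: {e}");
    }
}
```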

View file

@@ -74,8 +74,7 @@ profile yumechi-no-kuni-proxy-worker @{prog_path} {
   /{,usr/}{,local/}{,s}bin/@{prog} ixr,
   owner /var/lib/@{prog}/{,bin}/@{prog} ixr,
 
-  signal (send, receive) set=int,term,kill peer=yume-proxy-workers//serve,
-  signal (send) set=int,term,kill,usr1 peer=yume-proxy-workers//serve//image,
+  signal (send) peer=yume-proxy-workers//serve//image,
 
   ^image {
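
A hedged sketch of what these rules mediate (names illustrative, and it assumes the SIGUSR1 cancel handler from the next file is installed, otherwise the signal's default action kills the whole process): AppArmor confinement is per-task, so a supervisor thread still under `//serve` may deliver a signal to a worker thread that has transitioned to `//serve//image` only if a `signal (send) peer=...` rule allows it.

```rust
use std::os::unix::thread::JoinHandleExt;

fn main() {
    // Worker thread; in the real service it would change to the
    // //serve//image subprofile before touching untrusted image data.
    let worker = std::thread::spawn(|| {
        std::thread::sleep(std::time::Duration::from_secs(60)); // "stuck"
    });
    let tid = worker.as_pthread_t();

    // Watchdog path: deliver the cancel signal to that one thread.
    // Under AppArmor this delivery is what the `signal (send)
    // peer=...//serve//image` rule authorizes; without it, EPERM.
    unsafe {
        libc::pthread_kill(tid, libc::SIGUSR1);
    }
    let _ = worker.join();
}
```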

View file

@@ -828,42 +828,19 @@ pub struct App<C: UpstreamClient, S: Sandboxing> {
 /// without triggering the resource limits.
 #[allow(unsafe_code)]
 pub fn register_cancel_handler() {
-    static STRIKES: AtomicU64 = AtomicU64::new(0);
     #[cfg(target_family = "unix")]
     unsafe {
-        fn estop() {
-            unsafe {
-                let pid = libc::getpid();
-                libc::kill(pid, libc::SIGTERM);
-                libc::sleep(5);
-                libc::kill(pid, libc::SIGTERM);
-                libc::sleep(2);
-                libc::kill(pid, libc::SIGKILL);
-            }
-        }
-
         unsafe extern "C" fn cancel_handler(_: libc::c_int) {
-            let strikes = STRIKES.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
-            log::error!(
-                "Thread blocked for too long, force pthread_exit() (strikes: {}/{})",
-                strikes,
-                5
-            );
-            if strikes == 5 {
-                log::error!("Too many strikes, exiting");
-                estop();
-            }
+            log::error!("Received cancel signal, stopping thread");
             libc::pthread_exit(&sandbox::EXIT_TIMEOUT as *const _ as *mut _);
         }
         if libc::signal(libc::SIGUSR1, cancel_handler as usize) == libc::SIG_ERR {
             log::error!(
-                "Failed to register worker cancel handler: {}",
+                "Failed to register cancel handler: {}",
                 std::io::Error::last_os_error()
             );
         }
+        log::info!("Registered worker cancel handler.");
     }
 }
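
The new handler drops the strike-counting escalation and simply exits the signalled thread with the `EXIT_TIMEOUT` sentinel. It is registered with `signal(2)`; for comparison, a sketch of the equivalent `sigaction(2)` registration, which makes the absence of `SA_RESTART` explicit (hypothetical helper, not part of this diff):

```rust
/// Hypothetical helper: register `handler` for SIGUSR1 via sigaction(2).
/// sa_flags deliberately omits SA_RESTART so a blocked syscall in the
/// stuck thread is interrupted instead of transparently restarted.
unsafe fn register_with_sigaction(
    handler: unsafe extern "C" fn(libc::c_int),
) -> std::io::Result<()> {
    let mut sa: libc::sigaction = std::mem::zeroed();
    sa.sa_sigaction = handler as usize;
    sa.sa_flags = 0;
    libc::sigemptyset(&mut sa.sa_mask);
    if libc::sigaction(libc::SIGUSR1, &sa, std::ptr::null_mut()) != 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(())
}

unsafe extern "C" fn cancel_handler(_: libc::c_int) {
    // The real handler calls pthread_exit(); kept inert in this sketch.
}

fn main() {
    unsafe { register_with_sigaction(cancel_handler).expect("sigaction failed") };
}
```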

View file

@@ -227,8 +227,6 @@ fn main() {
         }
     }
-    yumechi_no_kuni_proxy_worker::register_cancel_handler();
     let runtime = tokio::runtime::Builder::new_multi_thread()
         .max_blocking_threads(512)
         .enable_all()
@@ -238,8 +236,23 @@ fn main() {
     log::info!("Spawned Tokio reactor.");
+    yumechi_no_kuni_proxy_worker::register_cancel_handler();
     runtime
         .block_on(async move {
+            #[cfg(not(feature = "axum-server"))]
+            {
+                let listener = tokio::net::TcpListener::from_std(listener)
+                    .expect("Failed to pass socket to Tokio");
+                log::warn!(
+                    "Built without axum-server feature, using hyper without graceful shutdown"
+                );
+                log::info!("Ready for connections.");
+                axum::serve(listener, ms).await.expect("Failed to serve");
+                return;
+            }
+
             #[cfg(feature = "metrics")]
             {
                 let reg = prometheus::default_registry();
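
This hunk moves `register_cancel_handler()` to after the reactor is spawned and hoists the non-`axum-server` serving path to the top of `block_on`; it is truncated at the metrics block. For context, a common way to expose the prometheus default registry is to encode its gathered metric families on a route; a hedged sketch (route name and wiring illustrative, not the project's actual code):

```rust
use axum::{routing::get, Router};
use prometheus::{Encoder, TextEncoder};

// Hypothetical /metrics route over the default registry.
fn metrics_router() -> Router {
    Router::new().route(
        "/metrics",
        get(|| async {
            let families = prometheus::default_registry().gather();
            let mut buf = Vec::new();
            TextEncoder::new()
                .encode(&families, &mut buf)
                .expect("encode metrics");
            String::from_utf8(buf).expect("metrics are valid UTF-8")
        }),
    )
}

fn main() {
    // In the real service this router would be merged into the app.
    let _routes = metrics_router();
}
```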
@@ -285,19 +298,6 @@ fn main() {
                 });
             }
-            #[cfg(not(feature = "axum-server"))]
-            {
-                let listener = tokio::net::TcpListener::from_std(listener)
-                    .expect("Failed to pass socket to Tokio");
-                log::warn!(
-                    "Built without axum-server feature, using hyper without graceful shutdown"
-                );
-                log::info!("Ready for connections.");
-                axum::serve(listener, ms).await.expect("Failed to serve");
-                return;
-            }
+
             #[cfg(feature = "axum-server")]
             {
                 let handle = axum_server::Handle::new();
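
The final hunk is cut off right after the `Handle` is created. For context, a minimal sketch of how an `axum_server::Handle` typically drives the graceful shutdown that the plain `axum::serve` fallback above lacks; the address, timeout, and router here are illustrative, not the project's configuration:

```rust
use std::{net::SocketAddr, time::Duration};

#[tokio::main]
async fn main() {
    let app = axum::Router::new().route("/", axum::routing::get(|| async { "ok" }));
    let handle = axum_server::Handle::new();

    // Watcher task: on Ctrl-C, stop accepting new connections and give
    // in-flight requests up to 30 seconds to finish.
    let watcher = handle.clone();
    tokio::spawn(async move {
        tokio::signal::ctrl_c().await.expect("failed to listen for signal");
        watcher.graceful_shutdown(Some(Duration::from_secs(30)));
    });

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    axum_server::bind(addr)
        .handle(handle)
        .serve(app.into_make_service())
        .await
        .expect("server error");
}
```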