Kernel module: basic IPv4 support with debug settings

Vadim Vetrov 2024-09-01 16:07:47 +03:00
parent 0a679ea41c
commit 27629ba0cc
9 changed files with 397 additions and 278 deletions

Kbuild

@@ -1,3 +1,3 @@
obj-m := ipt_YTUNBLOCK.o
-ipt_YTUNBLOCK-objs := iptk_YTUNBLOCK.o mangle.o
+ipt_YTUNBLOCK-objs := iptk_YTUNBLOCK.o mangle.o quic.o utils.o
ccflags-y := -std=gnu11 -Wno-unused-variable -DKERNEL_SPACE -DDEBUG

config.h

@@ -1,6 +1,10 @@
#ifndef YTB_CONFIG_H
#define YTB_CONFIG_H
#ifndef KERNEL_SPACE
#define USER_SPACE
#endif
typedef int (*raw_send_t)(const unsigned char *data, unsigned int data_len);
/**
* Sends the packet after delay_ms. The function should schedule send and return immediately
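This hunk ends mid-comment, but the contract it documents is clear: the callback must schedule the send and return immediately, without blocking. A minimal userspace sketch of one way to satisfy that contract, assuming POSIX threads (send_delayed and delayed_send_worker are illustrative names, not code from this commit):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

typedef int (*raw_send_t)(const unsigned char *data, unsigned int data_len);

struct delayed_send_ctx {
	raw_send_t send;        /* backend send callback */
	unsigned char *data;    /* owned copy of the packet */
	unsigned int data_len;
	unsigned int delay_ms;
};

/* Worker thread: sleep for delay_ms, fire the send, release the copy. */
static void *delayed_send_worker(void *arg) {
	struct delayed_send_ctx *ctx = arg;
	struct timespec ts = {
		.tv_sec = ctx->delay_ms / 1000,
		.tv_nsec = (long)(ctx->delay_ms % 1000) * 1000000L,
	};
	nanosleep(&ts, NULL);
	ctx->send(ctx->data, ctx->data_len);
	free(ctx->data);
	free(ctx);
	return NULL;
}

/* Schedules the send and returns immediately, as the contract requires. */
static int send_delayed(raw_send_t send, const unsigned char *data,
		unsigned int data_len, unsigned int delay_ms) {
	struct delayed_send_ctx *ctx = malloc(sizeof(*ctx));
	if (ctx == NULL)
		return -1;
	ctx->data = malloc(data_len);
	if (ctx->data == NULL) {
		free(ctx);
		return -1;
	}
	memcpy(ctx->data, data, data_len);
	ctx->data_len = data_len;
	ctx->delay_ms = delay_ms;
	ctx->send = send;

	pthread_t tid;
	if (pthread_create(&tid, NULL, delayed_send_worker, ctx) != 0) {
		free(ctx->data);
		free(ctx);
		return -1;
	}
	pthread_detach(tid);
	return 0;
}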

iptk_YTUNBLOCK.c

@@ -13,9 +13,56 @@
#include "mangle.h"
#include "config.h"
#include "raw_replacements.h"
#include "utils.h"
#include "logging.h"
struct config_t config = {
.threads = THREADS_NUM,
.frag_sni_reverse = 1,
.frag_sni_faked = 0,
.fragmentation_strategy = FRAGMENTATION_STRATEGY,
.faking_strategy = FAKING_STRATEGY,
.faking_ttl = FAKE_TTL,
.fake_sni = 1,
.fake_sni_seq_len = 1,
.frag_middle_sni = 1,
.frag_sni_pos = 1,
.use_ipv6 = 1,
.fakeseq_offset = 10000,
.mark = DEFAULT_RAWSOCKET_MARK,
.synfake = 0,
.synfake_len = 0,
.sni_detection = SNI_DETECTION_PARSE,
#ifdef SEG2_DELAY
.seg2_delay = SEG2_DELAY,
#else
.seg2_delay = 0,
#endif
#ifdef USE_GSO
.use_gso = 1,
#else
.use_gso = false,
#endif
#ifdef DEBUG
.verbose = 2,
#else
.verbose = 0,
#endif
.domains_str = defaul_snistr,
.domains_strlen = sizeof(defaul_snistr),
.queue_start_num = DEFAULT_QUEUE_NUM,
.fake_sni_pkt = fake_sni_old,
.fake_sni_pkt_sz = sizeof(fake_sni_old) - 1, // - 1 for null-terminator
};
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1");
+MODULE_VERSION("0.3.2");
MODULE_AUTHOR("Vadim Vetrov <vetrovvd@gmail.com>");
MODULE_DESCRIPTION("Linux kernel module for youtube unblock");
@@ -37,7 +84,7 @@ static int open_raw_socket(void) {
.is_kernel = 1
};
-int mark = RAWSOCKET_MARK;
+int mark = config.mark;
optval.kernel = &mark;
ret = sock_setsockopt(rawsocket, SOL_SOCKET, SO_MARK, optval, sizeof(mark));
if (ret < 0)
@@ -59,67 +106,12 @@ static void close_raw_socket(void) {
sock_release(rawsocket);
}
#define AVAILABLE_MTU 1384
static int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
if (pktlen > AVAILABLE_MTU) {
pr_warn("The packet is too big and may cause issues!");
__u32 buff1_size = pktlen;
__u32 buff2_size = pktlen;
__u8 *buff1 = kmalloc(pktlen, GFP_ATOMIC);
if (buff1 == NULL) return -1;
__u8 *buff2 = kmalloc(pktlen, GFP_ATOMIC);
if (buff2 == NULL) {
kfree(buff1);
return -1;
}
int ret;
#if defined(USE_TCP_SEGMENTATION) || defined(RAWSOCK_TCP_FSTRAT)
if ((ret = tcp4_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0)
return ret;
#elif defined(USE_IP_FRAGMENTATION) || defined(RAWSOCK_IP_FSTRAT)
if ((ret = ip4_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0)
return ret;
#else
pr_warn("send_raw_socket: Packet is too big but fragmentation is disabled! "
"Pass -DRAWSOCK_TCP_FSTRAT or -DRAWSOCK_IP_FSTRAT as CFLAGS "
"To enable it only for raw socket\n");
return -EINVAL;
#endif
int sent = 0;
ret = send_raw_socket(buff1, buff1_size);
if (ret >= 0) sent += ret;
else {
kfree(buff1);
kfree(buff2);
return ret;
}
kfree(buff1);
ret = send_raw_socket(buff2, buff2_size);
if (ret >= 0) sent += ret;
else {
kfree(buff2);
return ret;
}
kfree(buff2);
return sent;
}
static int send_raw_ipv4(const uint8_t *pkt, uint32_t pktlen) {
int ret = 0;
if (pktlen > AVAILABLE_MTU) return -ENOMEM;
struct iphdr *iph;
int ret;
if ((ret = ip4_payload_split(
(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
return ret;
@@ -151,150 +143,130 @@ static int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
return ret;
}
static int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
int ret;
if (pktlen > AVAILABLE_MTU) {
pr_warn("The packet is too big and may cause issues!");
NETBUF_ALLOC(buff1, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(buff1)) {
lgerror("Allocation error", -ENOMEM);
return -ENOMEM;
}
NETBUF_ALLOC(buff2, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(buff2)) {
lgerror("Allocation error", -ENOMEM);
NETBUF_FREE(buff1);
return -ENOMEM;
}
uint32_t buff1_size = MAX_PACKET_SIZE;
uint32_t buff2_size = MAX_PACKET_SIZE;
switch (config.fragmentation_strategy) {
case FRAG_STRAT_TCP:
if ((ret = tcp_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0) {
goto erret_lc;
}
break;
case FRAG_STRAT_IP:
if ((ret = ip4_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0) {
goto erret_lc;
}
break;
default:
pr_warn("send_raw_socket: Packet is too big but fragmentation is disabled!");
ret = -EINVAL;
goto erret_lc;
}
int sent = 0;
ret = send_raw_socket(buff1, buff1_size);
if (ret >= 0) sent += ret;
else {
goto erret_lc;
}
ret = send_raw_socket(buff2, buff2_size);
if (ret >= 0) sent += ret;
else {
goto erret_lc;
}
NETBUF_FREE(buff1);
NETBUF_FREE(buff2);
return sent;
erret_lc:
NETBUF_FREE(buff1);
NETBUF_FREE(buff2);
return ret;
}
int ipvx = netproto_version(pkt, pktlen);
if (ipvx == IP4VERSION)
return send_raw_ipv4(pkt, pktlen);
// else if (ipvx == IP6VERSION)
// return send_raw_ipv6(pkt, pktlen);
printf("proto version %d is unsupported\n", ipvx);
return -EINVAL;
}
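netproto_version() lives in utils.c and is not shown in this hunk; the dispatch above implies it returns the IP version of a raw packet. A sketch of what it is assumed to do, namely read the version nibble of the first byte (the name and error convention here are assumptions):

#include "types.h" /* uint8_t/uint32_t in both spaces */

/* Editor's sketch, not the committed helper: return the IP version
 * nibble (4 for IPv4, 6 for IPv6), or a negative value on bad input. */
static int netproto_version_sketch(const uint8_t *pkt, uint32_t pktlen) {
	if (pkt == NULL || pktlen < 1)
		return -1;
	return pkt[0] >> 4;
}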
static void delay_packet_send(const unsigned char *data, unsigned int data_len, unsigned int delay_ms) {
pr_warn("delay_packet_send won't work on current youtubeUnblock version");
send_raw_socket(data, data_len);
}
struct instance_config_t instance_config = {
.send_raw_packet = send_raw_socket,
.send_delayed_packet = delay_packet_send,
};
static unsigned int ykb_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
-if ((skb->mark & RAWSOCKET_MARK) == RAWSOCKET_MARK)
+if ((skb->mark & config.mark) == config.mark)
return XT_CONTINUE;
if (skb->head == NULL) return XT_CONTINUE;
// TODO: Mallocs are bad!
uint32_t buflen = skb->len;
-__u8 *buf = kmalloc(skb->len, GFP_ATOMIC);
-if (buf == NULL) {
-pr_err("Cannot alloc enough buffer space");
+if (buflen > MAX_PACKET_SIZE)
+goto accept;
+NETBUF_ALLOC(buf, buflen);
+if (!NETBUF_CHECK(buf))
+goto no_free;
+if (skb_copy_bits(skb, 0, buf, buflen) < 0) {
+pr_err("Unable copy bits\n");
+goto accept;
+}
-if (skb_copy_bits(skb, 0, buf, skb->len) < 0) {
-pr_err("Unable copy bits\n");
-goto ac_fkb;
int vrd = process_packet(buf, buflen);
switch(vrd) {
case PKT_ACCEPT:
goto accept;
case PKT_DROP:
goto drop;
}
struct iphdr *iph;
uint32_t iph_len;
struct tcphdr *tcph;
uint32_t tcph_len;
__u8 *payload;
uint32_t plen;
int ret = tcp4_payload_split(buf, buflen, &iph, &iph_len,
&tcph, &tcph_len, &payload, &plen);
if (ret < 0)
goto ac_fkb;
struct verdict vrd = analyze_tls_data(payload, plen);
if (vrd.gvideo_hello) {
int ret;
pr_info("Googlevideo detected\n");
ip4_set_checksum(iph);
tcp4_set_checksum(tcph, iph);
uint32_t f1len = skb->len;
uint32_t f2len = skb->len;
__u8 *frag1 = kmalloc(f1len, GFP_ATOMIC);
if (!frag1) {
pr_err("Cannot alloc enough gv frag1 buffer space");
goto ac_fkb;
}
__u8 *frag2 = kmalloc(f2len, GFP_ATOMIC);
if (!frag2) {
pr_err("Cannot alloc enough gv frag1 buffer space");
kfree(frag1);
goto ac_fkb;
}
#ifdef FAKE_SNI
uint32_t fksn_len = FAKE_SNI_MAXLEN;
__u8 *fksn_buf = kmalloc(fksn_len, GFP_ATOMIC);
if (!fksn_buf) {
pr_err("Cannot alloc enough gksn buffer space");
goto fallback;
}
ret = gen_fake_sni(iph, tcph, fksn_buf, &fksn_len);
if (ret < 0) {
pr_err("Cannot alloc enough gksn buffer space");
goto fksn_fb;
}
#endif
#if defined(USE_TCP_SEGMENTATION)
size_t ipd_offset = vrd.sni_offset;
size_t mid_offset = ipd_offset + vrd.sni_len / 2;
if ((ret = tcp4_frag(buf, skb->len,
mid_offset, frag1, &f1len, frag2, &f2len)) < 0) {
pr_err("tcp4_frag: %d", ret);
goto fksn_fb;
}
#elif defined(USE_IP_FRAGMENTATION)
size_t ipd_offset = tcph_len + vrd.sni_offset;
size_t mid_offset = ipd_offset + vrd.sni_len / 2;
mid_offset += 8 - mid_offset % 8;
if ((ret = ip4_frag(buf, skb->len,
mid_offset, frag1, &f1len, frag2, &f2len)) < 0) {
pr_err("ip4_frag: %d", ret);
goto fksn_fb;
}
#endif
#ifdef FAKE_SNI
ret = send_raw_socket(fksn_buf, fksn_len);
if (ret < 0) {
pr_err("fksn_send: %d", ret);
goto fksn_fb;
}
#endif
#if defined(USE_NO_FRAGMENTATION)
#ifdef SEG2_DELAY
#error "SEG2_DELAY is incompatible with NO FRAGMENTATION"
#endif
ret = send_raw_socket(buf, buflen);
if (ret < 0) {
pr_err("nofrag_send: %d", ret);
}
goto fksn_fb;
#endif
ret = send_raw_socket(frag2, f2len);
if (ret < 0) {
pr_err("raw frag2 send: %d", ret);
goto fksn_fb;
}
#ifdef SEG2_DELAY
#error "Seg2 delay is unsupported yet for kmod"
#else
ret = send_raw_socket(frag1, f1len);
if (ret < 0) {
pr_err("raw frag1 send: %d", ret);
goto fksn_fb;
}
#endif
fksn_fb:
#ifdef FAKE_SNI
kfree(fksn_buf);
#endif
fallback:
#ifndef SEG2_DELAY
kfree(frag1);
#endif
kfree(frag2);
kfree(buf);
kfree_skb(skb);
return NF_STOLEN;
}
ac_fkb:
kfree(buf);
accept:
NETBUF_FREE(buf);
no_free:
return XT_CONTINUE;
drop:
NETBUF_FREE(buf);
kfree_skb(skb);
return NF_STOLEN;
}
static int ykb_chk(const struct xt_tgchk_param *par) {

logging.h

@@ -9,7 +9,7 @@
#define printf pr_info
#define perror pr_err
#define lgerror(msg, ret, ...) __extension__ ({ \
-printf(msg ": %d\n", ##__VA_ARGS__, ret); \
+pr_err(msg ": %d\n", ##__VA_ARGS__, ret); \
})
#else
#include <stdio.h> // IWYU pragma: export

mangle.c

@@ -6,7 +6,7 @@
#include "quic.h"
#include "logging.h"
-#ifndef KERNEL_SCOPE
+#ifndef KERNEL_SPACE
#include <stdlib.h>
#endif
@@ -25,6 +25,7 @@ int process_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
int ipver = netproto_version(raw_payload, raw_payload_len);
int ret;
if (ipver == IP4VERSION) {
ret = ip4_payload_split((uint8_t *)raw_payload, raw_payload_len,
(struct iphdr **)&iph, &iph_len,
@@ -33,7 +34,7 @@ int process_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
if (ret < 0)
goto accept;
-transport_proto = iph ->protocol;
+transport_proto = iph->protocol;
} else if (ipver == IP6VERSION && config.use_ipv6) {
ret = ip6_payload_split((uint8_t *)raw_payload, raw_payload_len,
@@ -43,7 +44,7 @@ int process_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
if (ret < 0)
goto accept;
-transport_proto = ip6h->ip6_ctlun.ip6_un1.ip6_un1_nxt;
+transport_proto = ip6h->ip6_nxt;
} else {
lgtracemsg("Unknown layer 3 protocol version: %d", ipver);
@@ -74,6 +75,7 @@ int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
const uint8_t *data;
uint32_t dlen;
int ipxv = netproto_version(raw_payload, raw_payload_len);
lgtrace_start("TCP");
@@ -89,9 +91,15 @@ int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
goto accept;
}
-if (tcph->syn) {
+if (tcph->syn && config.synfake) {
+lgtrace_addp("TCP syn alter");
-uint8_t payload[MAX_PACKET_SIZE];
+NETBUF_ALLOC(payload, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(payload)) {
+lgerror("Allocation error", -ENOMEM);
+goto accept;
+}
memcpy(payload, ipxh, iph_len);
memcpy(payload + iph_len, tcph, tcph_len);
uint32_t fake_len = config.fake_sni_pkt_sz;
@@ -110,32 +118,40 @@ int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
set_tcp_checksum(tcph, iph, iph_len);
} else if (ipxv == IP6VERSION) {
struct ip6_hdr *ip6h = (struct ip6_hdr *)payload;
-ip6h->ip6_ctlun.ip6_un1.ip6_un1_plen =
-ntohs(tcph_len + fake_len);
+ip6h->ip6_plen = ntohs(tcph_len + fake_len);
set_ip_checksum(ip6h, iph_len);
set_tcp_checksum(tcph, ip6h, iph_len);
}
ret = instance_config.send_raw_packet(payload, iph_len + tcph_len + fake_len);
if (ret < 0) {
lgerror("send_syn_altered", ret);
NETBUF_FREE(payload);
goto accept;
}
lgtrace_addp("rawsocket sent %d", ret);
NETBUF_FREE(payload);
goto drop;
}
if (tcph->syn) goto accept;
struct tls_verdict vrd = analyze_tls_data(data, dlen);
lgtrace_addp("Analyzed, %d", vrd.target_sni);
if (vrd.target_sni) {
lgdebugmsg("Target SNI detected: %.*s", vrd.sni_len, data + vrd.sni_offset);
-uint8_t payload[MAX_PACKET_SIZE];
uint32_t payload_len = raw_payload_len;
+NETBUF_ALLOC(payload, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(payload)) {
+lgerror("Allocation error", -ENOMEM);
+goto accept;
+}
memcpy(payload, raw_payload, raw_payload_len);
void *iph;
@@ -151,7 +167,7 @@ int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
if (ret < 0) {
lgerror("tcp_payload_split in targ_sni", ret);
-goto accept;
+goto accept_lc;
}
if (config.fk_winsize) {
@@ -198,10 +214,10 @@ int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
ret = send_tcp_frags(payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror("tcp4 send frags", ret);
-goto accept;
+goto accept_lc;
}
-goto drop;
+goto drop_lc;
}
break;
case FRAG_STRAT_IP:
@@ -232,11 +248,10 @@ int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
ret = send_ip4_frags(payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror("ip4 send frags", ret);
-goto accept;
+goto accept_lc;
}
-goto drop;
-break;
+goto drop_lc;
} else {
printf("WARNING: IP fragmentation is supported only for IPv4\n");
}
@@ -244,15 +259,23 @@ int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
ret = instance_config.send_raw_packet(payload, payload_len);
if (ret < 0) {
lgerror("raw pack send", ret);
-goto accept;
+goto accept_lc;
}
-goto drop;
+goto drop_lc;
}
goto drop_lc;
accept_lc:
NETBUF_FREE(payload);
goto accept;
drop_lc:
NETBUF_FREE(payload);
goto drop;
}
accept:
@@ -356,8 +379,19 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
packet, pktlen);
}
} else {
-uint8_t frag1[MAX_PACKET_SIZE];
-uint8_t frag2[MAX_PACKET_SIZE];
+NETBUF_ALLOC(frag1, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(frag1)) {
+lgerror("Allocation error", -ENOMEM);
+return -ENOMEM;
+}
+NETBUF_ALLOC(frag2, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(frag2)) {
+lgerror("Allocation error", -ENOMEM);
+NETBUF_FREE(frag1);
+return -ENOMEM;
+}
uint32_t f1len = MAX_PACKET_SIZE;
uint32_t f2len = MAX_PACKET_SIZE;
@@ -365,7 +399,8 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
if (dvs > poses[0]) {
lgerror("send_frags: Recursive dvs(%d) is more than poses0(%d)", -EINVAL, dvs, poses[0]);
-return -EINVAL;
+ret = -EINVAL;
+goto erret_lc;
}
ret = ip4_frag(packet, pktlen, poses[0] - dvs,
@@ -373,7 +408,7 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
if (ret < 0) {
lgerror("send_frags: frag: with context packet with size %d, position: %d, recursive dvs: %d", ret, pktlen, poses[0], dvs);
-return ret;
+goto erret_lc;
}
if (config.frag_sni_reverse)
@@ -381,21 +416,30 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
send_frag1:
ret = send_ip4_frags(frag1, f1len, NULL, 0, 0);
if (ret < 0) {
-return ret;
+goto erret_lc;
}
if (config.frag_sni_reverse)
-goto out;
+goto out_lc;
send_frag2:
dvs += poses[0];
ret = send_ip4_frags(frag2, f2len, poses + 1, poses_sz - 1, dvs);
if (ret < 0) {
-return ret;
+goto erret_lc;
}
if (config.frag_sni_reverse)
goto send_frag1;
out_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
goto out;
erret_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
return ret;
}
out:
@@ -419,9 +463,28 @@ int send_tcp_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
packet, pktlen);
}
} else {
-uint8_t frag1[MAX_PACKET_SIZE];
-uint8_t frag2[MAX_PACKET_SIZE];
-uint8_t fake_pad[MAX_PACKET_SIZE];
+NETBUF_ALLOC(frag1, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(frag1)) {
+lgerror("Allocation error", -ENOMEM);
+return -ENOMEM;
+}
+NETBUF_ALLOC(frag2, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(frag2)) {
+lgerror("Allocation error", -ENOMEM);
+NETBUF_FREE(frag1);
+return -ENOMEM;
+}
+NETBUF_ALLOC(fake_pad, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(fake_pad)) {
+lgerror("Allocation error", -ENOMEM);
+NETBUF_FREE(frag1);
+NETBUF_FREE(frag2);
+return -ENOMEM;
+}
uint32_t f1len = MAX_PACKET_SIZE;
uint32_t f2len = MAX_PACKET_SIZE;
@@ -429,7 +492,8 @@ int send_tcp_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
if (dvs > poses[0]) {
lgerror("send_frags: Recursive dvs(%d) is more than poses0(%d)", -EINVAL, dvs, poses[0]);
-return -EINVAL;
+ret = -EINVAL;
+goto erret_lc;
}
@@ -440,7 +504,7 @@ int send_tcp_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
if (ret < 0) {
lgerror("send_frags: tcp_frag: with context packet with size %d, position: %d, recursive dvs: %d", ret, pktlen, poses[0], dvs);
-return ret;
+goto erret_lc;
}
@@ -451,21 +515,20 @@ send_frag1:
{
ret = send_tcp_frags(frag1, f1len, NULL, 0, 0);
if (ret < 0) {
-return ret;
+goto erret_lc;
}
if (config.frag_sni_reverse)
-goto out;
+goto out_lc;
}
send_fake:
// TODO
if (config.frag_sni_faked) {
uint32_t iphfl, tcphfl;
ret = tcp_payload_split(frag2, f2len, NULL, &iphfl, NULL, &tcphfl, NULL, NULL);
if (ret < 0) {
lgerror("Invalid frag2", ret);
-return ret;
+goto erret_lc;
}
memcpy(fake_pad, frag2, iphfl + tcphfl);
memset(fake_pad + iphfl + tcphfl, 0, f2len - iphfl - tcphfl);
@@ -478,11 +541,11 @@ send_fake:
ret = fail_packet(fake_pad, f2len);
if (ret < 0) {
lgerror("Failed to fail packet", ret);
-return ret;
+goto erret_lc;
}
ret = send_tcp_frags(fake_pad, f2len, NULL, 0, 0);
if (ret < 0) {
-return ret;
+goto erret_lc;
}
}
@@ -495,12 +558,22 @@ send_frag2:
dvs += poses[0];
ret = send_tcp_frags(frag2, f2len, poses + 1, poses_sz - 1, dvs);
if (ret < 0) {
-return ret;
+goto erret_lc;
}
if (config.frag_sni_reverse)
goto send_fake;
}
out_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
NETBUF_FREE(fake_pad);
goto out;
erret_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
NETBUF_FREE(fake_pad);
return ret;
}
out:
return 0;
@@ -520,20 +593,24 @@ int post_fake_sni(const void *iph, unsigned int iph_len,
struct tcphdr *fstcph = (void *)rfstcph;
for (int i = 0; i < sequence_len; i++) {
-uint8_t fake_sni[MAX_PACKET_SIZE];
+NETBUF_ALLOC(fake_sni, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(fake_sni)) {
+lgerror("Allocation error", -ENOMEM);
+return -ENOMEM;
+}
uint32_t fsn_len = MAX_PACKET_SIZE;
ret = gen_fake_sni(fsiph, iph_len, fstcph, tcph_len,
fake_sni, &fsn_len);
if (ret < 0) {
lgerror("gen_fake_sni", ret);
-return ret;
+goto erret_lc;
}
lgtrace_addp("post fake sni #%d", i + 1);
ret = instance_config.send_raw_packet(fake_sni, fsn_len);
if (ret < 0) {
lgerror("send fake sni", ret);
-return ret;
+goto erret_lc;
}
uint32_t iph_len;
@@ -551,32 +628,17 @@ int post_fake_sni(const void *iph, unsigned int iph_len,
memcpy(rfstcph, fstcph, tcph_len);
fsiph = (void *)rfsiph;
fstcph = (void *)rfstcph;
out_lc:
NETBUF_FREE(fake_sni);
continue;
erret_lc:
NETBUF_FREE(fake_sni);
return ret;
}
return 0;
}
void z_function(const char *str, int *zbuf, size_t len) {
zbuf[0] = len;
ssize_t lh = 0, rh = 1;
for (ssize_t i = 1; i < len; i++) {
zbuf[i] = 0;
if (i < rh) {
zbuf[i] = min(zbuf[i - lh], rh - i);
}
while (i + zbuf[i] < len && str[zbuf[i]] == str[i + zbuf[i]])
zbuf[i]++;
if (i + zbuf[i] > rh) {
lh = i;
rh = i + zbuf[i];
}
}
}
#define TLS_CONTENT_TYPE_HANDSHAKE 0x16
#define TLS_HANDSHAKE_TYPE_CLIENT_HELLO 0x01
#define TLS_EXTENSION_SNI 0x0000
@@ -758,12 +820,26 @@ brute:
config.domains_str[i] == ',' ||
config.domains_str[i] == '\n' )) {
-uint8_t buf[MAX_PACKET_SIZE];
-int zbuf[MAX_PACKET_SIZE];
unsigned int domain_len = (i - j);
const char *domain_startp = config.domains_str + j;
-if (domain_len + dlen + 1> MAX_PACKET_SIZE) continue;
+if (domain_len + dlen + 1> MAX_PACKET_SIZE) {
+continue;
+}
+NETBUF_ALLOC(buf, MAX_PACKET_SIZE);
+if (!NETBUF_CHECK(buf)) {
+lgerror("Allocation error", -ENOMEM);
+goto out;
+}
+NETBUF_ALLOC(nzbuf, MAX_PACKET_SIZE * sizeof(int));
+if (!NETBUF_CHECK(nzbuf)) {
+lgerror("Allocation error", -ENOMEM);
+NETBUF_FREE(buf);
+goto out;
+}
+int *zbuf = (void *)nzbuf;
memcpy(buf, domain_startp, domain_len);
memcpy(buf + domain_len, "#", 1);
@@ -776,12 +852,17 @@ brute:
vrd.target_sni = 1;
vrd.sni_len = domain_len;
vrd.sni_offset = (k - domain_len - 1);
NETBUF_FREE(buf);
NETBUF_FREE(nzbuf);
goto out;
}
}
j = i + 1;
NETBUF_FREE(buf);
NETBUF_FREE(nzbuf);
}
}
@@ -811,7 +892,7 @@ int gen_fake_sni(const void *ipxh, uint32_t iph_len,
memcpy(buf, iph, iph_len);
struct ip6_hdr *niph = (struct ip6_hdr *)buf;
-niph->ip6_ctlun.ip6_un1.ip6_un1_nxt = IPPROTO_TCP;
+niph->ip6_nxt = IPPROTO_TCP;
} else {
return -EINVAL;
}
@@ -834,7 +915,7 @@ int gen_fake_sni(const void *ipxh, uint32_t iph_len,
niph->tot_len = htons(dlen);
} else if (ipxv == IP6VERSION) {
struct ip6_hdr *niph = (struct ip6_hdr *)buf;
-niph->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(dlen - iph_len);
+niph->ip6_plen = htons(dlen - iph_len);
}
fail_packet(buf, *buflen);
@@ -868,7 +949,7 @@ int fail_packet(uint8_t *payload, uint32_t plen) {
if (config.fakeseq_offset) {
tcph->seq = htonl(ntohl(tcph->seq) - config.fakeseq_offset);
} else {
-#ifdef KERNEL_SCOPE
+#ifdef KERNEL_SPACE
tcph->seq = 124;
#else
tcph->seq = random();
@@ -889,7 +970,7 @@ int fail_packet(uint8_t *payload, uint32_t plen) {
if (ipxv == IP4VERSION) {
((struct iphdr *)iph)->ttl = config.faking_ttl;
} else if (ipxv == IP6VERSION) {
-((struct ip6_hdr *)iph)->ip6_ctlun.ip6_un1.ip6_un1_hlim = config.faking_ttl;
+((struct ip6_hdr *)iph)->ip6_hops = config.faking_ttl;
} else {
lgerror("fail_packet: IP version is unsupported", -EINVAL);
return -EINVAL;

quic.c

@@ -48,7 +48,7 @@ int quic_parse_data(uint8_t *raw_payload, uint32_t raw_payload_len,
}
uint8_t found = 0;
-for (uint8_t i = 0; i < sizeof(supported_versions); i++) {
+for (uint8_t i = 0; i < 2; i++) {
if (ntohl(nqch->version) == supported_versions[i]) {
found = 1;
}

types.h

@@ -3,7 +3,7 @@
#define TYPES_H
#include <asm/byteorder.h>
-#ifdef KERNEL_SCOPE
+#ifdef KERNEL_SPACE
#include <linux/errno.h> // IWYU pragma: export
#include <linux/string.h> // IWYU pragma: export
@@ -12,17 +12,17 @@ typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
typedef __u64 uint64_t;
-typedef __i8 int8_t;
-typedef __i16 int16_t;
-typedef __i32 int32_t;
-typedef __i64 int64_t;
-#else /* USERSPACE_SCOPE */
+//typedef __i8 int8_t;
+//typedef __i16 int16_t;
+//typedef __i32 int32_t;
+//typedef __i64 int64_t;
+#else /* USER_SPACE */
#include <errno.h> // IWYU pragma: export
#include <stdint.h> // IWYU pragma: export
#include <string.h> // IWYU pragma: export
-#endif /* SCOPES */
+#endif /* SPACES */
// Network specific structures
#ifdef KERNEL_SPACE
@@ -30,15 +30,35 @@ typedef __i64 int64_t;
#include <linux/net.h> // IWYU pragma: export
#include <linux/in.h> // IWYU pragma: export
#include <linux/ip.h> // IWYU pragma: export
#include <linux/ipv6.h> // IWYU pragma: export
#include <linux/tcp.h> // IWYU pragma: export
#define ip6_hdr ipv6hdr
/* from <netinet/ip.h> */
#define IP_RF 0x8000 /* reserved fragment flag */
#define IP_DF 0x4000 /* dont fragment flag */
#define IP_MF 0x2000 /* more fragments flag */
#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
#ifdef __LITTLE_ENDIAN
#define __BIG_ENDIAN 4321
#define __BYTE_ORDER __LITTLE_ENDIAN
#elif defined(__BIG_ENDIAN)
#define __LITTLE_ENDIAN 1234
#define __BYTE_ORDER __BIG_ENDIAN
#else
-#define USER_SPACE
#error "Unsupported endian"
#endif
#define ip6_plen payload_len
#define ip6_nxt nexthdr
#define ip6_hops hop_limit
#define ip6_hlim hop_limit
#define ip6_src saddr
#define ip6_dst daddr
#else /* USER_SPACE */
#include <arpa/inet.h> // IWYU pragma: export
#include <netinet/ip.h> // IWYU pragma: export
#include <netinet/ip6.h> // IWYU pragma: export
@@ -46,6 +66,8 @@ typedef __i64 int64_t;
#include <netinet/udp.h> // IWYU pragma: export
#endif
#ifndef KERNEL_SPACE
#define max(a,b) __extension__ \
({ \
__typeof__ (a) _a = (a); \
@@ -60,4 +82,23 @@ typedef __i64 int64_t;
_a < _b ? _a : _b; \
})
#endif /* not a KERNEL_SPACE */
/**
* Use NETBUF_ALLOC and NETBUF_FREE as an abstraction of memory allocation.
* Do not use it within expressions, consider these defines as separate statements.
*
* Use NETBUF_CHECK to check that buffer was properly allocated.
*/
#ifdef KERNEL_SPACE
#include <linux/gfp_types.h>
#define NETBUF_ALLOC(buf, buf_len) __u8* buf = kmalloc(buf_len, GFP_ATOMIC);
#define NETBUF_CHECK(buf) ((buf) != NULL)
#define NETBUF_FREE(buf) kfree(buf);
#else
#define NETBUF_ALLOC(buf, buf_len) __u8 buf[buf_len];
#define NETBUF_CHECK(buf) (1)
#define NETBUF_FREE(buf) ;
#endif
#endif /* TYPES_H */
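A usage sketch of the pattern these macros enforce: kernel builds get a GFP_ATOMIC heap buffer that must be freed on every exit path, while userspace builds get a stack array and NETBUF_FREE becomes a no-op. The function below and the MAX_PACKET_SIZE fallback are illustrative, not part of the tree:

#include "types.h" /* NETBUF_* macros; errno/string headers for both spaces */

#ifndef MAX_PACKET_SIZE
#define MAX_PACKET_SIZE 8192 /* assumed fallback; the real constant lives elsewhere in the tree */
#endif

/* Illustrative only: allocate, check, then free on every exit path. */
static int netbuf_usage_example(const uint8_t *pkt, uint32_t pktlen) {
	NETBUF_ALLOC(buf, MAX_PACKET_SIZE);
	if (!NETBUF_CHECK(buf))
		return -ENOMEM; /* allocation failed, nothing to free */

	if (pktlen > MAX_PACKET_SIZE) {
		NETBUF_FREE(buf); /* kernel builds would leak without this */
		return -EINVAL;
	}

	memcpy(buf, pkt, pktlen);
	/* ... mangle buf here ... */

	NETBUF_FREE(buf);
	return 0;
}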

utils.c

@@ -1,10 +1,8 @@
#include "utils.h"
#include "logging.h"
-#include <netinet/in.h>
+#include "types.h"
-#ifdef KERNEL_SPACE
-#include <linux/ip.h>
-#else
+#ifndef KERNEL_SPACE
#include <stdlib.h>
#include <libnetfilter_queue/libnetfilter_queue_ipv4.h>
#include <libnetfilter_queue/libnetfilter_queue_ipv6.h>
@@ -39,7 +37,7 @@ void ip4_set_checksum(struct iphdr *iph)
void tcp6_set_checksum(struct tcphdr *tcph, struct ip6_hdr *iph) {
uint16_t old_check = ntohs(tcph->check);
-nfq_tcp_compute_checksum_ipv6(tcph, iph);
+// nfq_tcp_compute_checksum_ipv6(tcph, iph);
}
int set_ip_checksum(void *iph, uint32_t iphb_len) {
@@ -157,7 +155,7 @@ int ip6_payload_split(uint8_t *pkt, uint32_t buflen,
}
uint32_t hdr_len = sizeof(struct ip6_hdr);
-uint32_t pktlen = ntohs(hdr->ip6_ctlun.ip6_un1.ip6_un1_plen);
+uint32_t pktlen = ntohs(hdr->ip6_plen);
if (buflen < pktlen) {
lgerror("ip6_payload_split: buflen cmp pktlen: %d %d", -EINVAL, buflen, pktlen);
return -EINVAL;
@@ -194,7 +192,7 @@ int tcp6_payload_split(uint8_t *pkt, uint32_t buflen,
if (
-hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP ||
+hdr->ip6_nxt != IPPROTO_TCP ||
tcph_plen < sizeof(struct tcphdr)) {
return -EINVAL;
}
@@ -426,8 +424,8 @@ int tcp_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
} else {
struct ip6_hdr *s1_hdr = (void *)seg1;
struct ip6_hdr *s2_hdr = (void *)seg2;
-s1_hdr->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(s1_dlen - hdr_len);
-s2_hdr->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(s2_dlen - hdr_len);
+s1_hdr->ip6_plen = htons(s1_dlen - hdr_len);
+s2_hdr->ip6_plen = htons(s2_dlen - hdr_len);
}
struct tcphdr *s1_tcph = (void *)(seg1 + hdr_len);
@@ -440,3 +438,24 @@ int tcp_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
return 0;
}
void z_function(const char *str, int *zbuf, size_t len) {
zbuf[0] = len;
ssize_t lh = 0, rh = 1;
for (ssize_t i = 1; i < len; i++) {
zbuf[i] = 0;
if (i < rh) {
zbuf[i] = min(zbuf[i - lh], rh - i);
}
while (i + zbuf[i] < len && str[zbuf[i]] == str[i + zbuf[i]])
zbuf[i]++;
if (i + zbuf[i] > rh) {
lh = i;
rh = i + zbuf[i];
}
}
}
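This is the helper behind the brute-force SNI matcher above: it concatenates the target domain, a separator byte, and the payload, and any z-value equal to the domain length marks an occurrence. A standalone sketch of that search, assuming utils.h declares z_function (z_substring_search and the fixed buffer sizes are illustrative):

#include <string.h>
#include "utils.h" /* z_function() */

/* Editor's sketch: offset of needle inside haystack, or -1 if absent. */
static int z_substring_search(const char *needle, size_t nlen,
		const char *haystack, size_t hlen) {
	char buf[1024];
	int zbuf[1024];
	size_t len = nlen + 1 + hlen;
	if (len > sizeof(buf))
		return -1;

	memcpy(buf, needle, nlen);
	buf[nlen] = '#'; /* separator assumed absent from both strings */
	memcpy(buf + nlen + 1, haystack, hlen);

	z_function(buf, zbuf, len);
	for (size_t k = nlen + 1; k < len; k++) {
		if ((size_t)zbuf[k] >= nlen)
			return (int)(k - nlen - 1); /* match position in haystack */
	}
	return -1;
}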

utils.h

@@ -89,4 +89,6 @@ void tcp6_set_checksum(struct tcphdr *tcph, struct ip6_hdr *iph);
int set_ip_checksum(void *iph, uint32_t iphb_len);
int set_tcp_checksum(struct tcphdr *tcph, void *iph, uint32_t iphb_len);
void z_function(const char *str, int *zbuf, size_t len);
#endif /* UTILS_H */