#ifndef KERNEL_SPACE
#error "You are trying to compile the kernel module not in the kernel space"
#endif

#include "kmod_utils.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/net.h>

#include "config.h"
#include "utils.h"
#include "logging.h"

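/*
 * One raw socket per address family, each paired with a mutex that
 * serializes the kernel_sendmsg() calls made through it.
 */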
static struct socket *rawsocket;
DEFINE_MUTEX(rslock);

static struct socket *raw6socket;
DEFINE_MUTEX(rs6lock);

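/*
 * Creates the IPv4 raw socket used to re-send mangled packets and tags it
 * with config.mark via SO_MARK, presumably so that firewall rules can
 * recognize the module's own traffic and avoid processing it again.
 */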
int open_raw_socket(void) {
	int ret = 0;
	ret = sock_create(AF_INET, SOCK_RAW, IPPROTO_RAW, &rawsocket);

	if (ret < 0) {
		pr_alert("Unable to create raw socket\n");
		goto err;
	}

	sockptr_t optval = {
		.kernel = NULL,
		.is_kernel = 1
	};

	int mark = config.mark;
	optval.kernel = &mark;
	ret = sock_setsockopt(rawsocket, SOL_SOCKET, SO_MARK, optval, sizeof(mark));
	if (ret < 0) {
		pr_alert("setsockopt(SO_MARK, %d) failed\n", mark);
		goto sr_err;
	}

	int one = 1;
	optval.kernel = &one;

	return 0;
sr_err:
	sock_release(rawsocket);
err:
	return ret;
}

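/* Releases the IPv4 raw socket opened by open_raw_socket(). */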
void close_raw_socket(void) {
	sock_release(rawsocket);
}

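/*
 * Sends a complete IPv4 packet (IP header included) through the raw socket.
 * The destination address is taken from the packet's own header, and the
 * kernel_sendmsg() call is serialized with rslock.
 */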
static int send_raw_ipv4(const uint8_t *pkt, uint32_t pktlen) {
	int ret = 0;
	if (pktlen > AVAILABLE_MTU) return -ENOMEM;

	struct iphdr *iph;

	if ((ret = ip4_payload_split(
		(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
		return ret;
	}

	struct sockaddr_in daddr = {
		.sin_family = AF_INET,
		.sin_port = 0,
		.sin_addr = {
			.s_addr = iph->daddr
		}
	};

	struct msghdr msg;
	struct kvec iov;
	iov.iov_base = (__u8 *)pkt;
	iov.iov_len = pktlen;
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, 1);

	msg.msg_flags = 0;
	msg.msg_name = &daddr;
	msg.msg_namelen = sizeof(struct sockaddr_in);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	mutex_lock(&rslock);
	ret = kernel_sendmsg(rawsocket, &msg, &iov, 1, pktlen);
	mutex_unlock(&rslock);

	return ret;
}

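/*
 * IPv6 counterpart of open_raw_socket(): creates the AF_INET6 raw socket
 * and applies the same SO_MARK setup.
 */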
int open_raw6_socket(void) {
	int ret = 0;
	ret = sock_create(AF_INET6, SOCK_RAW, IPPROTO_RAW, &raw6socket);

	if (ret < 0) {
		pr_alert("Unable to create raw ipv6 socket\n");
		goto err;
	}

	sockptr_t optval = {
		.kernel = NULL,
		.is_kernel = 1
	};

	int mark = config.mark;
	optval.kernel = &mark;
	ret = sock_setsockopt(raw6socket, SOL_SOCKET, SO_MARK, optval, sizeof(mark));
	if (ret < 0) {
		pr_alert("setsockopt(SO_MARK, %d) failed\n", mark);
		goto sr_err;
	}

	int one = 1;
	optval.kernel = &one;

	return 0;
sr_err:
	sock_release(raw6socket);
err:
	return ret;
}

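/* Releases the IPv6 raw socket opened by open_raw6_socket(). */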
void close_raw6_socket(void) {
	sock_release(raw6socket);
}

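/*
 * IPv6 counterpart of send_raw_ipv4(): sends a complete IPv6 packet to the
 * destination found in its own header, serialized with rs6lock.
 */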
int send_raw_ipv6(const uint8_t *pkt, uint32_t pktlen) {
	int ret = 0;
	if (pktlen > AVAILABLE_MTU) return -ENOMEM;

	struct ip6_hdr *iph;

	if ((ret = ip6_payload_split(
		(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
		return ret;
	}

	struct sockaddr_in6 daddr = {
		.sin6_family = AF_INET6,
		/* Always 0 for raw socket */
		.sin6_port = 0,
		.sin6_addr = iph->ip6_dst
	};

	struct msghdr msg;
	struct kvec iov;
	iov.iov_base = (__u8 *)pkt;
	iov.iov_len = pktlen;
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, 1);

	msg.msg_flags = 0;
	msg.msg_name = &daddr;
	msg.msg_namelen = sizeof(struct sockaddr_in6);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	mutex_lock(&rs6lock);
	ret = kernel_sendmsg(raw6socket, &msg, &iov, 1, pktlen);
	mutex_unlock(&rs6lock);

	return ret;
}

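/*
 * Entry point for re-sending a packet. Packets that exceed AVAILABLE_MTU are
 * split in two according to config.fragmentation_strategy (TCP segmentation
 * or IPv4 fragmentation) and each part is sent recursively; otherwise the
 * packet is dispatched to the IPv4 or IPv6 sender based on its version field.
 */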
int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
	int ret;

	if (pktlen > AVAILABLE_MTU) {
		pr_warn("The packet is too big and may cause issues!\n");

		NETBUF_ALLOC(buff1, MAX_PACKET_SIZE);
		if (!NETBUF_CHECK(buff1)) {
			lgerror("Allocation error", -ENOMEM);
			return -ENOMEM;
		}
		NETBUF_ALLOC(buff2, MAX_PACKET_SIZE);
		if (!NETBUF_CHECK(buff2)) {
			lgerror("Allocation error", -ENOMEM);
			NETBUF_FREE(buff1);
			return -ENOMEM;
		}
		uint32_t buff1_size = MAX_PACKET_SIZE;
		uint32_t buff2_size = MAX_PACKET_SIZE;

		switch (config.fragmentation_strategy) {
		case FRAG_STRAT_TCP:
			if ((ret = tcp_frag(pkt, pktlen, AVAILABLE_MTU-128,
				buff1, &buff1_size, buff2, &buff2_size)) < 0) {

				goto erret_lc;
			}
			break;
		case FRAG_STRAT_IP:
			if ((ret = ip4_frag(pkt, pktlen, AVAILABLE_MTU-128,
				buff1, &buff1_size, buff2, &buff2_size)) < 0) {

				goto erret_lc;
			}
			break;
		default:
			pr_warn("send_raw_socket: Packet is too big but fragmentation is disabled!\n");
			ret = -EINVAL;
			goto erret_lc;
		}

		int sent = 0;
		ret = send_raw_socket(buff1, buff1_size);

		if (ret >= 0) sent += ret;
		else {
			goto erret_lc;
		}

		ret = send_raw_socket(buff2, buff2_size);
		if (ret >= 0) sent += ret;
		else {
			goto erret_lc;
		}

		NETBUF_FREE(buff1);
		NETBUF_FREE(buff2);
		return sent;
erret_lc:
		NETBUF_FREE(buff1);
		NETBUF_FREE(buff2);
		return ret;
	}

	int ipvx = netproto_version(pkt, pktlen);

	if (ipvx == IP4VERSION)
		return send_raw_ipv4(pkt, pktlen);
	else if (ipvx == IP6VERSION)
		return send_raw_ipv6(pkt, pktlen);

	pr_err("proto version %d is unsupported\n", ipvx);
	return -EINVAL;
}

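/*
 * Delayed sending is not implemented here: the packet is sent immediately
 * and delay_ms is ignored.
 */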
void delay_packet_send(const unsigned char *data, unsigned int data_len, unsigned int delay_ms) {
	pr_warn("delay_packet_send won't work on current youtubeUnblock version\n");
	send_raw_socket(data, data_len);
}

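/* Wires the raw-socket senders into the instance_config callback table. */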
struct instance_config_t instance_config = {
	.send_raw_packet = send_raw_socket,
	.send_delayed_packet = delay_packet_send,
};