Last active December 28, 2025 07:03
(SCROLL for MAIN FILE = miitomo-sakasho-obfuscation.fu) Miitomo/DeNA (Sakasho) API obfuscation encoding/decoding in Fusion. Tested in JS, C, Python, Go.
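For orientation: as implemented in the files below, encoding wraps the payload in a single raw LZ4 literal block (no actual compression is performed), prefixes it with a Base128 varint holding the uncompressed size, and then XOR/bit-rotates every byte against a table built from the common key and the player_session_id cookie; decoding reverses those steps. Below is a minimal round-trip sketch, assuming the Go wrapper methods (Initialize/Decode/Encode) from the proxy file further down are available in the same package; obfuscatedBody is a placeholder for captured request/response bytes.

package main

import "fmt"

// Sketch only: SakashoObfuscation, Initialize, Decode and Encode are the
// Go wrappers defined in the proxy file later in this gist.
func roundTrip(obfuscatedBody []byte) error {
    obfs := &SakashoObfuscation{}
    // Miitomo common key; empty player_session_id (cookie not set).
    obfs.Initialize("9ec1c78fa2cb34e2bed5691c08432f04", "")
    plain, err := obfs.Decode(obfuscatedBody)
    if err != nil {
        return fmt.Errorf("decode: %w", err)
    }
    reencoded, err := obfs.Encode(plain)
    if err != nil {
        return fmt.Errorf("encode: %w", err)
    }
    fmt.Printf("decoded %d bytes, re-encoded to %d bytes\n", len(plain), len(reencoded))
    return nil
}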
| // Generated automatically with "fut". Do not edit. | |
| #include <stdint.h> | |
| #ifdef __cplusplus | |
| extern "C" { | |
| #endif | |
| typedef struct SakashoObfuscation SakashoObfuscation; | |
| /** | |
| * Maximum size for a 32-bit integer as a varint. | |
| */ | |
| #define Varint_MAX_SIZE_INT 5 | |
| /** | |
| * Reads a varint from data starting at posOut[0]. | |
| * Advances posOut[0] to indicate how large the varint was. | |
| * Maximum is a 32-bit int. Returns 0 on invalid varint. | |
| * @param data Byte array containing the varint. | |
| * @param posOut Single-element array containing the cursor. | |
| */ | |
| int Varint_Read(uint8_t const *data, uint8_t *posOut); | |
| /** | |
| * Writes a 32-bit varint to the specified output. | |
| * Returns varint size, or 0 on failure. | |
| * @param offset Offset within dst to write the varint to. | |
| */ | |
| int Varint_Write(uint8_t *dst, int value, int offset); | |
| /** | |
| * Decompresses a chunk of raw LZ4 data into the output buffer. | |
| * Returns the number of bytes decompressed, or -1 on corruption. | |
| * @param src Compressed input data. | |
| * @param dst Decompressed output. | |
| * @param compressedSize Length of compressed input data. | |
| * @param dstCapacity Capacity of the output buffer. | |
| * @param srcOffset Offset into the compressed data. | |
| */ | |
| int Lz4_Decompress(uint8_t const *src, uint8_t *dst, int compressedSize, int dstCapacity, int srcOffset); | |
| /** | |
| * Gets the maximum size of a compressed buffer, same as LZ4_compressBound. | |
| */ | |
| int Lz4_GetMaxCompressedSize(int inputSize); | |
| /** | |
| * Encodes input into raw LZ4 block format with no framing. | |
| * | |
| * <p>Returns the number of bytes written, or -1 on failure. | |
| * <p>This does not actually compress the data; the output is slightly | |
| * larger than the input and exists only for interoperability. | |
| * @param src Input data. | |
| * @param dst Destination to write compressed data to. | |
| * @param srcSize Size of input data buffer. | |
| * @param dstCapacity Capacity of the output buffer. | |
| * @param dstOffset Offset into the destination buffer. | |
| */ | |
| int Lz4_Compress(uint8_t const *src, uint8_t *dst, int srcSize, int dstCapacity, int dstOffset); | |
| /** | |
| * Builds or rebuilds the internal XOR table. | |
| * @param self This <code>SakashoObfuscation</code>. | |
| * @param commonKey The common key string. | |
| * @param sessionId The value of the player_session_id cookie, | |
| * or an empty string if the cookie was not set. | |
| */ | |
| void SakashoObfuscation_Initialize(SakashoObfuscation *self, const char *commonKey, const char *sessionId); | |
| /** | |
| * Applies XOR decoding to the buffer in-place. | |
| * @param self This <code>SakashoObfuscation</code>. | |
| */ | |
| void SakashoObfuscation_XorDecode(const SakashoObfuscation *self, uint8_t *data, int dataLen); | |
| /** | |
| * Applies XOR encoding to the buffer in-place. | |
| * @param self This <code>SakashoObfuscation</code>. | |
| */ | |
| void SakashoObfuscation_XorEncode(const SakashoObfuscation *self, uint8_t *data, int dataLen); | |
| /** | |
| * Gets decompressed size from obfuscated/compressed | |
| * data, or 0 if the size varint is invalid. | |
| * @param self This <code>SakashoObfuscation</code>. | |
| */ | |
| int SakashoObfuscation_GetDecompressedSize(const SakashoObfuscation *self, uint8_t const *data, uint8_t *posOut); | |
| /** | |
| * Fully decodes and decompresses obfuscated bytes. | |
| * Returns a pre-allocated byte array (must be freed by the caller) | |
| * or null if the decompression failed. | |
| * @param self This <code>SakashoObfuscation</code>. | |
| */ | |
| uint8_t *SakashoObfuscation_Decode(const SakashoObfuscation *self, uint8_t const *data, int dataLen); | |
| /** | |
| * Fully compresses and obfuscates raw bytes. | |
| * | |
| * <p>Returns a pre-allocated byte array (must be freed by the caller) | |
| * containing the compressed and obfuscated data, or null on failure. | |
| * <p>NOTE: The actual output length is written to posOut[0]; the returned | |
| * buffer is larger than needed, so you must trim it. Example (JS): | |
| * const len = new Int32Array([0]); | |
| * let result = obfs.encode(input, size, len); result = result.subarray(0, len[0]); | |
| * @param self This <code>SakashoObfuscation</code>. | |
| * @param posOut Array where the 0th element is the output size. | |
| */ | |
| uint8_t *SakashoObfuscation_Encode(const SakashoObfuscation *self, uint8_t const *data, int dataLen, int *posOut); | |
| /** | |
| * Applies the conditional XOR/bit rotation operation to decode the data. | |
| * Reference: libsaksho.so:FUN_0004ec70, Java_jp_dena_sakasho_core_delegate_CookedResponseDelegate_cookResponse | |
| */ | |
| void SakashoObfuscation_XorDecodeBuffer(uint8_t *data, int dataLen, uint8_t const *table, int tableLen); | |
| /** | |
| * Applies the conditional XOR/bit rotation operation to encode the data. | |
| * Reference: libsaksho.so:FUN_0004ebc0, Java_jp_dena_sakasho_core_http_CookedRequestBody_cookRequest | |
| */ | |
| void SakashoObfuscation_XorEncodeBuffer(uint8_t *data, int dataLen, uint8_t const *table, int tableLen); | |
| #ifdef __cplusplus | |
| } | |
| #endif | |
| // Generated automatically with "fut". Do not edit. | |
| #include <assert.h> | |
| #include <stddef.h> | |
| #include <stdlib.h> | |
| #include <string.h> | |
| typedef void (*FuMethodPtr)(void *); | |
| typedef struct { | |
| size_t count; | |
| size_t unitSize; | |
| size_t refCount; | |
| FuMethodPtr destructor; | |
| } FuShared; | |
| static void *FuShared_Make(size_t count, size_t unitSize, FuMethodPtr constructor, FuMethodPtr destructor) | |
| { | |
| FuShared *self = (FuShared *) malloc(sizeof(FuShared) + count * unitSize); | |
| self->count = count; | |
| self->unitSize = unitSize; | |
| self->refCount = 1; | |
| self->destructor = destructor; | |
| if (constructor != NULL) { | |
| for (size_t i = 0; i < count; i++) | |
| constructor((char *) (self + 1) + i * unitSize); | |
| } | |
| return self + 1; | |
| } | |
| static void FuShared_Release(void *ptr) | |
| { | |
| if (ptr == NULL) | |
| return; | |
| FuShared *self = (FuShared *) ptr - 1; | |
| if (--self->refCount != 0) | |
| return; | |
| if (self->destructor != NULL) { | |
| for (size_t i = self->count; i > 0;) | |
| self->destructor((char *) ptr + --i * self->unitSize); | |
| } | |
| free(self); | |
| } | |
| /** | |
| * Deobfuscation for DeNA Sakasho HTTP request/response content. | |
| * | |
| * <p>Reverse engineered from Miitomo, may be used elsewhere. | |
| * They call this "CookedResponse"/"CookedRequestBody" in symbols. | |
| * <p>Consists of XOR/bit rotation, LZ4 compression, and varint length field. | |
| * This class just implements the XOR logic and a generic decode method | |
| * calling the Varint and Lz4 classes implemented here. | |
| */ | |
| struct SakashoObfuscation { | |
| uint8_t xorTable[256]; | |
| int xorLen; | |
| }; | |
| int Varint_Read(uint8_t const *data, uint8_t *posOut) | |
| { | |
| int value = 0; | |
| int shift = 0; | |
| int pos = posOut[0]; | |
| for (int i = 0; i < 5; i++) { | |
| int b = data[pos++]; | |
| value |= (b & 127) << shift; | |
| if ((b & 128) == 0) { | |
| posOut[0] = (uint8_t) pos; | |
| return value; | |
| } | |
| shift += 7; | |
| if (shift >= 35) | |
| return 0; | |
| } | |
| return 0; | |
| } | |
| int Varint_Write(uint8_t *dst, int value, int offset) | |
| { | |
| int pos = 0; | |
| for (int i = 0; i < 5; i++) { | |
| int b = value & 127; | |
| value >>= 7; | |
| if (value != 0) { | |
| dst[offset + pos++] = (uint8_t) (b | 128); | |
| } | |
| else { | |
| dst[offset + pos++] = (uint8_t) b; | |
| return pos; | |
| } | |
| } | |
| return 0; | |
| } | |
| int Lz4_Decompress(uint8_t const *src, uint8_t *dst, int compressedSize, int dstCapacity, int srcOffset) | |
| { | |
| int srcPos = 0; | |
| int dstPos = 0; | |
| while (srcPos < compressedSize && dstPos < dstCapacity) { | |
| if (srcPos >= compressedSize) | |
| return -1; | |
| int token = src[srcOffset + srcPos++]; | |
| int encCount = token & 15; | |
| int litCount = token >> 4 & 15; | |
| if (litCount == 15) { | |
| int sum = 0; | |
| do { | |
| if (srcPos >= compressedSize) | |
| return -1; | |
| sum = src[srcOffset + srcPos++]; | |
| litCount += sum; | |
| } | |
| while (sum == 255); | |
| } | |
| if (srcPos + litCount > compressedSize) | |
| return -1; | |
| if (dstPos + litCount > dstCapacity) | |
| return -1; | |
| memcpy(dst + dstPos, src + (srcOffset + srcPos), litCount); | |
| srcPos += litCount; | |
| dstPos += litCount; | |
| if (srcPos >= compressedSize) | |
| break; | |
| if (srcPos + 1 >= compressedSize) | |
| return -1; | |
| int back = src[srcOffset + srcPos] | src[srcOffset + srcPos + 1] << 8; | |
| srcPos += 2; | |
| if (back <= 0 || back > dstPos) | |
| return -1; | |
| if (encCount == 15) { | |
| int sum = 0; | |
| do { | |
| if (srcPos >= compressedSize) | |
| return -1; | |
| sum = src[srcOffset + srcPos++]; | |
| encCount += sum; | |
| } | |
| while (sum == 255); | |
| } | |
| encCount += 4; | |
| if (dstPos + encCount > dstCapacity) | |
| return -1; | |
| int encPos = dstPos - back; | |
| if (encCount <= back) { | |
| memcpy(dst + dstPos, dst + encPos, encCount); | |
| dstPos += encCount; | |
| } | |
| else { | |
| for (int i = 0; i < encCount; i++) { | |
| dst[dstPos++] = dst[encPos++]; | |
| } | |
| } | |
| } | |
| return dstPos; | |
| } | |
| int Lz4_GetMaxCompressedSize(int inputSize) | |
| { | |
| return inputSize > 2113929216 ? 0 : inputSize + inputSize / 255 + 16; | |
| } | |
| int Lz4_Compress(uint8_t const *src, uint8_t *dst, int srcSize, int dstCapacity, int dstOffset) | |
| { | |
| if (srcSize < 0) | |
| return -1; | |
| int dstPos = 0; | |
| int litLen = srcSize; | |
| int tokenLit = litLen < 15 ? litLen : 15; | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = (uint8_t) (tokenLit << 4); | |
| if (litLen >= 15) { | |
| int len = litLen - 15; | |
| while (len >= 255) { | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = 255; | |
| len -= 255; | |
| } | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = (uint8_t) len; | |
| } | |
| if (dstPos + srcSize > dstCapacity) | |
| return -1; | |
| memcpy(dst + (dstOffset + dstPos), src, srcSize); | |
| dstPos += srcSize; | |
| return dstPos; | |
| } | |
| void SakashoObfuscation_Initialize(SakashoObfuscation *self, const char *commonKey, const char *sessionId) | |
| { | |
| self->xorLen = 0; | |
| for (int i = 0; i < (ptrdiff_t) strlen(commonKey) && self->xorLen < 256; i++) { | |
| self->xorTable[self->xorLen++] = (uint8_t) (-98 - commonKey[i]); | |
| } | |
| for (int i = 0; i < (ptrdiff_t) strlen(sessionId) && self->xorLen < 256; i++) { | |
| self->xorTable[self->xorLen++] = (uint8_t) sessionId[i]; | |
| } | |
| } | |
| void SakashoObfuscation_XorDecode(const SakashoObfuscation *self, uint8_t *data, int dataLen) | |
| { | |
| SakashoObfuscation_XorDecodeBuffer(data, dataLen, self->xorTable, self->xorLen); | |
| } | |
| void SakashoObfuscation_XorEncode(const SakashoObfuscation *self, uint8_t *data, int dataLen) | |
| { | |
| SakashoObfuscation_XorEncodeBuffer(data, dataLen, self->xorTable, self->xorLen); | |
| } | |
| int SakashoObfuscation_GetDecompressedSize(const SakashoObfuscation *self, uint8_t const *data, uint8_t *posOut) | |
| { | |
| uint8_t tmp[5]; | |
| memcpy(tmp, data, 5); | |
| SakashoObfuscation_XorDecode(self, tmp, 5); | |
| uint8_t posOutLocal[1]; | |
| posOutLocal[0] = 0; | |
| return posOut == NULL ? Varint_Read(tmp, posOutLocal) : Varint_Read(tmp, posOut); | |
| } | |
| uint8_t *SakashoObfuscation_Decode(const SakashoObfuscation *self, uint8_t const *data, int dataLen) | |
| { | |
| uint8_t *tmp = (uint8_t *) malloc(dataLen * sizeof(uint8_t)); | |
| memcpy(tmp, data, dataLen); | |
| SakashoObfuscation_XorDecode(self, tmp, dataLen); | |
| uint8_t posOut[1]; | |
| posOut[0] = 0; | |
| int size = Varint_Read(tmp, posOut); | |
| if (size <= 0) { | |
| free(tmp); | |
| return NULL; | |
| } | |
| assert(size <= 104857600); | |
| uint8_t *output = (uint8_t *) FuShared_Make(size, sizeof(uint8_t), NULL, NULL); | |
| int decompressed = Lz4_Decompress(tmp, output, dataLen - posOut[0], size, posOut[0]); | |
| if (decompressed <= 0) { | |
| FuShared_Release(output); | |
| free(tmp); | |
| return NULL; | |
| } | |
| free(tmp); | |
| return output; | |
| } | |
| uint8_t *SakashoObfuscation_Encode(const SakashoObfuscation *self, uint8_t const *data, int dataLen, int *posOut) | |
| { | |
| int maxCompressed = Lz4_GetMaxCompressedSize(dataLen); | |
| if (maxCompressed <= 0) | |
| return NULL; | |
| uint8_t *buffer = (uint8_t *) FuShared_Make(5 + maxCompressed, sizeof(uint8_t), NULL, NULL); | |
| int varintSize = Varint_Write(buffer, dataLen, 0); | |
| if (varintSize <= 0) { | |
| FuShared_Release(buffer); | |
| return NULL; | |
| } | |
| int compressedSize = Lz4_Compress(data, buffer, dataLen, maxCompressed, varintSize); | |
| if (compressedSize <= 0) { | |
| FuShared_Release(buffer); | |
| return NULL; | |
| } | |
| int totalSize = varintSize + compressedSize; | |
| SakashoObfuscation_XorEncode(self, buffer, totalSize); | |
| posOut[0] = totalSize; | |
| return buffer; | |
| } | |
| void SakashoObfuscation_XorDecodeBuffer(uint8_t *data, int dataLen, uint8_t const *table, int tableLen) | |
| { | |
| for (int i = 0; i < dataLen; i++) { | |
| int keyByte = table[(i + 1) % tableLen]; | |
| int inputByte = data[i]; | |
| if ((keyByte & 7) == 0) { | |
| data[i] = (uint8_t) (inputByte ^ keyByte); | |
| } | |
| else { | |
| int shift = keyByte & 7; | |
| data[i] = (uint8_t) (inputByte >> (8 - shift) | inputByte << shift); | |
| } | |
| } | |
| } | |
| void SakashoObfuscation_XorEncodeBuffer(uint8_t *data, int dataLen, uint8_t const *table, int tableLen) | |
| { | |
| for (int i = 0; i < dataLen; i++) { | |
| int keyByte = table[(i + 1) % tableLen]; | |
| int inputByte = data[i]; | |
| if ((keyByte & 7) == 0) { | |
| data[i] = (uint8_t) (inputByte ^ keyByte); | |
| } | |
| else { | |
| int shift = keyByte & 7; | |
| data[i] = (uint8_t) (inputByte << (8 - shift) | inputByte >> shift); | |
| } | |
| } | |
| } | |
| #include <stdio.h> | |
| #include <stdlib.h> | |
| #include <string.h> | |
| #include <stdint.h> | |
| #include <stdbool.h> | |
| #define DEFAULT_COMMON_KEY "9ec1c78fa2cb34e2bed5691c08432f04" | |
| /* ---------- Utility ---------- */ | |
| static uint8_t *read_all(FILE *f, size_t *outSize) { | |
| uint8_t *buf = NULL; | |
| size_t size = 0; | |
| size_t cap = 0; | |
| for (;;) { | |
| if (size + 4096 > cap) { | |
| cap = cap ? cap * 2 : 8192; | |
| buf = (uint8_t *)realloc(buf, cap); | |
| if (!buf) return NULL; | |
| } | |
| size_t n = fread(buf + size, 1, cap - size, f); | |
| size += n; | |
| if (n == 0) break; | |
| } | |
| *outSize = size; | |
| return buf; | |
| } | |
| static int write_all(FILE *f, const uint8_t *data, size_t size) { | |
| return fwrite(data, 1, size, f) == size ? 0 : -1; | |
| } | |
| static void usage(const char *prog) { | |
| fprintf(stderr, | |
| "Usage: %s (-e|-d) [options]\n" | |
| "\n" | |
| "Options:\n" | |
| " -e, --encode Encode input\n" | |
| " -d, --decode Decode input\n" | |
| " -k, --common-key KEY Common key (default Miitomo key)\n" | |
| " -s, --session-id ID Session ID (optional)\n" | |
| " -i, --input FILE Input file (default: stdin)\n" | |
| " -o, --output FILE Output file (default: stdout)\n", | |
| prog); | |
| } | |
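| /* Example invocations (hypothetical binary and file names): | |
|  *   ./sakasho_obfs -d -i cooked_response.bin -o plain_response.json | |
|  *   ./sakasho_obfs -e -s PLAYER_SESSION_ID -i plain_request.json -o cooked_request.bin */ | |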
| /* ---------- Main ---------- */ | |
| int main(int argc, char **argv) { | |
| bool doEncode = false; | |
| bool doDecode = false; | |
| const char *commonKey = DEFAULT_COMMON_KEY; | |
| const char *sessionId = ""; | |
| const char *inputPath = NULL; | |
| const char *outputPath = NULL; | |
| /* --- Parse args (simple, explicit) --- */ | |
| for (int i = 1; i < argc; i++) { | |
| const char *a = argv[i]; | |
| if (!strcmp(a, "-e") || !strcmp(a, "--encode")) { | |
| doEncode = true; | |
| } else if (!strcmp(a, "-d") || !strcmp(a, "--decode")) { | |
| doDecode = true; | |
| } else if (!strcmp(a, "-k") || !strcmp(a, "--common-key")) { | |
| if (++i >= argc) goto bad; | |
| commonKey = argv[i]; | |
| } else if (!strcmp(a, "-s") || !strcmp(a, "--session-id")) { | |
| if (++i >= argc) goto bad; | |
| sessionId = argv[i]; | |
| } else if (!strcmp(a, "-i") || !strcmp(a, "--input")) { | |
| if (++i >= argc) goto bad; | |
| inputPath = argv[i]; | |
| } else if (!strcmp(a, "-o") || !strcmp(a, "--output")) { | |
| if (++i >= argc) goto bad; | |
| outputPath = argv[i]; | |
| } else { | |
| goto bad; | |
| } | |
| } | |
| if (doEncode == doDecode) { | |
| fprintf(stderr, "Error: must specify exactly one of -e or -d\n"); | |
| goto bad; | |
| } | |
| /* --- Open IO --- */ | |
| FILE *in = inputPath ? fopen(inputPath, "rb") : stdin; | |
| if (!in) { | |
| perror("fopen input"); | |
| return 1; | |
| } | |
| FILE *out = outputPath ? fopen(outputPath, "wb") : stdout; | |
| if (!out) { | |
| perror("fopen output"); | |
| if (in != stdin) fclose(in); | |
| return 1; | |
| } | |
| /* --- Read input --- */ | |
| size_t inputSize = 0; | |
| uint8_t *input = read_all(in, &inputSize); | |
| if (!input) { | |
| fprintf(stderr, "Failed to read input\n"); | |
| return 1; | |
| } | |
| /* --- Init obfuscator --- */ | |
| SakashoObfuscation obfs; | |
| SakashoObfuscation_Initialize(&obfs, commonKey, sessionId); | |
| uint8_t *result = NULL; | |
| size_t resultSize = 0; | |
| if (doDecode) { | |
| result = SakashoObfuscation_Decode(&obfs, input, (int)inputSize); | |
| if (!result) { | |
| fprintf(stderr, "Decode failed\n"); | |
| return 1; | |
| } | |
| /* Size is known from varint */ | |
| uint8_t pos = 0; | |
| resultSize = SakashoObfuscation_GetDecompressedSize(&obfs, input, &pos); | |
| if (resultSize == 0) { | |
| fprintf(stderr, "Invalid decompressed size\n"); | |
| free(result); | |
| return 1; | |
| } | |
| } else { | |
| int outLen = 0; | |
| result = SakashoObfuscation_Encode(&obfs, input, (int)inputSize, &outLen); | |
| if (!result || outLen <= 0) { | |
| fprintf(stderr, "Encode failed\n"); | |
| return 1; | |
| } | |
| resultSize = (size_t)outLen; | |
| } | |
| /* --- Write output --- */ | |
| if (write_all(out, result, resultSize) != 0) { | |
| fprintf(stderr, "Write failed\n"); | |
| return 1; | |
| } | |
| /* --- Cleanup --- */ | |
| free(input); | |
| FuShared_Release(result); | |
| if (in != stdin) fclose(in); | |
| if (out != stdout) fclose(out); | |
| return 0; | |
| bad: | |
| usage(argv[0]); | |
| return 1; | |
| } |
| package main | |
| import ( | |
| "bytes" | |
| "crypto/tls" | |
| "flag" | |
| "io" | |
| "log" | |
| "net/http" | |
| "net/http/httputil" | |
| "net/http/httptest" | |
| "net/url" | |
| "strings" | |
| //"strconv" | |
| // for access logs | |
| "fmt" | |
| "net" | |
| "os" | |
| "time" | |
| //"github.com/pierrec/lz4" | |
| "unsafe" | |
| ) | |
| const commonKey = "9ec1c78fa2cb34e2bed5691c08432f04" | |
| // Generated automatically with "fut". Do not edit. | |
| const Varint_MAX_SIZE_INT = 5 | |
| type SakashoObfuscation struct { | |
| xorTable [256]uint8 | |
| xorLen int | |
| } | |
| // MemCpy copies count characters from the object pointed to by src to the object pointed to by dest. Both objects are | |
| // interpreted as arrays of byte. | |
| // | |
| // The behavior is undefined if access occurs beyond the end of the dest array. If the objects overlap (which is a | |
| // violation of the restrict contract), the behavior is undefined. The behavior is undefined if either dest or src is a | |
| // null pointer. | |
| func MemCpy(dst, src unsafe.Pointer, sz int) unsafe.Pointer { | |
| if dst == nil { | |
| panic("nil destination") | |
| } | |
| if sz == 0 || src == nil { | |
| return dst | |
| } | |
| bdst := unsafe.Slice((*byte)(dst), sz) | |
| bsrc := unsafe.Slice((*byte)(src), sz) | |
| copy(bdst, bsrc) | |
| return dst | |
| } | |
| func findnull[T interface{ byte | uint16 | uint32 }](str *T) int { | |
| if str == nil { | |
| return 0 | |
| } | |
| var zero T | |
| size := unsafe.Sizeof(zero) | |
| i := 0 | |
| for *str != 0 { | |
| str = (*T)(unsafe.Add(unsafe.Pointer(str), size)) | |
| i++ | |
| } | |
| return i | |
| } | |
| // StrLen returns the length of the given null-terminated byte string, that is, the number of characters in a character | |
| // array whose first element is pointed to by str up to and not including the first null character. | |
| // | |
| // The behavior is undefined if str is not a pointer to a null-terminated byte string. | |
| func StrLen(str *byte) int { | |
| return findnull(str) | |
| } | |
| func Varint_Read(data *uint8, posOut *uint8) int { | |
| var ( | |
| value int = 0 | |
| shift int = 0 | |
| pos int = int(*posOut) | |
| ) | |
| for i := int(0); i < 5; i++ { | |
| var b int = int(*(*uint8)(unsafe.Add(unsafe.Pointer(data), func() int { | |
| p_ := &pos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }()))) | |
| value |= (b & 127) << shift | |
| if (b & 128) == 0 { | |
| *posOut = uint8(int8(pos)) | |
| return value | |
| } | |
| shift += 7 | |
| if shift >= 35 { | |
| return 0 | |
| } | |
| } | |
| return 0 | |
| } | |
| func Varint_Write(dst *uint8, value int, offset int) int { | |
| var pos int = 0 | |
| for i := int(0); i < 5; i++ { | |
| var b int = value & 127 | |
| value >>= 7 | |
| if value != 0 { | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(dst), offset+func() int { | |
| p_ := &pos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }())) = uint8(int8(b | 128)) | |
| } else { | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(dst), offset+func() int { | |
| p_ := &pos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }())) = uint8(int8(b)) | |
| return pos | |
| } | |
| } | |
| return 0 | |
| } | |
| func Lz4_Decompress(src *uint8, dst *uint8, compressedSize int, dstCapacity int, srcOffset int) int { | |
| var ( | |
| srcPos int = 0 | |
| dstPos int = 0 | |
| ) | |
| for srcPos < compressedSize && dstPos < dstCapacity { | |
| if srcPos >= compressedSize { | |
| return -1 | |
| } | |
| var token int = int(*(*uint8)(unsafe.Add(unsafe.Pointer(src), srcOffset+func() int { | |
| p_ := &srcPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }()))) | |
| var encCount int = token & 15 | |
| var litCount int = token >> 4 & 15 | |
| if litCount == 15 { | |
| var sum int = 0 | |
| for { | |
| if srcPos >= compressedSize { | |
| return -1 | |
| } | |
| sum = int(*(*uint8)(unsafe.Add(unsafe.Pointer(src), srcOffset+func() int { | |
| p_ := &srcPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }()))) | |
| litCount += sum | |
| if sum != 255 { | |
| break | |
| } | |
| } | |
| } | |
| if srcPos+litCount > compressedSize { | |
| return -1 | |
| } | |
| if dstPos+litCount > dstCapacity { | |
| return -1 | |
| } | |
| MemCpy(unsafe.Add(unsafe.Pointer(dst), dstPos), unsafe.Add(unsafe.Pointer(src), srcOffset+srcPos), litCount) | |
| srcPos += litCount | |
| dstPos += litCount | |
| if srcPos >= compressedSize { | |
| break | |
| } | |
| if srcPos+1 >= compressedSize { | |
| return -1 | |
| } | |
| var back int = int(*(*uint8)(unsafe.Add(unsafe.Pointer(src), srcOffset+srcPos))) | int(*(*uint8)(unsafe.Add(unsafe.Pointer(src), srcOffset+srcPos+1)))<<8 | |
| srcPos += 2 | |
| if back <= 0 || back > dstPos { | |
| return -1 | |
| } | |
| if encCount == 15 { | |
| var sum int = 0 | |
| for { | |
| if srcPos >= compressedSize { | |
| return -1 | |
| } | |
| sum = int(*(*uint8)(unsafe.Add(unsafe.Pointer(src), srcOffset+func() int { | |
| p_ := &srcPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }()))) | |
| encCount += sum | |
| if sum != 255 { | |
| break | |
| } | |
| } | |
| } | |
| encCount += 4 | |
| if dstPos+encCount > dstCapacity { | |
| return -1 | |
| } | |
| var encPos int = dstPos - back | |
| if encCount <= back { | |
| MemCpy(unsafe.Add(unsafe.Pointer(dst), dstPos), unsafe.Add(unsafe.Pointer(dst), encPos), encCount) | |
| dstPos += encCount | |
| } else { | |
| for i := int(0); i < encCount; i++ { | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(dst), func() int { | |
| p_ := &dstPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }())) = *(*uint8)(unsafe.Add(unsafe.Pointer(dst), func() int { | |
| p_ := &encPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }())) | |
| } | |
| } | |
| } | |
| return dstPos | |
| } | |
| func Lz4_GetMaxCompressedSize(inputSize int) int { | |
| if inputSize > 2113929216 { | |
| return 0 | |
| } | |
| return inputSize + inputSize/255 + 16 | |
| } | |
| func Lz4_Compress(src *uint8, dst *uint8, srcSize int, dstCapacity int, dstOffset int) int { | |
| if srcSize < 0 { | |
| return -1 | |
| } | |
| var dstPos int = 0 | |
| var litLen int = srcSize | |
| var tokenLit int | |
| if litLen < 15 { | |
| tokenLit = litLen | |
| } else { | |
| tokenLit = 15 | |
| } | |
| if dstPos >= dstCapacity { | |
| return -1 | |
| } | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(dst), dstOffset+func() int { | |
| p_ := &dstPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }())) = uint8(int8(tokenLit << 4)) | |
| if litLen >= 15 { | |
| var len_ int = litLen - 15 | |
| for len_ >= 255 { | |
| if dstPos >= dstCapacity { | |
| return -1 | |
| } | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(dst), dstOffset+func() int { | |
| p_ := &dstPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }())) = 255 | |
| len_ -= 255 | |
| } | |
| if dstPos >= dstCapacity { | |
| return -1 | |
| } | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(dst), dstOffset+func() int { | |
| p_ := &dstPos | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }())) = uint8(int8(len_)) | |
| } | |
| if dstPos+srcSize > dstCapacity { | |
| return -1 | |
| } | |
| MemCpy(unsafe.Add(unsafe.Pointer(dst), dstOffset+dstPos), unsafe.Pointer(src), srcSize) | |
| dstPos += srcSize | |
| return dstPos | |
| } | |
| func SakashoObfuscation_Initialize(self *SakashoObfuscation, commonKey *byte, sessionId *byte) { | |
| self.xorLen = 0 | |
| for i := int(0); i < int(int64(StrLen(commonKey))) && self.xorLen < 256; i++ { | |
| var c int = int(*(*byte)(unsafe.Add(unsafe.Pointer(commonKey), i))) | |
| self.xorTable[func() int { | |
| p_ := &self.xorLen | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }()] = uint8(int8(-98 - c)) | |
| } | |
| for i := int(0); i < int(int64(StrLen(sessionId))) && self.xorLen < 256; i++ { | |
| self.xorTable[func() int { | |
| p_ := &self.xorLen | |
| x := *p_ | |
| *p_++ | |
| return x | |
| }()] = uint8(*(*byte)(unsafe.Add(unsafe.Pointer(sessionId), i))) | |
| } | |
| } | |
| func SakashoObfuscation_XorDecode(self *SakashoObfuscation, data *uint8, dataLen int) { | |
| SakashoObfuscation_XorDecodeBuffer(data, dataLen, &self.xorTable[0], self.xorLen) | |
| } | |
| func SakashoObfuscation_XorEncode(self *SakashoObfuscation, data *uint8, dataLen int) { | |
| SakashoObfuscation_XorEncodeBuffer(data, dataLen, &self.xorTable[0], self.xorLen) | |
| } | |
| func SakashoObfuscation_GetDecompressedSize(self *SakashoObfuscation, data *uint8, posOut *uint8) int { | |
| var tmp [5]uint8 | |
| MemCpy(unsafe.Pointer(&tmp[0]), unsafe.Pointer(data), 5) | |
| SakashoObfuscation_XorDecode(self, &tmp[0], 5) | |
| var posOutLocal [1]uint8 | |
| posOutLocal[0] = 0 | |
| if posOut == nil { | |
| return Varint_Read(&tmp[0], &posOutLocal[0]) | |
| } | |
| return Varint_Read(&tmp[0], posOut) | |
| } | |
| func SakashoObfuscation_XorDecodeBuffer(data *uint8, dataLen int, table *uint8, tableLen int) { | |
| for i := int(0); i < dataLen; i++ { | |
| var ( | |
| keyByte int = int(*(*uint8)(unsafe.Add(unsafe.Pointer(table), (i+1)%tableLen))) | |
| inputByte int = int(*(*uint8)(unsafe.Add(unsafe.Pointer(data), i))) | |
| ) | |
| if (keyByte & 7) == 0 { | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(data), i)) = uint8(int8(inputByte ^ keyByte)) | |
| } else { | |
| var shift int = keyByte & 7 | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(data), i)) = uint8(int8(inputByte>>(8-shift) | inputByte<<shift)) | |
| } | |
| } | |
| } | |
| func SakashoObfuscation_XorEncodeBuffer(data *uint8, dataLen int, table *uint8, tableLen int) { | |
| for i := int(0); i < dataLen; i++ { | |
| var ( | |
| keyByte int = int(*(*uint8)(unsafe.Add(unsafe.Pointer(table), (i+1)%tableLen))) | |
| inputByte int = int(*(*uint8)(unsafe.Add(unsafe.Pointer(data), i))) | |
| ) | |
| if (keyByte & 7) == 0 { | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(data), i)) = uint8(int8(inputByte ^ keyByte)) | |
| } else { | |
| var shift int = keyByte & 7 | |
| *(*uint8)(unsafe.Add(unsafe.Pointer(data), i)) = uint8(int8(inputByte<<(8-shift) | inputByte>>shift)) | |
| } | |
| } | |
| } | |
| // SakashoObfuscation wrapper with convenience methods. | |
| // The struct is already defined in the transpiled code above; | |
| // we just add these methods. | |
| // Decode fully decodes and decompresses obfuscated bytes. | |
| // Returns the decompressed data or an error. | |
| func (obfs *SakashoObfuscation) Decode(data []byte) ([]byte, error) { | |
| if len(data) == 0 { | |
| return nil, fmt.Errorf("empty data") | |
| } | |
| // Copy input (XOR decode modifies in-place) | |
| decodedData := make([]byte, len(data)) | |
| copy(decodedData, data) | |
| // XOR decode in-place | |
| SakashoObfuscation_XorDecode( | |
| obfs, | |
| (*uint8)(unsafe.Pointer(&decodedData[0])), | |
| len(decodedData), | |
| ) | |
| // Read varint to get decompressed size | |
| posOut := uint8(0) | |
| decompressedSize := Varint_Read( | |
| (*uint8)(unsafe.Pointer(&decodedData[0])), | |
| &posOut, | |
| ) | |
| if decompressedSize < 1 { | |
| return nil, fmt.Errorf("invalid varint") | |
| } | |
| // Extract compressed data (skip varint) | |
| compressedData := decodedData[posOut:] | |
| // Decompress | |
| output := make([]byte, decompressedSize) | |
| result := Lz4_Decompress( | |
| (*uint8)(unsafe.Pointer(&compressedData[0])), | |
| (*uint8)(unsafe.Pointer(&output[0])), | |
| len(compressedData), | |
| decompressedSize, | |
| 0, // srcOffset | |
| ) | |
| if result < 0 { | |
| return nil, fmt.Errorf("lz4 decompress failed") | |
| } | |
| return output[:result], nil | |
| } | |
| // Encode fully compresses and obfuscates raw bytes. | |
| // Returns the encoded data or an error. | |
| func (obfs *SakashoObfuscation) Encode(data []byte) ([]byte, error) { | |
| // Reject empty input: &data[0] below would panic on a zero-length slice. | |
| if len(data) == 0 { | |
| return nil, fmt.Errorf("empty data") | |
| } | |
| // Get max compressed size bound | |
| maxCompressed := Lz4_GetMaxCompressedSize(len(data)) | |
| if maxCompressed <= 0 { | |
| return nil, fmt.Errorf("lz4 bound failed") | |
| } | |
| // Allocate buffer for varint + compressed | |
| // We'll use a temporary buffer larger than needed, then trim | |
| tempBuf := make([]byte, 5+maxCompressed) // 5 = max varint size | |
| // Write varint (decompressed size) | |
| varintSize := Varint_Write( | |
| (*uint8)(unsafe.Pointer(&tempBuf[0])), | |
| len(data), | |
| 0, // offset | |
| ) | |
| if varintSize <= 0 { | |
| return nil, fmt.Errorf("varint write failed") | |
| } | |
| // Compress into buffer after varint | |
| compressedSize := Lz4_Compress( | |
| (*uint8)(unsafe.Pointer(&data[0])), | |
| (*uint8)(unsafe.Pointer(&tempBuf[0])), | |
| len(data), | |
| maxCompressed, | |
| varintSize, // dstOffset | |
| ) | |
| if compressedSize <= 0 { | |
| return nil, fmt.Errorf("lz4 compress failed") | |
| } | |
| totalSize := varintSize + compressedSize | |
| // Trim to exact size | |
| encodedData := tempBuf[:totalSize] | |
| // XOR encode in-place | |
| SakashoObfuscation_XorEncode( | |
| obfs, | |
| (*uint8)(unsafe.Pointer(&encodedData[0])), | |
| len(encodedData), | |
| ) | |
| // Make a copy to return (safe ownership) | |
| result := make([]byte, len(encodedData)) | |
| copy(result, encodedData) | |
| return result, nil | |
| } | |
| // Initialize builds the XOR table from common key and session ID. | |
| // Wrapper for convenience (C version takes *byte). | |
| func (obfs *SakashoObfuscation) Initialize(commonKey, sessionID string) { | |
| // Create C-compatible null-terminated strings | |
| ckBytes := append([]byte(commonKey), 0) | |
| sidBytes := append([]byte(sessionID), 0) | |
| SakashoObfuscation_Initialize( | |
| obfs, | |
| (*byte)(unsafe.Pointer(&ckBytes[0])), | |
| (*byte)(unsafe.Pointer(&sidBytes[0])), | |
| ) | |
| } | |
| // Function to detect and handle interruptions in the data | |
| func detectInterruptions(data []byte, cutoffs []string) ([]byte, bool) { | |
| for _, cutoff := range cutoffs { | |
| index := bytes.Index(data, []byte(cutoff)) | |
| if index != -1 { | |
| return data[:index], true | |
| } | |
| } | |
| return data, false | |
| } | |
| var cutoffs []string = []string{ | |
| "<!doctype", | |
| } | |
| // proxyHandler handles incoming requests, decodes them, forwards to upstream, and re-encodes responses. | |
| func proxyHandler(upstreamURL *url.URL, certFile, keyFile string) func(w http.ResponseWriter, r *http.Request) { | |
| upstreamProxy := httputil.NewSingleHostReverseProxy(upstreamURL) | |
| upstreamProxy.Transport = &http.Transport{ | |
| TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, | |
| Proxy: http.ProxyFromEnvironment, // Allow proxy config from env | |
| } | |
| // Override the Director function to ensure we pass the original URL's host header | |
| upstreamProxy.Director = func(req *http.Request) { | |
| // Set the scheme and host to the upstream server's | |
| req.URL.Scheme = upstreamURL.Scheme | |
| req.URL.Host = upstreamURL.Host | |
| req.Host = upstreamURL.Host | |
| // Copy over the original path and query | |
| req.URL.Path = req.URL.Path | |
| req.URL.RawQuery = req.URL.RawQuery | |
| // Set the Host header explicitly | |
| req.Header.Set("Host", upstreamURL.Host) | |
| //req.Header.Del("Content-Length") | |
| } | |
| return func(w http.ResponseWriter, r *http.Request) { | |
| sessionIDCookie, _ := r.Cookie("player_session_id") | |
| userAgent := r.Header.Get("User-Agent") | |
| // Bypass de/obfuscation logic if sessionID is missing, User-Agent contains "SakashoClient", or it's /v1/session | |
| if sessionIDCookie == nil || !strings.Contains(userAgent, "SakashoClient") || r.URL.Path == "/v1/session" { | |
| // Directly proxy without de/obfuscation | |
| upstreamProxy.ServeHTTP(w, r) | |
| return | |
| } | |
| sessionID := sessionIDCookie.Value | |
| // Build obfuscator | |
| obfs := &SakashoObfuscation{} | |
| obfs.Initialize(commonKey, sessionID) | |
| //xorTable := buildXorTable(commonKey, sessionID) | |
| // Properly handle empty request bodies | |
| if r.Body != nil { | |
| buf := new(bytes.Buffer) | |
| _, err := io.Copy(buf, r.Body) | |
| if err != nil && err != io.EOF { | |
| http.Error(w, "Failed to read request body", http.StatusInternalServerError) | |
| return | |
| } | |
| // Only decode if the body is not empty | |
| if buf.Len() > 0 { | |
| //decodedBody, err := decodeAndDecompress(buf, xorTable) | |
| decodedBody, err := obfs.Decode(buf.Bytes()) | |
| if err != nil { | |
| http.Error(w, "Failed to decode request", http.StatusInternalServerError) | |
| return | |
| } | |
| //r.Header.Set("Content-Length", strconv.Itoa(len(decodedBody))) | |
| // Replace body with decoded data | |
| r.Body = io.NopCloser(bytes.NewReader(decodedBody)) | |
| // Remove Content-Length header since the body size has changed | |
| r.Header.Del("Content-Length") | |
| r.ContentLength = -1 | |
| } | |
| } | |
| // Modify headers for upstream server if necessary | |
| r.Host = upstreamURL.Host | |
| r.URL.Scheme = upstreamURL.Scheme | |
| r.URL.Host = upstreamURL.Host | |
| // Set the Host header to match the upstream server | |
| r.Header.Set("Host", upstreamURL.Host) | |
| // Capture the upstream response | |
| rec := httptest.NewRecorder() | |
| upstreamProxy.ServeHTTP(rec, r) | |
| // Pass through upstream headers to client | |
| for k, v := range rec.Header() { | |
| w.Header()[k] = v | |
| } | |
| // Re-encode the response body | |
| if rec.Body != nil && rec.Body.Len() > 0 { | |
| //encodedBody, err := compressAndEncode(rec.Body.Bytes(), xorTable) | |
| encodedBody, err := obfs.Encode(rec.Body.Bytes()) | |
| if err != nil { | |
| http.Error(w, "Failed to encode response", http.StatusInternalServerError) | |
| return | |
| } | |
| // Copy encoded response to original response writer | |
| w.WriteHeader(rec.Code) | |
| /*for k, v := range rec.Header() { | |
| w.Header()[k] = v | |
| }*/ | |
| w.Write(encodedBody) | |
| } | |
| } | |
| } | |
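| // Example run (flag defaults from main below; upstream host and cert/key paths are placeholders): | |
| //   go run . -upstream https://upstream.example -hostname localhost:8080 -cert cert.pem -key key.pem | |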
| func main() { | |
| // Argument parsing for certificate, upstream, and hostname to client | |
| var upstreamAddr string | |
| var hostname string | |
| var certFile string | |
| var keyFile string | |
| flag.StringVar(&upstreamAddr, "upstream", "https://upstream.server", "Upstream server URL") | |
| flag.StringVar(&hostname, "hostname", "localhost:8080", "Hostname and port for the client to connect to") | |
| flag.StringVar(&certFile, "cert", "cert.pem", "TLS certificate file") | |
| flag.StringVar(&keyFile, "key", "key.pem", "TLS key file") | |
| flag.Parse() | |
| upstreamURL, err := url.Parse(upstreamAddr) | |
| if err != nil { | |
| log.Fatalf("Invalid upstream URL: %v", err) | |
| } | |
| http.Handle("/", logRequest(http.HandlerFunc(proxyHandler(upstreamURL, certFile, keyFile)))) | |
| log.Printf("Starting proxy server on %s", hostname) | |
| log.Fatal(http.ListenAndServeTLS(hostname, certFile, keyFile, nil)) | |
| } | |
| // fancy access logs | |
| const ( | |
| // ANSI color codes for access logs | |
| ANSIReset = "\033[0m" | |
| ANSIRed = "\033[31m" | |
| ANSIGreen = "\033[32m" | |
| ANSIYellow = "\033[33m" | |
| ANSIPurple = "\033[35m" | |
| ANSIFaint = "\033[2m" | |
| ANSIBold = "\033[1m" | |
| ANSICyan = "\033[36m" | |
| ANSIBgRed = "\033[101m" | |
| ANSIBgBlue = "\033[104m" | |
| ANSIBgMagenta = "\033[105m" | |
| ) | |
| func isColorTerminal() bool { | |
| // NOTE: hack | |
| return os.Getenv("TERM") == "xterm-256color" | |
| } | |
| // getClientIP retrieves the client IP address from the request, | |
| // considering the X-Forwarded-For header if present. | |
| func getClientIP(r *http.Request) string { | |
| host, _, _ := net.SplitHostPort(r.RemoteAddr) | |
| return host | |
| } | |
| // responseWriter is a custom http.ResponseWriter that captures the status code | |
| type responseWriter struct { | |
| http.ResponseWriter | |
| statusCode int | |
| } | |
| // newResponseWriter creates a new responseWriter | |
| func newResponseWriter(w http.ResponseWriter) *responseWriter { | |
| return &responseWriter{w, http.StatusOK} | |
| } | |
| // WriteHeader captures the status code | |
| func (rw *responseWriter) WriteHeader(code int) { | |
| rw.statusCode = code | |
| rw.ResponseWriter.WriteHeader(code) | |
| } | |
| // logRequest logs each request in Apache/Nginx standard format with ANSI colors | |
| func logRequest(handler http.Handler) http.Handler { | |
| return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |
| start := time.Now() | |
| rw := newResponseWriter(w) | |
| handler.ServeHTTP(rw, r) | |
| status := rw.statusCode | |
| latency := time.Since(start) | |
| clientIP := getClientIP(r) | |
| if isColorTerminal() { | |
| statusColor := ANSIGreen | |
| // Determine the status color | |
| if status >= 400 && status < 500 { | |
| statusColor = ANSIYellow | |
| } else if status >= 500 { | |
| statusColor = ANSIRed | |
| } | |
| latencyColor := getLatencyGradientColor(latency) | |
| clientIPColor := ANSICyan | |
| if r.Header.Get("X-Forwarded-For") != "" { | |
| clientIPColor = ANSIBgMagenta | |
| } | |
| var query string | |
| if r.URL.RawQuery != "" { | |
| query += "?" | |
| } | |
| query += r.URL.RawQuery | |
| queryColored := colorQueryParameters(query) | |
| // so many colors..... | |
| fmt.Println(clientIPColor + clientIP + ANSIReset + | |
| " - - [" + start.Format("02/Jan/2006:15:04:05 -0700") + "] \"" + | |
| ANSIGreen + r.Method + " " + r.URL.Path + queryColored + " " + ANSIReset + | |
| ANSIFaint + r.Proto + ANSIReset + "\" " + | |
| statusColor + fmt.Sprint(status) + ANSIReset + " " + | |
| fmt.Sprint(r.ContentLength) + " \"" + | |
| ANSIPurple + r.Referer() + ANSIReset + "\" \"" + | |
| ANSIFaint + r.UserAgent() + ANSIReset + "\" " + | |
| latencyColor + fmt.Sprint(latency) + ANSIReset) | |
| } else { | |
| // apache/nginx request format with latency at the end | |
| fmt.Println(clientIP + " - - [" + start.Format("02/Jan/2006:15:04:05 -0700") + "] \"" + | |
| r.Method + " " + r.RequestURI + " " + r.Proto + "\" " + | |
| fmt.Sprint(status) + " " + fmt.Sprint(r.ContentLength) + " \"" + | |
| r.Referer() + "\" \"" + r.UserAgent() + "\" " + | |
| fmt.Sprint(latency)) | |
| } | |
| }) | |
| } | |
| // Color ranges for latency gradient | |
| var latencyColors = []string{ | |
| "\033[38;5;39m", // Blue | |
| "\033[38;5;51m", // Light blue | |
| "\033[38;5;27m", // Added color (Dark blue) | |
| "\033[38;5;82m", // Green | |
| "\033[38;5;34m", // Added color (Forest green) | |
| "\033[38;5;154m", // Light green | |
| "\033[38;5;220m", // Yellow | |
| "\033[38;5;208m", // Orange | |
| "\033[38;5;198m", // Light red | |
| } | |
| // getLatencyGradientColor returns a gradient color based on the latency | |
| func getLatencyGradientColor(latency time.Duration) string { | |
| millis := latency.Milliseconds() | |
| // Define latency thresholds | |
| thresholds := []int64{40, 60, 85, 100, 150, 230, 400, 600} | |
| for i, threshold := range thresholds { | |
| if millis < threshold { | |
| return latencyColors[i] | |
| } | |
| } | |
| return latencyColors[len(latencyColors)-1] | |
| } | |
| // colorQueryParameters colors the query parameters | |
| func colorQueryParameters(query string) string { | |
| if query == "" { | |
| return "" | |
| } | |
| // NOTE: the question mark and first query key are colored the same | |
| params := strings.Split(query, "&") | |
| var coloredParams []string | |
| for _, param := range params { | |
| keyValue := strings.Split(param, "=") | |
| if len(keyValue) == 2 { | |
| coloredParams = append(coloredParams, fmt.Sprintf("%s%s%s=%s%s%s", ANSICyan, keyValue[0], ANSIReset, ANSIYellow, keyValue[1], ANSIReset)) | |
| } else { | |
| coloredParams = append(coloredParams, param) | |
| } | |
| } | |
| return strings.Join(coloredParams, "&") | |
| } |
| /* | |
| * DeNA Sakasho/Miitomo API deobfuscator in Fusion by Arian Kordi (https://github.com/ariankordi) | |
| * Original implementation: https://gist.github.com/ariankordi/0b990239daa1f69d571c7de3bec57cc4 | |
| * What is this and how to use?: | |
| * - This looks like C# but it's "Fusion" which transpiles to other languages. | |
| * - Paste this into https://fusion-lang.org/playground and use your language of choice | |
| * - Only JS has been tested for now, so if yours doesn't work then contact me. | |
| * Usage in JS: | |
| // Initialize the XOR tables. | |
| const obfs = new SakashoObfuscation(); | |
| obfs.initialize( | |
| "9ec1c78fa2cb34e2bed5691c08432f04", // commonKey, always same. | |
| // player_session_id cookie value. (Empty by default/for brevity) | |
| ""); | |
| // Decoding | |
| const decompressedSize = obfs.getDecompressedSize(obfuscated); | |
| if (decompressedSize <= 0) throw new Error("Invalid input size."); | |
| const result = obfs.decode(obfuscated, obfuscated.length); | |
| if (!result) throw new Error("Decoding failed."); | |
| console.warn("Input size:", obfuscated.length); | |
| console.warn("Decompressed size and content:", decompressedSize, result.toHex()); | |
| // Encoding | |
| const cmpSize = new Int32Array([0]); | |
| let encoded = obfs.encode(result, decompressedSize, cmpSize); | |
| if (!encoded) throw new Error("Encoding failed."); | |
| encoded = encoded.subarray(0, cmpSize[0]); | |
| console.warn("Encoded size and content:", decompressedSize, encoded.toHex()); | |
| // Re-decoding the encoded output for testing. | |
| const decoded = obfs.decode(encoded, encoded.length); | |
| if (!decoded) throw new Error("RE-decoding failed."); | |
| console.assert(result.toHex() == decoded.toHex()); | |
| */ | |
| /// Reads and writes Base128 Varints used in Protobufs, DWARF... | |
| public static class Varint | |
| { | |
| /// Maximum size for a 32-bit integer as a varint. | |
| public const int MaxSizeInt = 5; | |
| // The true max size is 10 bytes for a 64-bit long, but | |
| // libsakasho.so doesn't handle larger than 32-bit ints. | |
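| // Worked example: 300 (0x12C) encodes as 0xAC 0x02 (low 7 bits first, high bit set on every byte except the last). | |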
| /// Reads a varint from data starting at posOut[0]. | |
| /// Advances posOut[0] to indicate how large the varint was. | |
| /// Maximum is a 32-bit int. Returns 0 on invalid varint. | |
| public static int Read( | |
| /// Byte array containing the varint. | |
| byte[] data, | |
| /// Single-element array containing the cursor. | |
| /// The offset is read from this, and the varint size is written to it. | |
| byte[]! posOut) | |
| { | |
| int value = 0; | |
| int shift = 0; | |
| int pos = posOut[0]; | |
| // libsakasho.so:FUN_0004aeac | |
| for (int i = 0; i < MaxSizeInt; i++) { | |
| int b = data[pos++]; | |
| value |= (b & 0x7F) << shift; | |
| if ((b & 0x80) == 0) { | |
| // End of varint. | |
| posOut[0] = pos; | |
| return value; | |
| } | |
| shift += 7; | |
| // Do not exceed 35 bits for a 32-bit int. | |
| if (shift >= 35) | |
| return 0; | |
| } | |
| return 0; // Invalid varint. | |
| } | |
| /// Writes a 32-bit varint to the specified output. | |
| /// Returns varint size, or 0 on failure. | |
| public static int Write(byte[]! dst, int value, | |
| /// Offset within dst to write the varint to. | |
| int offset = 0) | |
| { | |
| //if (value < 0) | |
| // return 0; // Reject negative values. | |
| int pos = 0; | |
| for (int i = 0; i < MaxSizeInt; i++) { | |
| int b = value & 0x7F; | |
| value >>= 7; | |
| if (value != 0) { | |
| dst[offset + pos++] = b | 0x80; | |
| } else { | |
| dst[offset + pos++] = b; | |
| return pos; | |
| } | |
| } | |
| return 0; // Number is too large? | |
| } | |
| } | |
| /// Simple LZ4 en/decoder. Goals are to be small and safe. | |
| /// Does not use the LZ4 frame format, and does not perform compression. | |
| public static class Lz4 | |
| { | |
| /// Decompresses a chunk of raw LZ4 data into the output buffer. | |
| /// Returns the number of bytes decompressed, or -1 on corruption. | |
| public static int Decompress( | |
| /// Compressed input data. | |
| byte[] src, | |
| /// Decompressed output. | |
| byte[]! dst, | |
| /// Length of compressed input data. | |
| int compressedSize, | |
| /// Capacity of the output buffer. | |
| int dstCapacity, | |
| /// Offset into the compressed data. | |
| int srcOffset = 0) | |
| { | |
| // Derived from AssetStudio (MIT License): https://github.com/RazTools/AssetStudio/blob/807117cb0b366bf02361b54e34846a14dc8558ee/AssetStudio/LZ4Utils.cs | |
| // NOTE: This function has not been thoroughly tested for out-of-bounds writes. | |
| int srcPos = 0; | |
| int dstPos = 0; | |
| // Iterate through all bytes available. | |
| while (srcPos < compressedSize && dstPos < dstCapacity) { | |
| if (srcPos >= compressedSize) | |
| return -1; // Overflow: no token available. | |
| // Get literal token. | |
| // Low nibble = match length part (encCount), high nibble = literal length (litCount). | |
| int token = src[srcOffset + srcPos++]; | |
| int encCount = token & 0xF; | |
| int litCount = (token >> 4) & 0xF; | |
| // Get literal length (variable/optional). | |
| if (litCount == 0xF) { | |
| int sum = 0; | |
| do { | |
| if (srcPos >= compressedSize) | |
| return -1; // Overflow: incomplete extension. | |
| sum = src[srcOffset + srcPos++]; | |
| litCount += sum; | |
| } while (sum == 0xFF); | |
| } | |
| // Bounds check literal copy. | |
| if (srcPos + litCount > compressedSize) | |
| return -1; // Overflow: literals exceed input. | |
| if (dstPos + litCount > dstCapacity) | |
| return -1; // Overflow: literals exceed output. | |
| // Copy literal/compressed chunk. | |
| src.CopyTo(srcOffset + srcPos, dst, dstPos, litCount); | |
| srcPos += litCount; | |
| dstPos += litCount; | |
| // Check for end-of-block (last literals, no match follows). | |
| if (srcPos >= compressedSize) | |
| break; // Valid end: last block was literals only. | |
| // Get offset (2 bytes, little-endian). | |
| if (srcPos + 1 >= compressedSize) | |
| return -1; // Overflow: incomplete offset. | |
| int back = src[srcOffset + srcPos] | (src[srcOffset + srcPos + 1] << 8); | |
| srcPos += 2; | |
| // Validate offset. | |
| if (back <= 0 || back > dstPos) | |
| return -1; // Invalid offset (zero or points before start). | |
| // Get extended match length if needed. | |
| if (encCount == 0xF) { | |
| int sum = 0; | |
| do { | |
| if (srcPos >= compressedSize) | |
| return -1; // Overflow: incomplete extension. | |
| sum = src[srcOffset + srcPos++]; | |
| encCount += sum; | |
| } while (sum == 0xFF); | |
| } | |
| encCount += 4; // Minimum match length is 4. | |
| // Bounds check for match copy. | |
| if (dstPos + encCount > dstCapacity) | |
| return -1; // Overflow: match exceeds output. | |
| int encPos = dstPos - back; | |
| // Copy match (may overlap for repeat patterns). | |
| if (encCount <= back) { | |
| // Non-overlapping: safe bulk copy. | |
| dst.CopyTo(encPos, dst, dstPos, encCount); | |
| dstPos += encCount; | |
| } else { | |
| // Overlapping: manual byte-by-byte to handle repetition. | |
| for (int i = 0; i < encCount; i++) { | |
| dst[dstPos++] = dst[encPos++]; | |
| } | |
| } | |
| } | |
| return dstPos; // Success: return byte count written to dst. | |
| } | |
| /// Gets the maximum size of a compressed buffer, same as LZ4_compressBound. | |
| public static int GetMaxCompressedSize(int inputSize) | |
| { | |
| return inputSize > 0x7E000000 // LZ4_MAX_INPUT_SIZE | |
| ? 0 // Avoid a potential overflow. | |
| : inputSize + (inputSize / 0xFF) + 16; | |
| // return inputSize <= 0xF | |
| // ? inputSize + 1; | |
| // : inputSize + 1 + (inputSize - 0xF + 0xFE) / 0xFF; | |
| } | |
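| // e.g. GetMaxCompressedSize(1000) == 1000 + 1000/255 + 16 == 1019. | |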
| /// Encodes input into raw LZ4 block format with no framing. | |
| /// Returns the number of bytes written, or -1 on failure. | |
| /// | |
| /// This does not actually compress the data; the output is slightly | |
| /// larger than the input and exists only for interoperability. | |
| public static int Compress( | |
| /// Input data. | |
| byte[] src, | |
| /// Destination to write compressed data to. | |
| byte[]! dst, | |
| /// Size of input data buffer. | |
| int srcSize, | |
| /// Capacity of the output buffer. | |
| int dstCapacity, | |
| /// Offset into the destination buffer. | |
| int dstOffset = 0) | |
| { | |
| if (srcSize < 0) | |
| return -1; | |
| int dstPos = 0; | |
| // Encode everything as a single literal block. | |
| int litLen = srcSize; | |
| // Token byte. | |
| int tokenLit = litLen < 0xF ? litLen : 0xF; | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = tokenLit << 4; | |
| // Extra literal length bytes. | |
| if (litLen >= 0xF) { | |
| int len = litLen - 0xF; | |
| while (len >= 0xFF) { | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = 0xFF; | |
| len -= 0xFF; | |
| } | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = len; | |
| } | |
| // Copy literals. | |
| if (dstPos + srcSize > dstCapacity) | |
| return -1; | |
| src.CopyTo(0, dst, dstOffset + dstPos, srcSize); | |
| dstPos += srcSize; | |
| return dstPos; | |
| } | |
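| // Worked example: Compress on 300 input bytes emits the 3-byte header F0 FF 1E | |
| // (token with litLen=15, then 255 + 30 extension bytes) followed by the 300 literals verbatim. | |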
| } | |
| /// Deobfuscation for DeNA Sakasho HTTP request/response content. | |
| /// Reverse engineered from Miitomo, may be used elsewhere. | |
| /// They call this "CookedResponse"/"CookedRequestBody" in symbols. | |
| /// | |
| /// Consists of XOR/bit rotation, LZ4 compression, and varint length field. | |
| /// This class just implements the XOR logic and a generic decode method | |
| /// calling the Varint and Lz4 classes implemented here. | |
| public class SakashoObfuscation | |
| { | |
| /// Maximum XOR table length. Official player_session_id cookies | |
| /// may exceed this limit, so if you're having issues, try increasing it. | |
| const int XorTableLengthMax = 256; | |
| byte[XorTableLengthMax] xorTable; | |
| int xorLen; | |
| /// Builds or rebuilds the internal XOR table. | |
| public void Initialize!( | |
| /// The common key string. | |
| /// Value for Miitomo: 9ec1c78fa2cb34e2bed5691c08432f04 | |
| string commonKey, | |
| /// The value of the player_session_id cookie, | |
| /// or an empty string if the cookie was not set. | |
| string sessionId) | |
| { | |
| xorLen = 0; // Effectively clear/initialize the table. | |
| // Add pre-transformed common key. | |
| for (int i = 0; i < commonKey.Length && xorLen < XorTableLengthMax; i++) { | |
| // Transform the common key, like FUN_0004a120 in libsakasho.so. | |
| byte c = commonKey[i]; | |
| xorTable[xorLen++] = (-0x62 - c) & 0xFF; | |
| } | |
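| // e.g. the key byte '9' (0x39) becomes (-0x62 - 0x39) & 0xFF = 0x65. | |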
| // Add session ID as string. | |
| for (int i = 0; i < sessionId.Length && xorLen < XorTableLengthMax; i++) { | |
| xorTable[xorLen++] = sessionId[i]; | |
| } | |
| } | |
| /// Applies XOR decoding to the buffer in-place. | |
| public void XorDecode(byte[]! data, int dataLen) | |
| { | |
| XorDecodeBuffer(data, dataLen, xorTable, xorLen); | |
| } | |
| /// Applies XOR encoding to the buffer in-place. | |
| public void XorEncode(byte[]! data, int dataLen) | |
| { | |
| XorEncodeBuffer(data, dataLen, xorTable, xorLen); | |
| } | |
| /// Gets decompressed size from obfuscated/compressed | |
| /// data, or 0 if the size varint is invalid. | |
| public int GetDecompressedSize(byte[] data, byte[]!? posOut = null) | |
| { | |
| // Copy the single varint to a temporary buffer. | |
| byte[Varint.MaxSizeInt] tmp; | |
| data.CopyTo(0, tmp, 0, Varint.MaxSizeInt); | |
| XorDecode(tmp, Varint.MaxSizeInt); // XOR decode. | |
| byte[1] posOutLocal; posOutLocal[0] = 0; // Varint size/offset stored here. | |
| return posOut == null // Optionally use the input posOut. | |
| ? Varint.Read(tmp, posOutLocal) | |
| : Varint.Read(tmp, posOut); | |
| } | |
| /// Fully decodes and decompresses obfuscated bytes. | |
| /// Returns a pre-allocated byte array (must be freed by the caller) | |
| /// or null if the decompression failed. | |
| public byte[]#? Decode(byte[] data, int dataLen) | |
| { | |
| byte[]# tmp = new byte[dataLen]; // Temporary decoded output. | |
| data.CopyTo(0, tmp, 0, dataLen); // Copy the input data. | |
| XorDecode(tmp, dataLen); // XOR decode. | |
| // Get the decompressed size. | |
| byte[1] posOut; posOut[0] = 0; // Varint offset stored here. | |
| // int size = GetDecompressedSize(data, posOut); | |
| int size = Varint.Read(tmp, posOut); | |
| if (size <= 0) | |
| return null; // Bad/corrupted size value. | |
| // Sanity check a max size of 100 MB. | |
| assert size <= 100 * 1024 * 1024; | |
| // Decompress the deobfuscated bytes. | |
| byte[]# output = new byte[size]; | |
| int decompressed = Lz4.Decompress( | |
| /*data*/ tmp, output, dataLen - posOut[0], // Subtract varint size from length. | |
| size, posOut[0]); // Last arg = compressed offset = varint size. | |
| if (decompressed <= 0) | |
| return null; // Decompression failed. | |
| return output; // Must be freed by the caller in C. | |
| } | |
| /// Fully compresses and obfuscates raw bytes. | |
| /// Returns a pre-allocated byte array (must be freed by the caller) | |
| /// containing the compressed and obfuscated data, or null on failure. | |
| /// | |
| /// NOTE: The actual output length is written to posOut[0]; the returned | |
| /// buffer is larger than needed, so you must trim it. Example (JS): | |
| /// const len = new Int32Array([0]); | |
| /// let result = obfs.encode(input, size, len); result = result.subarray(0, len[0]); | |
| public byte[]#? Encode(byte[] data, int dataLen, | |
| /// Array where the 0th element is the output size. | |
| int[]! posOut) | |
| { | |
| // Get the maximum size/worst-case scenario to know | |
| // how much to allocate the compressed data buffer. | |
| int maxCompressed = Lz4.GetMaxCompressedSize(dataLen); | |
| if (maxCompressed <= 0) | |
| return null; // Input data is too long or short. | |
| // Allocate buffer to hold maximum for compressed data and varint. | |
| byte[]# buffer = new byte[Varint.MaxSizeInt + maxCompressed]; | |
| // Write the size of the uncompressed data. | |
| int varintSize = Varint.Write(buffer, dataLen, 0); | |
| if (varintSize <= 0) | |
| return null; // Input data is larger than varint max size. | |
| // Compress the input data and write it after the varint. | |
| int compressedSize = Lz4.Compress(data, buffer, dataLen, maxCompressed, | |
| varintSize); // Use the varint size as the offset. | |
| if (compressedSize <= 0) | |
| return null; // Compression failed. | |
| // Actual size of compressed payload. The buffer is larger than this. | |
| int totalSize = varintSize + compressedSize; | |
| // Compression is finished. XOR encode the whole stream. | |
| XorEncode(buffer, totalSize); | |
| // Make a new buffer of the exact compressed size, copy to and return that. | |
| // byte[]# result = new byte[totalSize]; | |
| // buffer.CopyTo(0, result, 0, totalSize); | |
| // if (posOut != null) posOut[0] = totalSize; | |
| // return result; | |
| // Instead, the caller must trim the output. | |
| posOut[0] = totalSize; | |
| return buffer; | |
| } | |
| /// Applies the conditional XOR/bit rotation operation to decode the data. | |
| /// Reference: libsakasho.so:FUN_0004ec70, Java_jp_dena_sakasho_core_delegate_CookedResponseDelegate_cookResponse | |
| public static void XorDecodeBuffer(byte[]! data, int dataLen, byte[] table, int tableLen) | |
| { | |
| for (int i = 0; i < dataLen; i++) { | |
| int keyByte = table[(i + 1) % tableLen]; | |
| int inputByte = data[i]; | |
| if ((keyByte & 7) == 0) { | |
| // Perform XOR. | |
| data[i] = inputByte ^ keyByte; | |
| } else { | |
| int shift = keyByte & 7; | |
| // Perform bit rotation (decode direction). | |
| data[i] = ((inputByte >> (8 - shift)) | (inputByte << shift)) & 0xFF; | |
| } | |
| } | |
| } | |
| /// Applies the conditional XOR/bit rotation operation to encode the data. | |
| /// Reference: libsakasho.so:FUN_0004ebc0, Java_jp_dena_sakasho_core_http_CookedRequestBody_cookRequest | |
| public static void XorEncodeBuffer(byte[]! data, int dataLen, byte[] table, int tableLen) | |
| { | |
| for (int i = 0; i < dataLen; i++) { | |
| int keyByte = table[(i + 1) % tableLen]; | |
| int inputByte = data[i]; | |
| if ((keyByte & 7) == 0) { | |
| // Perform XOR. | |
| data[i] = inputByte ^ keyByte; | |
| } else { | |
| int shift = keyByte & 7; | |
| // Perform bit rotation (encode direction). | |
| data[i] = ((inputByte << (8 - shift)) | (inputByte >> shift)) & 0xFF; | |
| } | |
| } | |
| } | |
| } |
| // Generated automatically with "fut". Do not edit. | |
| /** | |
| * Reads and writes Base128 Varints used in Protobufs, DWARF... | |
| */ | |
| export class Varint { | |
| /** | |
| * @private | |
| */ | |
| constructor() { | |
| } | |
| /** | |
| * Maximum size for a 32-bit integer as a varint. | |
| * @public | |
| * @static | |
| * @readonly | |
| * @default 5 | |
| */ | |
| static MAX_SIZE_INT = 5; | |
| /** | |
| * Reads a varint from data starting at posOut[0]. | |
| * Advances posOut[0] to indicate how large the varint was. | |
| * Maximum is a 32-bit int. Returns 0 on invalid varint. | |
| * @public | |
| * @static | |
| * @param {Readonly<Uint8Array>} data Byte array containing the varint. | |
| * @param {Uint8Array} posOut Single-element array containing the cursor. | |
| * @returns {number} | |
| */ | |
| static read(data, posOut) { | |
| let value = 0; | |
| let shift = 0; | |
| let pos = posOut[0]; | |
| for (let i = 0; i < 5; i++) { | |
| let b = data[pos++]; | |
| value |= (b & 127) << shift; | |
| if ((b & 128) == 0) { | |
| posOut[0] = pos; | |
| return value; | |
| } | |
| shift += 7; | |
| if (shift >= 35) | |
| return 0; | |
| } | |
| return 0; | |
| } | |
| /** | |
| * Writes a 32-bit varint to the specified output. | |
| * Returns varint size, or 0 on failure. | |
| * @public | |
| * @static | |
| * @param {Uint8Array} dst | |
| * @param {number} value | |
| * @param {number} [offset=0] Offset within dst to write the varint to. | |
| * @returns {number} | |
| */ | |
| static write(dst, value, offset = 0) { | |
| let pos = 0; | |
| for (let i = 0; i < 5; i++) { | |
| let b = value & 127; | |
| value >>= 7; | |
| if (value != 0) { | |
| dst[offset + pos++] = b | 128; | |
| } | |
| else { | |
| dst[offset + pos++] = b; | |
| return pos; | |
| } | |
| } | |
| return 0; | |
| } | |
| } | |
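| // Illustrative sketch added for readability (not emitted by fut): round-trips an | |
| // arbitrary value through the Varint class above. The value 300 and the variable | |
| // names here are examples only. | |
| function exampleVarintRoundTrip() { | |
| const buf = new Uint8Array(Varint.MAX_SIZE_INT); | |
| const size = Varint.write(buf, 300, 0); // size == 2; buf begins 0xAC 0x02 | |
| const cursor = new Uint8Array(1); // cursor[0] starts at 0 | |
| const value = Varint.read(buf, cursor); // value == 300, cursor[0] advanced to 2 | |
| return { size, value }; | |
| } | |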
| /** | |
| * Simple LZ4 en/decoder. Goals are to be small and safe. | |
| * Does not use the LZ4 frame format, and does not perform compression. | |
| */ | |
| export class Lz4 { | |
| /** | |
| * @private | |
| */ | |
| constructor() { | |
| } | |
| /** | |
| * Decompresses a chunk of raw LZ4 data into the output buffer. | |
| * Returns the number of bytes decompressed, or -1 on corruption. | |
| * @public | |
| * @static | |
| * @param {Readonly<Uint8Array>} src Compressed input data. | |
| * @param {Uint8Array} dst Decompressed output. | |
| * @param {number} compressedSize Length of compressed input data. | |
| * @param {number} dstCapacity Capacity of the output buffer. | |
| * @param {number} [srcOffset=0] Offset into the compressed data. | |
| * @returns {number} | |
| */ | |
| static decompress(src, dst, compressedSize, dstCapacity, srcOffset = 0) { | |
| let srcPos = 0; | |
| let dstPos = 0; | |
| while (srcPos < compressedSize && dstPos < dstCapacity) { | |
| if (srcPos >= compressedSize) | |
| return -1; | |
| let token = src[srcOffset + srcPos++]; | |
| let encCount = token & 15; | |
| let litCount = token >> 4 & 15; | |
| if (litCount == 15) { | |
| let sum = 0; | |
| do { | |
| if (srcPos >= compressedSize) | |
| return -1; | |
| sum = src[srcOffset + srcPos++]; | |
| litCount += sum; | |
| } while (sum == 255); | |
| } | |
| if (srcPos + litCount > compressedSize) | |
| return -1; | |
| if (dstPos + litCount > dstCapacity) | |
| return -1; | |
| dst.set(src.subarray(srcOffset + srcPos, srcOffset + srcPos + litCount), dstPos); | |
| srcPos += litCount; | |
| dstPos += litCount; | |
| if (srcPos >= compressedSize) | |
| break; | |
| if (srcPos + 1 >= compressedSize) | |
| return -1; | |
| let back = src[srcOffset + srcPos] | src[srcOffset + srcPos + 1] << 8; | |
| srcPos += 2; | |
| if (back <= 0 || back > dstPos) | |
| return -1; | |
| if (encCount == 15) { | |
| let sum = 0; | |
| do { | |
| if (srcPos >= compressedSize) | |
| return -1; | |
| sum = src[srcOffset + srcPos++]; | |
| encCount += sum; | |
| } while (sum == 255); | |
| } | |
| encCount += 4; | |
| if (dstPos + encCount > dstCapacity) | |
| return -1; | |
| let encPos = dstPos - back; | |
| if (encCount <= back) { | |
| dst.set(dst.subarray(encPos, encPos + encCount), dstPos); | |
| dstPos += encCount; | |
| } | |
| else { | |
| for (let i = 0; i < encCount; i++) { | |
| dst[dstPos++] = dst[encPos++]; | |
| } | |
| } | |
| } | |
| return dstPos; | |
| } | |
| /** | |
| * Gets the maximum size of a compressed buffer, same as LZ4_compressBound. | |
| * @public | |
| * @static | |
| * @param {number} inputSize | |
| * @returns {number} | |
| */ | |
| static getMaxCompressedSize(inputSize) { | |
| return inputSize > 2113929216 ? 0 : inputSize + (inputSize / 255 | 0) + 16; | |
| } | |
| /** | |
| * Encodes input into raw LZ4 block format with no framing. | |
| * | |
| * <p>Returns the number of bytes written, or -1 on failure. | |
| * <p>This does not actually compress the data, leaving it | |
| * larger than it came in just for interoperability. | |
| * @public | |
| * @static | |
| * @param {Readonly<Uint8Array>} src Input data. | |
| * @param {Uint8Array} dst Destination to write compressed data to. | |
| * @param {number} srcSize Size of input data buffer. | |
| * @param {number} dstCapacity Capacity of the output buffer. | |
| * @param {number} [dstOffset=0] Offset into the destination buffer. | |
| * @returns {number} | |
| */ | |
| static compress(src, dst, srcSize, dstCapacity, dstOffset = 0) { | |
| if (srcSize < 0) | |
| return -1; | |
| let dstPos = 0; | |
| let litLen = srcSize; | |
| let tokenLit = litLen < 15 ? litLen : 15; | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = tokenLit << 4; | |
| if (litLen >= 15) { | |
| let len = litLen - 15; | |
| while (len >= 255) { | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = 255; | |
| len -= 255; | |
| } | |
| if (dstPos >= dstCapacity) | |
| return -1; | |
| dst[dstOffset + dstPos++] = len; | |
| } | |
| if (dstPos + srcSize > dstCapacity) | |
| return -1; | |
| dst.set(src.subarray(0, srcSize), dstOffset + dstPos); | |
| dstPos += srcSize; | |
| return dstPos; | |
| } | |
| } | |
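| // Illustrative sketch added for readability (not emitted by fut): stores a small | |
| // buffer as a raw LZ4 block and reads it back with the Lz4 class above. The input | |
| // bytes are arbitrary examples. | |
| function exampleLz4RoundTrip() { | |
| const src = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]); | |
| const dst = new Uint8Array(Lz4.getMaxCompressedSize(src.length)); | |
| const packed = Lz4.compress(src, dst, src.length, dst.length, 0); // bytes written, or -1 | |
| const out = new Uint8Array(src.length); | |
| const unpacked = Lz4.decompress(dst, out, packed, out.length, 0); // bytes recovered, or -1 | |
| return unpacked === src.length; // true when the round trip worked | |
| } | |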
| /** | |
| * Deobfuscation for DeNA Sakasho HTTP request/response content. | |
| * | |
| * <p>Reverse engineered from Miitomo, may be used elsewhere. | |
| * They call this "CookedResponse"/"CookedRequestBody" in symbols. | |
| * <p>Consists of XOR/bit rotation, LZ4 compression, and varint length field. | |
| * This class just implements the XOR logic and a generic decode method | |
| * calling the Varint and Lz4 classes implemented here. | |
| */ | |
| export class SakashoObfuscation { | |
| /** | |
| * Maximum XOR table length. Official player_session_id cookies | |
| * can be long enough to push past this limit, so if you're having issues, try increasing it. | |
| * @static | |
| * @readonly | |
| * @default 256 | |
| */ | |
| static #XOR_TABLE_LENGTH_MAX = 256; | |
| /** | |
| * @readonly | |
| * @default Uint8Array | |
| */ | |
| #xorTable = new Uint8Array(256); | |
| #xorLen; | |
| /** | |
| * Builds or rebuilds the internal XOR table. | |
| * @public | |
| * @param {string} commonKey The common key string. | |
| * @param {string} sessionId The value of the player_session_id cookie, | |
| * or an empty string if the cookie was not set. | |
| * @returns {void} | |
| */ | |
| initialize(commonKey, sessionId) { | |
| this.#xorLen = 0; | |
| for (let i = 0; i < commonKey.length && this.#xorLen < 256; i++) { | |
| let c = commonKey.charCodeAt(i); | |
| this.#xorTable[this.#xorLen++] = (-98 - c) & 255; | |
| } | |
| for (let i = 0; i < sessionId.length && this.#xorLen < 256; i++) { | |
| this.#xorTable[this.#xorLen++] = sessionId.charCodeAt(i); | |
| } | |
| } | |
| /** | |
| * Applies XOR decoding to the buffer in-place. | |
| * @public | |
| * @param {Uint8Array} data | |
| * @param {number} dataLen | |
| * @returns {void} | |
| */ | |
| xorDecode(data, dataLen) { | |
| SakashoObfuscation.xorDecodeBuffer(data, dataLen, this.#xorTable, this.#xorLen); | |
| } | |
| /** | |
| * Applies XOR encoding to the buffer in-place. | |
| * @public | |
| * @param {Uint8Array} data | |
| * @param {number} dataLen | |
| * @returns {void} | |
| */ | |
| xorEncode(data, dataLen) { | |
| SakashoObfuscation.xorEncodeBuffer(data, dataLen, this.#xorTable, this.#xorLen); | |
| } | |
| /** | |
| * Gets decompressed size from obfuscated/compressed | |
| * data, or 0 if the size varint is invalid. | |
| * @public | |
| * @param {Readonly<Uint8Array>} data | |
| * @param {Uint8Array | null} [posOut=null] | |
| * @returns {number} | |
| */ | |
| getDecompressedSize(data, posOut = null) { | |
| const tmp = new Uint8Array(5); | |
| tmp.set(data.subarray(0, 5)); | |
| this.xorDecode(tmp, 5); | |
| const posOutLocal = new Uint8Array(1); | |
| posOutLocal[0] = 0; | |
| return posOut == null ? Varint.read(tmp, posOutLocal) : Varint.read(tmp, posOut); | |
| } | |
| /** | |
| * Fully decodes and decompresses obfuscated bytes. | |
| * Returns a pre-allocated byte array (must be freed by the caller) | |
| * or null if the decompression failed. | |
| * @public | |
| * @param {Readonly<Uint8Array>} data | |
| * @param {number} dataLen | |
| * @returns {Uint8Array | null} | |
| */ | |
| decode(data, dataLen) { | |
| let tmp = new Uint8Array(dataLen); | |
| tmp.set(data.subarray(0, dataLen)); | |
| this.xorDecode(tmp, dataLen); | |
| const posOut = new Uint8Array(1); | |
| posOut[0] = 0; | |
| let size = Varint.read(tmp, posOut); | |
| if (size <= 0) | |
| return null; | |
| console.assert(size <= 104857600); | |
| let output = new Uint8Array(size); | |
| let decompressed = Lz4.decompress(tmp, output, dataLen - posOut[0], size, posOut[0]); | |
| if (decompressed <= 0) | |
| return null; | |
| return output; | |
| } | |
| /** | |
| * Fully compresses and obfuscates raw bytes. | |
| * | |
| * <p>Returns a pre-allocated byte array (must be freed by the caller) | |
| * containing the compressed and obfuscated data, or null on failure. | |
| * <p>NOTE: The meaningful length of the data is posOut[0]; the returned array is over-allocated, so you must trim the output. | |
| * Example: const len = new Int32Array(1); | |
| * let result = obfs.encode(input, size, len); result = result.subarray(0, len[0]); | |
| * @public | |
| * @param {Readonly<Uint8Array>} data | |
| * @param {number} dataLen | |
| * @param {Int32Array} posOut Array where the 0th element is the output size. | |
| * @returns {Uint8Array | null} | |
| */ | |
| encode(data, dataLen, posOut) { | |
| let maxCompressed = Lz4.getMaxCompressedSize(dataLen); | |
| if (maxCompressed <= 0) | |
| return null; | |
| let buffer = new Uint8Array(5 + maxCompressed); | |
| let varintSize = Varint.write(buffer, dataLen, 0); | |
| if (varintSize <= 0) | |
| return null; | |
| let compressedSize = Lz4.compress(data, buffer, dataLen, maxCompressed, varintSize); | |
| if (compressedSize <= 0) | |
| return null; | |
| let totalSize = varintSize + compressedSize; | |
| this.xorEncode(buffer, totalSize); | |
| posOut[0] = totalSize; | |
| return buffer; | |
| } | |
| /** | |
| * Applies the conditional XOR/bit rotation operation to decode the data. | |
| * Reference: libsakasho.so:FUN_0004ec70, Java_jp_dena_sakasho_core_delegate_CookedResponseDelegate_cookResponse | |
| * @public | |
| * @static | |
| * @param {Uint8Array} data | |
| * @param {number} dataLen | |
| * @param {Readonly<Uint8Array>} table | |
| * @param {number} tableLen | |
| * @returns {void} | |
| */ | |
| static xorDecodeBuffer(data, dataLen, table, tableLen) { | |
| for (let i = 0; i < dataLen; i++) { | |
| let keyByte = table[(i + 1) % tableLen]; | |
| let inputByte = data[i]; | |
| if ((keyByte & 7) == 0) { | |
| data[i] = inputByte ^ keyByte; | |
| } | |
| else { | |
| let shift = keyByte & 7; | |
| data[i] = (inputByte >> (8 - shift) | inputByte << shift) & 255; | |
| } | |
| } | |
| } | |
| /** | |
| * Applies the conditional XOR/bit rotation operation to encode the data. | |
| * Reference: libsakasho.so:FUN_0004ebc0, Java_jp_dena_sakasho_core_http_CookedRequestBody_cookRequest | |
| * @public | |
| * @static | |
| * @param {Uint8Array} data | |
| * @param {number} dataLen | |
| * @param {Readonly<Uint8Array>} table | |
| * @param {number} tableLen | |
| * @returns {void} | |
| */ | |
| static xorEncodeBuffer(data, dataLen, table, tableLen) { | |
| for (let i = 0; i < dataLen; i++) { | |
| let keyByte = table[(i + 1) % tableLen]; | |
| let inputByte = data[i]; | |
| if ((keyByte & 7) == 0) { | |
| data[i] = inputByte ^ keyByte; | |
| } | |
| else { | |
| let shift = keyByte & 7; | |
| data[i] = (inputByte << (8 - shift) | inputByte >> shift) & 255; | |
| } | |
| } | |
| } | |
| } |
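| // Illustrative sketch added for readability (not emitted by fut): a full encode/decode | |
| // round trip through the SakashoObfuscation class above. The common key and payload | |
| // are placeholders, not real Miitomo/Sakasho values. | |
| function exampleSakashoRoundTrip() { | |
| const obfs = new SakashoObfuscation(); | |
| obfs.initialize("example-common-key", ""); // empty string when no player_session_id cookie | |
| const plain = new TextEncoder().encode("{\"hello\":\"world\"}"); | |
| const outLen = new Int32Array(1); | |
| let cooked = obfs.encode(plain, plain.length, outLen); | |
| if (cooked === null) return null; // encoding failed | |
| cooked = cooked.subarray(0, outLen[0]); // trim: the buffer is over-allocated | |
| const decoded = obfs.decode(cooked, cooked.length); // null on corruption | |
| return decoded === null ? null : new TextDecoder().decode(decoded); | |
| } | |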
| /* | |
| Contents of test-response.bin in Base64: | |
| dmSHgPaISGMWL1ZOiB23IrfdNzdrSy0sADyppku9m/oOlcnctPY3EUcTYoWIhTOx9W5He5PK1zIsjrAR6EQouxY1dSE1U1mVrBkW2zr0BoqtkxYFypAjQSu3CUUKFLO8NgoUElKCCxVyujGjFQqSUAkmUKFoFAoBAD4laODMKB8kNaAsI8lhNLNBch1V1FQpxFW0N4EFgjPah6ut0IqKwQDdRQWxO4Q2UBjYgkqCcqIgAP4pqneIZBG1S9KjAACuABggAGMFvvX114NXSgk71FAAAB4APwK+gxpi1JItKUumKBLsqh16ilVo4gDBAQAwKgCCkQAWZrCImyzWXRUeox25LEBRIqCTLebCBNK4EWwAkgWTe9zbus1sso0AQ0RhchZNOhD2yQULBE+MEYhEdERN2DLmTankGxgmkaFWmxwZasKInd5LNlUoETmFa0SObRqNFmoDYRqNhQMHsMymVAMFFhGxoVE5sFOjPZwJACBjC2p8IADjck3VFojmgysbLVmxNbRLpLLZ8kDhQGQcRDFaGo7QRlgxEegRYAwGDFqBidLE1ADhUHZZ5lwrAhF0aRYRzLQnuegFK5Pmq1nrCBFEYUTOyV0rRy3m7FOyVkcJADA/K2NsvbW1AAAfACd7F5GJXrJ0t0JiADVqWZVLuXbpZjPxAAS5iECItlUAUNtyVkA4ClbN8yAGI01tWETorB6bUIhfYYi7swA8OQCFC7kvAMNKAFgxV9Fwh5gAFv8IM1g73idLOlkKWE6FzaxaowOXAAU8LQD4YuolC4gcVyADOlSDyFhSnPa3tdMQkIFhOLoNt3vkMlo+WOgrXDCDmQtKzNj22zr6wQvgpUAEZG0QWMBgwkR3FhvGLgBgiShY54FkFiK03JajS8LYB9QAAoD4AB70OBjvJmct3LQmxrLXMx2Tcy3R1ZysIgLRnKsd9fULEVc4IyxHyozXhaOIoyZDyZFwDpknhpm+sEQsGzP2TTcM9aXmzL37AK4EJmyFiJs17lmb0a+xjaUr5jqSgngijIveGDFCSXP5FBGq1IgWiDbC5ju6WOxVROiIrBbSSwAgudFvsD0zjTzXWJyxhtfKJWOFeHG8YDPJ0itzI+tUpc06+uC6mDxa5kPKVGcAQPAAyFYcPPqFc94BALIAAHsAxsZW0mdW10ddawBj5FkBwADrFrnW9ZtDwuQxuTuvbIQgBHg4AJEgDwAdr1fmM9s2BHu7rJF93I72PAS+AHyQAAAhABhrLLaCAIBmABCRAAMrc8JVgJ5AOLfN6KyTD2ZNNhQ3jSesleYIWIDswq1ZVFAkHascAoVpGLBz2oD4SHYb28QLxm+xGLKNW0SObc0AAwoAGjNYjUPAAFirTABaxQCC0DCAkjA4RwDfW7LWmMoim4AAdiA2t8nSc6ND65wAEJ8AIhZRUMMMAAo3ugDfDizmhlonc3UADM/tHdDjAJB90FbCJ0dPAD7UAJPKOTeb7rInEQAgvAAOC1g8KQAgOOMAPUtD9g4kAEABhAABgQBveQAIjb1brWw6ECEIdywEAYMjANMN9tqsHYUTkQDgAgDT2lm5biyzsoAAocNQ+Es6Fzat1i2lcgBUACsI89fYJW4de7W0PYoAENAtAL8rhlib3CvGMwF3ggUIAgoAh2AAAuCiAEEoM+HcAGB//1gZubqJ5wCHbgBPZ1qbOifbyCUb0bhyQ5gACP0QAAmAGojo1QgKkEGgAcYL2lXksNcbbMrmWRejyQvc3C1UAHhBABieR8LWB0cAPX8SC8Yzs5PeuuYyvkcLE0wAXSYLuo5768q1VzsAgEJgPAC25rfdNg0jO6x9hdiOlk6xQSgwADgJACe5cywHvi0ZFUAAwyAAChAgGyhgZLmvVghigQlAyQEMlo22rNH1XSvKjL6O2x0xjcYADIOCEpH6FrE0tC1nWXUEh5BDEUqtiAvm3JU61zLed3M22yxU67WlzRHPWxCvO7I1h44wIEmIu4E4V9zQD5uV6G9C4AQYZKFhEZE5QX05K+K6w1YBBQAM1gABs22oyjcdKV4UGGlAyZvKu5sTAAwzAjBpwFAIAHbQAFkiIAoMAEngJ3sTwsQ05fpGACAwChC7GGBMACCVgmUiOVm2ATEAQNAAiW71ACiAMXgA/UAa5rJj4QAIOoAISRAgwgB761iIuc1ukYMUADwINTZdMRf2rT0gAUfdMeilDED4QAevCxG1vKpgPDlgAIgYJ4DIHeb+sCCt4oAYTQADVQMYGVgiy7JYnDo05OQGVCI1tzc6ocEFeDAjOV6IjpgjC0QzK7HKEpxoAUi+UQCwBmgAwK+b6JPBNoh0EWIJchzCaRUOBZwtg72TUAKBiwIIyYAATQAEinAg9VV7FCtEYpjeAIiWdhQF+oPk3j/jUAf1AKpisBpLNwQAAMwBIbWMgIZuAAD+AAwkIBBIHJv6FKW1WjolAABcAAqUAYUR6LWlE2EYAAAWgSDIAgyGIIImlko6DTcc967JJ1k2xi+HIAHBKCGMI6W5CCEEeBCgsbch5v0ACMOQNY9wkM2j+gLbgIg33uYrEQtERC21lW4Zxj2QAAUBMAcQMABDgMNGACDP6CLLdAB3QncAALFICMrAOxs4AD0EABpWl75L6FXamkB4B/7ACCmTnCBA3kSYAAqRA8ByhxQVwojmTWvYCCA/niACbQCCSqCAoSQYWLwAj6Z5ADZ8C3kAggGrSIYg3IQAzWiQ6kMsBMthACgAgON4GITCh6oAMAaDasYENGCKoGTX6hQWAykAoMSFPKQAyFH0OIA0YADJAGTVKV0CGgIG3Jsxu0IAvYGCqhgIzwEA+E4ApYCgYSJcsgbvCBhewMavHLfgV2OwnC1EL9wAYWYAWx4DIoVStDYSAFk0QcCLYDDMIgud2hWbRxHoYOuwMGCGDAWxe9w6hVAR9utYEggFAFRAAjqCSUEImcKc60AWIKgwwPrCRHJww4E4CHQTCluSYAC8IJjGJyv6CAwJMHoZxpEH0jA59ySGUwdMFiI1oNKgO0J66w1VlEpUdlHXajlNUVEp9SkqypSmKdM5EegixlMrucqZmYasGLKNboYTZ1NbM01miXMGyFZMPsRsYIxZTGMHmQvYMxGpAIxEoCqiF0VSU4rFILCZRRvkgXJM9Q== | |
| */ | |
| public class Test | |
| { | |
| public static void Main() | |
| { | |
| byte[] obfuscated = resource<byte[]>("test-response.bin"); | |
| int obfuscatedLength = 2353; | |
| string testSessionId = "ExfvbaLYekTosbFMNdcDEEoZ1C3a1n7CuPd89x4FDkFdbXWsOCbjPCb-V9qsVmdAyqHLZZwU401pQ6uwr6ij9gtEyRk0KVfSxLXQ9Qn0II0-XzBryDb_P-Nwp24yFoMUES8NNQ5o12Proeg07aRhKOTBH-F-WrsjoUM4ngdOLTZ48upeOXaE7ALa74uUOoXN"; | |
| SakashoObfuscation() obfs; | |
| obfs.Initialize( | |
| "9ec1c78fa2cb34e2bed5691c08432f04", | |
| testSessionId); | |
| // Decoding | |
| int decSize = obfs.GetDecompressedSize(obfuscated); | |
| if (decSize <= 0) | |
| { | |
| Console.WriteLine("Invalid input size."); | |
| return; | |
| } | |
| byte[]#? decoded = obfs.Decode(obfuscated, obfuscatedLength); | |
| if (decoded == null) | |
| { | |
| Console.WriteLine("Decoding failed."); | |
| return; | |
| } | |
| // Encoding | |
| int[1] cmpSize; cmpSize[0] = 0; | |
| byte[]#? encoded = obfs.Encode(decoded, decSize, cmpSize); | |
| if (encoded == null) | |
| { | |
| Console.WriteLine("Encoding failed."); | |
| return; | |
| } | |
| byte[]# encodedFinal = new byte[cmpSize[0]]; | |
| encoded.CopyTo(0, encodedFinal, 0, cmpSize[0]); | |
| // Re-decode what was just encoded to verify the round trip. | |
| byte[]#? reDecoded = obfs.Decode(encodedFinal, cmpSize[0]); | |
| if (reDecoded == null) | |
| { | |
| Console.WriteLine("RE-decoding failed."); | |
| return; | |
| } | |
| string() reDecodedString = Encoding.UTF8.GetString(reDecoded, 0, decSize); | |
| Console.WriteLine(reDecodedString); | |
| } | |
| } |