2.6.30: add lzma support to squashfs through pcomp

SVN-Revision: 15934
Felix Fietkau, 16 years ago
commit f0fe3392d1

+ 7 - 3
target/linux/generic-2.6/config-2.6.30

@@ -322,7 +322,7 @@ CONFIG_CRYPTO_AEAD2=m
 # CONFIG_CRYPTO_AEAD is not set
 # CONFIG_CRYPTO_AES_586 is not set
 CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ALGAPI2=m
+CONFIG_CRYPTO_ALGAPI2=y
 CONFIG_CRYPTO_ALGAPI=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_ANUBIS is not set
@@ -363,7 +363,7 @@ CONFIG_CRYPTO_MANAGER=m
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_PCBC is not set
-CONFIG_CRYPTO_PCOMP=m
+CONFIG_CRYPTO_PCOMP=y
 # CONFIG_CRYPTO_PRNG is not set
 # CONFIG_CRYPTO_RMD128 is not set
 # CONFIG_CRYPTO_RMD160 is not set
@@ -385,12 +385,13 @@ CONFIG_CRYPTO_RNG2=m
 # CONFIG_CRYPTO_TWOFISH_586 is not set
 # CONFIG_CRYPTO_TWOFISH_COMMON is not set
 # CONFIG_CRYPTO_TWOFISH is not set
+CONFIG_CRYPTO_UNLZMA=y
 CONFIG_CRYPTO_WORKQUEUE=m
 # CONFIG_CRYPTO_WP512 is not set
 # CONFIG_CRYPTO_XCBC is not set
 # CONFIG_CRYPTO_XTS is not set
 CONFIG_CRYPTO=y
-# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_ZLIB=y
 # CONFIG_DAB is not set
 # CONFIG_DAVICOM_PHY is not set
 # CONFIG_DCB is not set
@@ -1307,6 +1308,7 @@ CONFIG_NFS_V4=y
 # CONFIG_NFTL is not set
 # CONFIG_NILFS2_FS is not set
 CONFIG_NL80211=y
+CONFIG_NLATTR=y
 # CONFIG_NLS_ASCII is not set
 # CONFIG_NLS_CODEPAGE_1250 is not set
 # CONFIG_NLS_CODEPAGE_1251 is not set
@@ -1947,6 +1949,8 @@ CONFIG_SND_VERBOSE_PROCFS=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_SQUASHFS_EMBEDDED is not set
 CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+CONFIG_SQUASHFS_SUPPORT_LZMA=y
+CONFIG_SQUASHFS_SUPPORT_ZLIB=y
 # CONFIG_SQUASHFS_VMALLOC is not set
 CONFIG_SQUASHFS=y
 # CONFIG_SSB_BLOCKIO is not set
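
Note on the flips from =m to =y above: squashfs is built into the kernel (CONFIG_SQUASHFS=y), and after the patches below it needs a pcomp transform at mount time, before modules can be loaded, so the crypto core, CRYPTO_PCOMP and the zlib/lzma algorithms must be built-in as well; CONFIG_NLATTR=y is needed because the converted squashfs passes its zlib parameters as a netlink attribute. For orientation, here is a minimal consumer-side sketch of the pcomp decompression flow these options enable. It is a hypothetical helper, not code from this commit: it assumes the same 2.6.30 headers the squashfs patch below includes, the bytes-produced return convention introduced by 050-pcomp_update.patch, and input/output that fit in a single pass.

	#include <linux/err.h>
	#include <linux/zlib.h>		/* DEF_WBITS */
	#include <crypto/compress.h>	/* pcomp consumer API */
	#include <net/netlink.h>	/* nla_attr_size() */

	/* Inflate one complete zlib stream; returns bytes produced or -errno. */
	static int pcomp_inflate_once(const void *src, unsigned int slen,
				      void *dst, unsigned int dlen)
	{
		struct {
			struct nlattr nla;
			int val;
		} params = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(int)),
				.nla_type	= ZLIB_DECOMP_WINDOWBITS,
			},
			.val			= DEF_WBITS,
		};
		struct comp_request req = {
			.next_in	= src,
			.avail_in	= slen,
			.next_out	= dst,
			.avail_out	= dlen,
		};
		struct crypto_pcomp *tfm;
		int produced = 0, res;

		tfm = crypto_alloc_pcomp("zlib", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		res = crypto_decompress_setup(tfm, &params, sizeof(params));
		if (res)
			goto out;
		res = crypto_decompress_init(tfm);
		if (res)
			goto out;

		res = crypto_decompress_update(tfm, &req);	/* >= 0: bytes out */
		if (res < 0)
			goto out;
		produced += res;

		res = crypto_decompress_final(tfm, &req);
		if (res < 0)
			goto out;
		res = produced + res;
	out:
		crypto_free_pcomp(tfm);
		return res;
	}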

+ 280 - 0
target/linux/generic-2.6/patches-2.6.30/050-pcomp_update.patch

@@ -0,0 +1,280 @@
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -914,24 +914,25 @@ static int test_pcomp(struct crypto_pcom
+ 	const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
+ 	unsigned int i;
+ 	char result[COMP_BUF_SIZE];
+-	int error;
++	int res;
+ 
+ 	for (i = 0; i < ctcount; i++) {
+ 		struct comp_request req;
++		unsigned int produced = 0;
+ 
+-		error = crypto_compress_setup(tfm, ctemplate[i].params,
+-					      ctemplate[i].paramsize);
+-		if (error) {
++		res = crypto_compress_setup(tfm, ctemplate[i].params,
++					    ctemplate[i].paramsize);
++		if (res) {
+ 			pr_err("alg: pcomp: compression setup failed on test "
+-			       "%d for %s: error=%d\n", i + 1, algo, error);
+-			return error;
++			       "%d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
+ 
+-		error = crypto_compress_init(tfm);
+-		if (error) {
++		res = crypto_compress_init(tfm);
++		if (res) {
+ 			pr_err("alg: pcomp: compression init failed on test "
+-			       "%d for %s: error=%d\n", i + 1, algo, error);
+-			return error;
++			       "%d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
+ 
+ 		memset(result, 0, sizeof(result));
+@@ -941,32 +942,37 @@ static int test_pcomp(struct crypto_pcom
+ 		req.next_out = result;
+ 		req.avail_out = ctemplate[i].outlen / 2;
+ 
+-		error = crypto_compress_update(tfm, &req);
+-		if (error && (error != -EAGAIN || req.avail_in)) {
++		res = crypto_compress_update(tfm, &req);
++		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
+ 			pr_err("alg: pcomp: compression update failed on test "
+-			       "%d for %s: error=%d\n", i + 1, algo, error);
+-			return error;
++			       "%d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
++		if (res > 0)
++			produced += res;
+ 
+ 		/* Add remaining input data */
+ 		req.avail_in += (ctemplate[i].inlen + 1) / 2;
+ 
+-		error = crypto_compress_update(tfm, &req);
+-		if (error && (error != -EAGAIN || req.avail_in)) {
++		res = crypto_compress_update(tfm, &req);
++		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
+ 			pr_err("alg: pcomp: compression update failed on test "
+-			       "%d for %s: error=%d\n", i + 1, algo, error);
+-			return error;
++			       "%d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
++		if (res > 0)
++			produced += res;
+ 
+ 		/* Provide remaining output space */
+ 		req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
+ 
+-		error = crypto_compress_final(tfm, &req);
+-		if (error) {
++		res = crypto_compress_final(tfm, &req);
++		if (res < 0) {
+ 			pr_err("alg: pcomp: compression final failed on test "
+-			       "%d for %s: error=%d\n", i + 1, algo, error);
+-			return error;
++			       "%d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
++		produced += res;
+ 
+ 		if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
+ 			pr_err("alg: comp: Compression test %d failed for %s: "
+@@ -976,6 +982,13 @@ static int test_pcomp(struct crypto_pcom
+ 			return -EINVAL;
+ 		}
+ 
++		if (produced != ctemplate[i].outlen) {
++			pr_err("alg: comp: Compression test %d failed for %s: "
++			       "returned len = %u (expected %d)\n", i + 1,
++			       algo, produced, ctemplate[i].outlen);
++			return -EINVAL;
++		}
++
+ 		if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
+ 			pr_err("alg: pcomp: Compression test %d failed for "
+ 			       "%s\n", i + 1, algo);
+@@ -986,21 +999,21 @@ static int test_pcomp(struct crypto_pcom
+ 
+ 	for (i = 0; i < dtcount; i++) {
+ 		struct comp_request req;
++		unsigned int produced = 0;
+ 
+-		error = crypto_decompress_setup(tfm, dtemplate[i].params,
+-						dtemplate[i].paramsize);
+-		if (error) {
++		res = crypto_decompress_setup(tfm, dtemplate[i].params,
++					      dtemplate[i].paramsize);
++		if (res) {
+ 			pr_err("alg: pcomp: decompression setup failed on "
+-			       "test %d for %s: error=%d\n", i + 1, algo,
+-			       error);
+-			return error;
++			       "test %d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
+ 
+-		error = crypto_decompress_init(tfm);
+-		if (error) {
++		res = crypto_decompress_init(tfm);
++		if (res) {
+ 			pr_err("alg: pcomp: decompression init failed on test "
+-			       "%d for %s: error=%d\n", i + 1, algo, error);
+-			return error;
++			       "%d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
+ 
+ 		memset(result, 0, sizeof(result));
+@@ -1010,35 +1023,38 @@ static int test_pcomp(struct crypto_pcom
+ 		req.next_out = result;
+ 		req.avail_out = dtemplate[i].outlen / 2;
+ 
+-		error = crypto_decompress_update(tfm, &req);
+-		if (error  && (error != -EAGAIN || req.avail_in)) {
++		res = crypto_decompress_update(tfm, &req);
++		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
+ 			pr_err("alg: pcomp: decompression update failed on "
+-			       "test %d for %s: error=%d\n", i + 1, algo,
+-			       error);
+-			return error;
++			       "test %d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
++		if (res > 0)
++			produced += res;
+ 
+ 		/* Add remaining input data */
+ 		req.avail_in += (dtemplate[i].inlen + 1) / 2;
+ 
+-		error = crypto_decompress_update(tfm, &req);
+-		if (error  && (error != -EAGAIN || req.avail_in)) {
++		res = crypto_decompress_update(tfm, &req);
++		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
+ 			pr_err("alg: pcomp: decompression update failed on "
+-			       "test %d for %s: error=%d\n", i + 1, algo,
+-			       error);
+-			return error;
++			       "test %d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
++		if (res > 0)
++			produced += res;
+ 
+ 		/* Provide remaining output space */
+ 		req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
+ 
+-		error = crypto_decompress_final(tfm, &req);
+-		if (error  && (error != -EAGAIN || req.avail_in)) {
++		res = crypto_decompress_final(tfm, &req);
++		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
+ 			pr_err("alg: pcomp: decompression final failed on "
+-			       "test %d for %s: error=%d\n", i + 1, algo,
+-			       error);
+-			return error;
++			       "test %d for %s: error=%d\n", i + 1, algo, res);
++			return res;
+ 		}
++		if (res > 0)
++			produced += res;
+ 
+ 		if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
+ 			pr_err("alg: comp: Decompression test %d failed for "
+@@ -1048,6 +1064,13 @@ static int test_pcomp(struct crypto_pcom
+ 			return -EINVAL;
+ 		}
+ 
++		if (produced != dtemplate[i].outlen) {
++			pr_err("alg: comp: Decompression test %d failed for "
++			       "%s: returned len = %u (expected %d)\n", i + 1,
++			       algo, produced, dtemplate[i].outlen);
++			return -EINVAL;
++		}
++
+ 		if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
+ 			pr_err("alg: pcomp: Decompression test %d failed for "
+ 			       "%s\n", i + 1, algo);
+--- a/crypto/zlib.c
++++ b/crypto/zlib.c
+@@ -165,15 +165,15 @@ static int zlib_compress_update(struct c
+ 		return -EINVAL;
+ 	}
+ 
++	ret = req->avail_out - stream->avail_out;
+ 	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+ 		 stream->avail_in, stream->avail_out,
+-		 req->avail_in - stream->avail_in,
+-		 req->avail_out - stream->avail_out);
++		 req->avail_in - stream->avail_in, ret);
+ 	req->next_in = stream->next_in;
+ 	req->avail_in = stream->avail_in;
+ 	req->next_out = stream->next_out;
+ 	req->avail_out = stream->avail_out;
+-	return 0;
++	return ret;
+ }
+ 
+ static int zlib_compress_final(struct crypto_pcomp *tfm,
+@@ -195,15 +195,15 @@ static int zlib_compress_final(struct cr
+ 		return -EINVAL;
+ 	}
+ 
++	ret = req->avail_out - stream->avail_out;
+ 	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+ 		 stream->avail_in, stream->avail_out,
+-		 req->avail_in - stream->avail_in,
+-		 req->avail_out - stream->avail_out);
++		 req->avail_in - stream->avail_in, ret);
+ 	req->next_in = stream->next_in;
+ 	req->avail_in = stream->avail_in;
+ 	req->next_out = stream->next_out;
+ 	req->avail_out = stream->avail_out;
+-	return 0;
++	return ret;
+ }
+ 
+ 
+@@ -280,15 +280,15 @@ static int zlib_decompress_update(struct
+ 		return -EINVAL;
+ 	}
+ 
++	ret = req->avail_out - stream->avail_out;
+ 	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+ 		 stream->avail_in, stream->avail_out,
+-		 req->avail_in - stream->avail_in,
+-		 req->avail_out - stream->avail_out);
++		 req->avail_in - stream->avail_in, ret);
+ 	req->next_in = stream->next_in;
+ 	req->avail_in = stream->avail_in;
+ 	req->next_out = stream->next_out;
+ 	req->avail_out = stream->avail_out;
+-	return 0;
++	return ret;
+ }
+ 
+ static int zlib_decompress_final(struct crypto_pcomp *tfm,
+@@ -328,15 +328,15 @@ static int zlib_decompress_final(struct 
+ 		return -EINVAL;
+ 	}
+ 
++	ret = req->avail_out - stream->avail_out;
+ 	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+ 		 stream->avail_in, stream->avail_out,
+-		 req->avail_in - stream->avail_in,
+-		 req->avail_out - stream->avail_out);
++		 req->avail_in - stream->avail_in, ret);
+ 	req->next_in = stream->next_in;
+ 	req->avail_in = stream->avail_in;
+ 	req->next_out = stream->next_out;
+ 	req->avail_out = stream->avail_out;
+-	return 0;
++	return ret;
+ }
+ 
+ 
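
The testmgr.c and zlib.c changes above alter the pcomp calling convention: the update and final hooks now return the number of bytes produced (or a negative errno) instead of always returning 0 on success. Callers accumulate the per-call counts; the pattern used throughout the updated testmgr.c looks like this (sketch, variable names as in the test code):

	res = crypto_decompress_update(tfm, &req);
	if (res < 0 && (res != -EAGAIN || req.avail_in))
		return res;		/* hard error */
	if (res > 0)
		produced += res;	/* bytes written at req.next_out */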

+ 234 - 0
target/linux/generic-2.6/patches-2.6.30/051-squashfs_pcomp.patch

@@ -0,0 +1,234 @@
+--- a/fs/squashfs/Kconfig
++++ b/fs/squashfs/Kconfig
+@@ -1,7 +1,8 @@
+ config SQUASHFS
+ 	tristate "SquashFS 4.0 - Squashed file system support"
+ 	depends on BLOCK
+-	select ZLIB_INFLATE
++	select CRYPTO
++	select CRYPTO_ZLIB
+ 	help
+ 	  Saying Y here includes support for SquashFS 4.0 (a Compressed
+ 	  Read-Only File System).  Squashfs is a highly compressed read-only
+--- a/fs/squashfs/block.c
++++ b/fs/squashfs/block.c
+@@ -32,7 +32,8 @@
+ #include <linux/mutex.h>
+ #include <linux/string.h>
+ #include <linux/buffer_head.h>
+-#include <linux/zlib.h>
++
++#include <crypto/compress.h>
+ 
+ #include "squashfs_fs.h"
+ #include "squashfs_fs_sb.h"
+@@ -153,7 +154,8 @@ int squashfs_read_data(struct super_bloc
+ 	}
+ 
+ 	if (compressed) {
+-		int zlib_err = 0, zlib_init = 0;
++		int res = 0, decomp_init = 0;
++		struct comp_request req;
+ 
+ 		/*
+ 		 * Uncompress block.
+@@ -161,12 +163,13 @@ int squashfs_read_data(struct super_bloc
+ 
+ 		mutex_lock(&msblk->read_data_mutex);
+ 
+-		msblk->stream.avail_out = 0;
+-		msblk->stream.avail_in = 0;
++		req.avail_out = 0;
++		req.avail_in = 0;
+ 
+ 		bytes = length;
++		length = 0;
+ 		do {
+-			if (msblk->stream.avail_in == 0 && k < b) {
++			if (req.avail_in == 0 && k < b) {
+ 				avail = min(bytes, msblk->devblksize - offset);
+ 				bytes -= avail;
+ 				wait_on_buffer(bh[k]);
+@@ -179,45 +182,47 @@ int squashfs_read_data(struct super_bloc
+ 					continue;
+ 				}
+ 
+-				msblk->stream.next_in = bh[k]->b_data + offset;
+-				msblk->stream.avail_in = avail;
++				req.next_in = bh[k]->b_data + offset;
++				req.avail_in = avail;
+ 				offset = 0;
+ 			}
+ 
+-			if (msblk->stream.avail_out == 0 && page < pages) {
+-				msblk->stream.next_out = buffer[page++];
+-				msblk->stream.avail_out = PAGE_CACHE_SIZE;
++			if (req.avail_out == 0 && page < pages) {
++				req.next_out = buffer[page++];
++				req.avail_out = PAGE_CACHE_SIZE;
+ 			}
+ 
+-			if (!zlib_init) {
+-				zlib_err = zlib_inflateInit(&msblk->stream);
+-				if (zlib_err != Z_OK) {
+-					ERROR("zlib_inflateInit returned"
+-						" unexpected result 0x%x,"
+-						" srclength %d\n", zlib_err,
+-						srclength);
++			if (!decomp_init) {
++				res = crypto_decompress_init(msblk->tfm);
++				if (res) {
++					ERROR("crypto_decompress_init "
++						"returned %d, srclength %d\n",
++						res, srclength);
+ 					goto release_mutex;
+ 				}
+-				zlib_init = 1;
++				decomp_init = 1;
+ 			}
+ 
+-			zlib_err = zlib_inflate(&msblk->stream, Z_SYNC_FLUSH);
++			res = crypto_decompress_update(msblk->tfm, &req);
++			if (res < 0) {
++				ERROR("crypto_decompress_update returned %d, "
++					"data probably corrupt\n", res);
++				goto release_mutex;
++			}
++			length += res;
+ 
+-			if (msblk->stream.avail_in == 0 && k < b)
++			if (req.avail_in == 0 && k < b)
+ 				put_bh(bh[k++]);
+-		} while (zlib_err == Z_OK);
++		} while (bytes || res);
+ 
+-		if (zlib_err != Z_STREAM_END) {
+-			ERROR("zlib_inflate error, data probably corrupt\n");
++		res = crypto_decompress_final(msblk->tfm, &req);
++		if (res < 0) {
++			ERROR("crypto_decompress_final returned %d, data "
++				"probably corrupt\n", res);
+ 			goto release_mutex;
+ 		}
++		length += res;
+ 
+-		zlib_err = zlib_inflateEnd(&msblk->stream);
+-		if (zlib_err != Z_OK) {
+-			ERROR("zlib_inflate error, data probably corrupt\n");
+-			goto release_mutex;
+-		}
+-		length = msblk->stream.total_out;
+ 		mutex_unlock(&msblk->read_data_mutex);
+ 	} else {
+ 		/*
+--- a/fs/squashfs/squashfs_fs_sb.h
++++ b/fs/squashfs/squashfs_fs_sb.h
+@@ -64,7 +64,7 @@ struct squashfs_sb_info {
+ 	struct mutex		read_data_mutex;
+ 	struct mutex		meta_index_mutex;
+ 	struct meta_index	*meta_index;
+-	z_stream		stream;
++	struct crypto_pcomp	*tfm;
+ 	__le64			*inode_lookup_table;
+ 	u64			inode_table;
+ 	u64			directory_table;
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -37,11 +37,19 @@
+ #include <linux/zlib.h>
+ #include <linux/magic.h>
+ 
++#include <crypto/compress.h>
++
++#include <net/netlink.h>
++
+ #include "squashfs_fs.h"
+ #include "squashfs_fs_sb.h"
+ #include "squashfs_fs_i.h"
+ #include "squashfs.h"
+ 
++
++#define SQUASHFS_CRYPTO_ALG	"zlib"
++
++
+ static struct file_system_type squashfs_fs_type;
+ static struct super_operations squashfs_super_ops;
+ 
+@@ -75,6 +83,16 @@ static int squashfs_fill_super(struct su
+ 	unsigned short flags;
+ 	unsigned int fragments;
+ 	u64 lookup_table_start;
++	struct {
++		struct nlattr nla;
++		int val;
++	} params = {
++		.nla = {
++			.nla_len	= nla_attr_size(sizeof(int)),
++			.nla_type	= ZLIB_DECOMP_WINDOWBITS,
++		},
++		.val			= DEF_WBITS,
++	};
+ 	int err;
+ 
+ 	TRACE("Entered squashfs_fill_superblock\n");
+@@ -86,16 +104,25 @@ static int squashfs_fill_super(struct su
+ 	}
+ 	msblk = sb->s_fs_info;
+ 
+-	msblk->stream.workspace = kmalloc(zlib_inflate_workspacesize(),
+-		GFP_KERNEL);
+-	if (msblk->stream.workspace == NULL) {
+-		ERROR("Failed to allocate zlib workspace\n");
++	msblk->tfm = crypto_alloc_pcomp(SQUASHFS_CRYPTO_ALG, 0,
++					CRYPTO_ALG_ASYNC);
++	if (IS_ERR(msblk->tfm)) {
++		ERROR("Failed to load %s crypto module\n",
++		      SQUASHFS_CRYPTO_ALG);
++		err = PTR_ERR(msblk->tfm);
++		goto failed_pcomp;
++	}
++
++	err = crypto_decompress_setup(msblk->tfm, &params, sizeof(params));
++	if (err) {
++		ERROR("Failed to set up decompression parameters\n");
+ 		goto failure;
+ 	}
+ 
+ 	sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+ 	if (sblk == NULL) {
+ 		ERROR("Failed to allocate squashfs_super_block\n");
++		err = -ENOMEM;
+ 		goto failure;
+ 	}
+ 
+@@ -284,17 +311,18 @@ failed_mount:
+ 	kfree(msblk->inode_lookup_table);
+ 	kfree(msblk->fragment_index);
+ 	kfree(msblk->id_table);
+-	kfree(msblk->stream.workspace);
++	crypto_free_pcomp(msblk->tfm);
+ 	kfree(sb->s_fs_info);
+ 	sb->s_fs_info = NULL;
+ 	kfree(sblk);
+ 	return err;
+ 
+ failure:
+-	kfree(msblk->stream.workspace);
++	crypto_free_pcomp(msblk->tfm);
++failed_pcomp:
+ 	kfree(sb->s_fs_info);
+ 	sb->s_fs_info = NULL;
+-	return -ENOMEM;
++	return err;
+ }
+ 
+ 
+@@ -333,7 +361,7 @@ static void squashfs_put_super(struct su
+ 		kfree(sbi->id_table);
+ 		kfree(sbi->fragment_index);
+ 		kfree(sbi->meta_index);
+-		kfree(sbi->stream.workspace);
++		crypto_free_pcomp(sbi->tfm);
+ 		kfree(sb->s_fs_info);
+ 		sb->s_fs_info = NULL;
+ 	}

+ 821 - 0
target/linux/generic-2.6/patches-2.6.30/052-pcomp_lzma_support.patch

@@ -0,0 +1,821 @@
+--- /dev/null
++++ b/crypto/unlzma.c
+@@ -0,0 +1,710 @@
++/*
++ * LZMA uncompression module for pcomp
++ * Copyright (C) 2009  Felix Fietkau <[email protected]>
++ *
++ * Based on:
++ *  Initial Linux kernel adaptation
++ *  Copyright (C) 2006  Alain < [email protected] >
++ *
++ *  Based on small lzma deflate implementation/Small range coder
++ *  implementation for lzma.
++ *  Copyright (C) 2006  Aurelien Jacobs < [email protected] >
++ *
++ *  Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
++ *  Copyright (C) 1999-2005  Igor Pavlov
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ *
++ * FIXME: the current implementation assumes that the caller will
++ * not free any output buffers until the whole decompression has been
++ * completed. This is necessary, because LZMA looks back at old output
++ * instead of doing a separate dictionary allocation, which saves RAM.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/interrupt.h>
++#include <linux/mm.h>
++#include <linux/net.h>
++#include <linux/slab.h>
++#include <linux/kthread.h>
++
++#include <crypto/internal/compress.h>
++#include "unlzma.h"
++
++static int instance = 0;
++
++struct unlzma_buffer {
++	struct unlzma_buffer *last;
++	int offset;
++	int size;
++	u8 *ptr;
++};
++
++struct unlzma_ctx {
++	struct task_struct *thread;
++	wait_queue_head_t next_req;
++	struct mutex mutex;
++	bool active;
++	bool cancel;
++
++	const u8 *next_in;
++	int avail_in;
++
++	u8 *next_out;
++	int avail_out;
++
++	/* reader state */
++	u32 code;
++	u32 range;
++	u32 bound;
++
++	/* writer state */
++	u8 previous_byte;
++	ssize_t pos;
++	struct unlzma_buffer *head;
++
++	/* cstate */
++	int state;
++	u32 rep0, rep1, rep2, rep3;
++
++	u32 dict_size;
++
++	void *workspace;
++	int workspace_size;
++};
++
++static inline bool
++unlzma_should_stop(struct unlzma_ctx *ctx)
++{
++	return unlikely(kthread_should_stop() || ctx->cancel);
++}
++
++static void
++unlzma_request_buffer(struct unlzma_ctx *ctx, int *avail)
++{
++	mutex_unlock(&ctx->mutex);
++	wait_event(ctx->next_req, unlzma_should_stop(ctx) || (*avail > 0));
++	mutex_lock(&ctx->mutex);
++}
++
++static u8
++rc_read(struct unlzma_ctx *ctx)
++{
++	if (unlikely(ctx->avail_in <= 0))
++		unlzma_request_buffer(ctx, &ctx->avail_in);
++
++	if (unlzma_should_stop(ctx))
++		return 0;
++
++	ctx->avail_in--;
++	return *(ctx->next_in++);
++}
++
++
++static inline void
++rc_get_code(struct unlzma_ctx *ctx)
++{
++	ctx->code = (ctx->code << 8) | rc_read(ctx);
++}
++
++static void
++rc_normalize(struct unlzma_ctx *ctx)
++{
++	if (ctx->range < (1 << RC_TOP_BITS)) {
++		ctx->range <<= 8;
++		rc_get_code(ctx);
++	}
++}
++
++static int
++rc_is_bit_0(struct unlzma_ctx *ctx, u16 *p)
++{
++	rc_normalize(ctx);
++	ctx->bound = *p * (ctx->range >> RC_MODEL_TOTAL_BITS);
++	return ctx->code < ctx->bound;
++}
++
++static void
++rc_update_bit_0(struct unlzma_ctx *ctx, u16 *p)
++{
++	ctx->range = ctx->bound;
++	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
++}
++
++static void
++rc_update_bit_1(struct unlzma_ctx *ctx, u16 *p)
++{
++	ctx->range -= ctx->bound;
++	ctx->code -= ctx->bound;
++	*p -= *p >> RC_MOVE_BITS;
++}
++
++static bool
++rc_get_bit(struct unlzma_ctx *ctx, u16 *p, int *symbol)
++{
++	if (rc_is_bit_0(ctx, p)) {
++		rc_update_bit_0(ctx, p);
++		*symbol *= 2;
++		return 0;
++	} else {
++		rc_update_bit_1(ctx, p);
++		*symbol = *symbol * 2 + 1;
++		return 1;
++	}
++}
++
++static int
++rc_direct_bit(struct unlzma_ctx *ctx)
++{
++	rc_normalize(ctx);
++	ctx->range >>= 1;
++	if (ctx->code >= ctx->range) {
++		ctx->code -= ctx->range;
++		return 1;
++	}
++	return 0;
++}
++
++static void
++rc_bit_tree_decode(struct unlzma_ctx *ctx, u16 *p, int num_levels, int *symbol)
++{
++	int i = num_levels;
++
++	*symbol = 1;
++	while (i--)
++		rc_get_bit(ctx, p + *symbol, symbol);
++	*symbol -= 1 << num_levels;
++}
++
++static u8
++peek_old_byte(struct unlzma_ctx *ctx, u32 offs)
++{
++	struct unlzma_buffer *bh = ctx->head;
++	u32 pos;
++
++	pos = ctx->pos - offs;
++	if (pos >= ctx->dict_size) {
++		pos = (~pos % ctx->dict_size);
++	}
++
++	while (bh->offset > pos) {
++		bh = bh->last;
++		if (!bh)
++			return 0;
++	}
++
++	pos -= bh->offset;
++	if (pos > bh->size)
++		return 0;
++
++	return bh->ptr[pos];
++}
++
++static void
++get_buffer(struct unlzma_ctx *ctx)
++{
++	struct unlzma_buffer *bh;
++
++	bh = kzalloc(sizeof(struct unlzma_buffer), GFP_KERNEL);
++	bh->ptr = ctx->next_out;
++	bh->offset = ctx->pos;
++	bh->last = ctx->head;
++	bh->size = ctx->avail_out;
++	ctx->head = bh;
++}
++
++static void
++write_byte(struct unlzma_ctx *ctx, u8 byte)
++{
++	if (unlikely(ctx->avail_out <= 0)) {
++		unlzma_request_buffer(ctx, &ctx->avail_out);
++		get_buffer(ctx);
++	}
++
++	if (!ctx->avail_out)
++		return;
++
++	ctx->previous_byte = byte;
++	*(ctx->next_out++) = byte;
++	ctx->avail_out--;
++	ctx->pos++;
++}
++
++
++static inline void
++copy_byte(struct unlzma_ctx *ctx, u32 offs)
++{
++	write_byte(ctx, peek_old_byte(ctx, offs));
++}
++
++static void
++copy_bytes(struct unlzma_ctx *ctx, u32 rep0, int len)
++{
++	do {
++		copy_byte(ctx, rep0);
++		len--;
++		if (unlzma_should_stop(ctx))
++			break;
++	} while (len != 0);
++}
++
++static void
++process_bit0(struct unlzma_ctx *ctx, u16 *p, int pos_state, u16 *prob,
++             int lc, u32 literal_pos_mask)
++{
++	int mi = 1;
++	rc_update_bit_0(ctx, prob);
++	prob = (p + LZMA_LITERAL +
++		(LZMA_LIT_SIZE
++		 * (((ctx->pos & literal_pos_mask) << lc)
++		    + (ctx->previous_byte >> (8 - lc))))
++		);
++
++	if (ctx->state >= LZMA_NUM_LIT_STATES) {
++		int match_byte = peek_old_byte(ctx, ctx->rep0);
++		do {
++			u16 bit;
++			u16 *prob_lit;
++
++			match_byte <<= 1;
++			bit = match_byte & 0x100;
++			prob_lit = prob + 0x100 + bit + mi;
++			if (rc_get_bit(ctx, prob_lit, &mi) != !!bit)
++				break;
++		} while (mi < 0x100);
++	}
++	while (mi < 0x100) {
++		u16 *prob_lit = prob + mi;
++		rc_get_bit(ctx, prob_lit, &mi);
++	}
++	write_byte(ctx, mi);
++	if (ctx->state < 4)
++		ctx->state = 0;
++	else if (ctx->state < 10)
++		ctx->state -= 3;
++	else
++		ctx->state -= 6;
++}
++
++static void
++process_bit1(struct unlzma_ctx *ctx, u16 *p, int pos_state, u16 *prob)
++{
++	int offset;
++	u16 *prob_len;
++	int num_bits;
++	int len;
++
++	rc_update_bit_1(ctx, prob);
++	prob = p + LZMA_IS_REP + ctx->state;
++	if (rc_is_bit_0(ctx, prob)) {
++		rc_update_bit_0(ctx, prob);
++		ctx->rep3 = ctx->rep2;
++		ctx->rep2 = ctx->rep1;
++		ctx->rep1 = ctx->rep0;
++		ctx->state = ctx->state < LZMA_NUM_LIT_STATES ? 0 : 3;
++		prob = p + LZMA_LEN_CODER;
++	} else {
++		rc_update_bit_1(ctx, prob);
++		prob = p + LZMA_IS_REP_G0 + ctx->state;
++		if (rc_is_bit_0(ctx, prob)) {
++			rc_update_bit_0(ctx, prob);
++			prob = (p + LZMA_IS_REP_0_LONG
++				+ (ctx->state <<
++				   LZMA_NUM_POS_BITS_MAX) +
++				pos_state);
++			if (rc_is_bit_0(ctx, prob)) {
++				rc_update_bit_0(ctx, prob);
++
++				ctx->state = ctx->state < LZMA_NUM_LIT_STATES ?
++					9 : 11;
++				copy_byte(ctx, ctx->rep0);
++				return;
++			} else {
++				rc_update_bit_1(ctx, prob);
++			}
++		} else {
++			u32 distance;
++
++			rc_update_bit_1(ctx, prob);
++			prob = p + LZMA_IS_REP_G1 + ctx->state;
++			if (rc_is_bit_0(ctx, prob)) {
++				rc_update_bit_0(ctx, prob);
++				distance = ctx->rep1;
++			} else {
++				rc_update_bit_1(ctx, prob);
++				prob = p + LZMA_IS_REP_G2 + ctx->state;
++				if (rc_is_bit_0(ctx, prob)) {
++					rc_update_bit_0(ctx, prob);
++					distance = ctx->rep2;
++				} else {
++					rc_update_bit_1(ctx, prob);
++					distance = ctx->rep3;
++					ctx->rep3 = ctx->rep2;
++				}
++				ctx->rep2 = ctx->rep1;
++			}
++			ctx->rep1 = ctx->rep0;
++			ctx->rep0 = distance;
++		}
++		ctx->state = ctx->state < LZMA_NUM_LIT_STATES ? 8 : 11;
++		prob = p + LZMA_REP_LEN_CODER;
++	}
++
++	prob_len = prob + LZMA_LEN_CHOICE;
++	if (rc_is_bit_0(ctx, prob_len)) {
++		rc_update_bit_0(ctx, prob_len);
++		prob_len = (prob + LZMA_LEN_LOW
++			    + (pos_state <<
++			       LZMA_LEN_NUM_LOW_BITS));
++		offset = 0;
++		num_bits = LZMA_LEN_NUM_LOW_BITS;
++	} else {
++		rc_update_bit_1(ctx, prob_len);
++		prob_len = prob + LZMA_LEN_CHOICE_2;
++		if (rc_is_bit_0(ctx, prob_len)) {
++			rc_update_bit_0(ctx, prob_len);
++			prob_len = (prob + LZMA_LEN_MID
++				    + (pos_state <<
++				       LZMA_LEN_NUM_MID_BITS));
++			offset = 1 << LZMA_LEN_NUM_LOW_BITS;
++			num_bits = LZMA_LEN_NUM_MID_BITS;
++		} else {
++			rc_update_bit_1(ctx, prob_len);
++			prob_len = prob + LZMA_LEN_HIGH;
++			offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
++				  + (1 << LZMA_LEN_NUM_MID_BITS));
++			num_bits = LZMA_LEN_NUM_HIGH_BITS;
++		}
++	}
++
++	rc_bit_tree_decode(ctx, prob_len, num_bits, &len);
++	len += offset;
++
++	if (ctx->state < 4) {
++		int pos_slot;
++
++		ctx->state += LZMA_NUM_LIT_STATES;
++		prob =
++			p + LZMA_POS_SLOT +
++			((len <
++			  LZMA_NUM_LEN_TO_POS_STATES ? len :
++			  LZMA_NUM_LEN_TO_POS_STATES - 1)
++			 << LZMA_NUM_POS_SLOT_BITS);
++		rc_bit_tree_decode(ctx, prob,
++				   LZMA_NUM_POS_SLOT_BITS,
++				   &pos_slot);
++		if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
++			int i, mi;
++			num_bits = (pos_slot >> 1) - 1;
++			ctx->rep0 = 2 | (pos_slot & 1);
++			if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
++				ctx->rep0 <<= num_bits;
++				prob = p + LZMA_SPEC_POS +
++					ctx->rep0 - pos_slot - 1;
++			} else {
++				num_bits -= LZMA_NUM_ALIGN_BITS;
++				while (num_bits--)
++					ctx->rep0 = (ctx->rep0 << 1) |
++						rc_direct_bit(ctx);
++				prob = p + LZMA_ALIGN;
++				ctx->rep0 <<= LZMA_NUM_ALIGN_BITS;
++				num_bits = LZMA_NUM_ALIGN_BITS;
++			}
++			i = 1;
++			mi = 1;
++			while (num_bits--) {
++				if (rc_get_bit(ctx, prob + mi, &mi))
++					ctx->rep0 |= i;
++				i <<= 1;
++			}
++		} else
++			ctx->rep0 = pos_slot;
++		if (++(ctx->rep0) == 0)
++			return;
++	}
++
++	len += LZMA_MATCH_MIN_LEN;
++
++	copy_bytes(ctx, ctx->rep0, len);
++}
++
++
++static int
++do_unlzma(struct unlzma_ctx *ctx)
++{
++	u8 hdr_buf[sizeof(struct lzma_header)];
++	struct lzma_header *header = (struct lzma_header *)hdr_buf;
++	u32 pos_state_mask;
++	u32 literal_pos_mask;
++	int lc, pb, lp;
++	int num_probs;
++	int i, mi;
++	u16 *p;
++
++	for (i = 0; i < sizeof(struct lzma_header); i++) {
++		hdr_buf[i] = rc_read(ctx);
++	}
++
++	ctx->pos = 0;
++	get_buffer(ctx);
++	ctx->active = true;
++	ctx->state = 0;
++	ctx->rep0 = ctx->rep1 = ctx->rep2 = ctx->rep3 = 1;
++
++	ctx->previous_byte = 0;
++	ctx->code = 0;
++	ctx->range = 0xFFFFFFFF;
++
++	ctx->dict_size = le32_to_cpu(header->dict_size);
++
++	if (header->pos >= (9 * 5 * 5))
++		return -1;
++
++	mi = 0;
++	lc = header->pos;
++	while (lc >= 9) {
++		mi++;
++		lc -= 9;
++	}
++	pb = 0;
++	lp = mi;
++	while (lp >= 5) {
++		pb++;
++		lp -= 5;
++	}
++	pos_state_mask = (1 << pb) - 1;
++	literal_pos_mask = (1 << lp) - 1;
++
++	if (ctx->dict_size == 0)
++		ctx->dict_size = 1;
++
++	num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
++	if (ctx->workspace_size < num_probs * sizeof(*p)) {
++		if (ctx->workspace)
++			vfree(ctx->workspace);
++		ctx->workspace = vmalloc(num_probs * sizeof(*p));
++	}
++	p = (u16 *) ctx->workspace;
++	if (!p)
++		return -1;
++
++	num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
++	for (i = 0; i < num_probs; i++)
++		p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
++
++	for (i = 0; i < 5; i++)
++		rc_get_code(ctx);
++
++	while (1) {
++		int pos_state =	ctx->pos & pos_state_mask;
++		u16 *prob = p + LZMA_IS_MATCH +
++			(ctx->state << LZMA_NUM_POS_BITS_MAX) + pos_state;
++		if (rc_is_bit_0(ctx, prob))
++			process_bit0(ctx, p, pos_state, prob,
++				     lc, literal_pos_mask);
++		else {
++			process_bit1(ctx, p, pos_state, prob);
++			if (ctx->rep0 == 0)
++				break;
++		}
++		if (unlzma_should_stop(ctx))
++			break;
++	}
++
++	return ctx->pos;
++}
++
++
++static void
++unlzma_reset_buf(struct unlzma_ctx *ctx)
++{
++	ctx->avail_in = 0;
++	ctx->next_in = NULL;
++	ctx->avail_out = 0;
++	ctx->next_out = NULL;
++}
++
++static int
++unlzma_thread(void *data)
++{
++	struct unlzma_ctx *ctx = data;
++
++	mutex_lock(&ctx->mutex);
++	do {
++		if (do_unlzma(ctx) < 0)
++			ctx->pos = 0;
++		unlzma_reset_buf(ctx);
++		ctx->cancel = false;
++		ctx->active = false;
++		while (ctx->head) {
++			struct unlzma_buffer *bh = ctx->head;
++			ctx->head = bh->last;
++			kfree(bh);
++		}
++	} while (!kthread_should_stop());
++	mutex_unlock(&ctx->mutex);
++	return 0;
++}
++
++
++static int
++unlzma_init(struct crypto_tfm *tfm)
++{
++	return 0;
++}
++
++static void
++unlzma_cancel(struct unlzma_ctx *ctx)
++{
++	unlzma_reset_buf(ctx);
++
++	if (!ctx->active)
++		return;
++
++	ctx->cancel = true;
++	do {
++		mutex_unlock(&ctx->mutex);
++		wake_up(&ctx->next_req);
++		schedule();
++		mutex_lock(&ctx->mutex);
++	} while (ctx->cancel);
++}
++
++
++static void
++unlzma_exit(struct crypto_tfm *tfm)
++{
++	struct unlzma_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	if (ctx->thread) {
++		unlzma_cancel(ctx);
++		kthread_stop(ctx->thread);
++		ctx->thread = NULL;
++	}
++}
++
++static int
++unlzma_decompress_setup(struct crypto_pcomp *tfm, void *p, unsigned int len)
++{
++	struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
++	int ret = 0;
++
++	if (ctx->thread)
++		return 0;
++
++	mutex_init(&ctx->mutex);
++	init_waitqueue_head(&ctx->next_req);
++	ctx->thread = kthread_run(unlzma_thread, ctx, "unlzma/%d", instance++);
++	if (IS_ERR(ctx->thread)) {
++		ret = PTR_ERR(ctx->thread);
++		ctx->thread = NULL;
++	}
++
++	return ret;
++}
++
++static int
++unlzma_decompress_init(struct crypto_pcomp *tfm)
++{
++	struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
++
++	ctx->pos = 0;
++	return 0;
++}
++
++static void
++unlzma_wait_complete(struct unlzma_ctx *ctx, bool finish)
++{
++	do {
++		mutex_unlock(&ctx->mutex);
++		wake_up(&ctx->next_req);
++		schedule();
++		mutex_lock(&ctx->mutex);
++	} while (ctx->active &&	(ctx->avail_in > 0) && (ctx->avail_out > 0));
++}
++
++static int
++unlzma_decompress_update(struct crypto_pcomp *tfm, struct comp_request *req)
++{
++	struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
++	size_t pos = 0;
++
++	mutex_lock(&ctx->mutex);
++	if (!ctx->active && !req->avail_in)
++		goto out;
++
++	pos = ctx->pos;
++	ctx->next_in = req->next_in;
++	ctx->avail_in = req->avail_in;
++	ctx->next_out = req->next_out;
++	ctx->avail_out = req->avail_out;
++
++	unlzma_wait_complete(ctx, false);
++
++	req->next_in = ctx->next_in;
++	req->avail_in = ctx->avail_in;
++	req->next_out = ctx->next_out;
++	req->avail_out = ctx->avail_out;
++	pos = ctx->pos - pos;
++
++out:
++	mutex_unlock(&ctx->mutex);
++	return pos;
++}
++
++static int
++unlzma_decompress_final(struct crypto_pcomp *tfm, struct comp_request *req)
++{
++	struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
++	int ret = 0;
++
++	/* cancel pending operation */
++	mutex_lock(&ctx->mutex);
++	if (ctx->active) {
++		// ret = -EINVAL;
++		unlzma_cancel(ctx);
++	}
++	ctx->pos = 0;
++	mutex_unlock(&ctx->mutex);
++	return ret;
++}
++
++
++static struct pcomp_alg unlzma_alg = {
++	.decompress_setup	= unlzma_decompress_setup,
++	.decompress_init	= unlzma_decompress_init,
++	.decompress_update	= unlzma_decompress_update,
++	.decompress_final	= unlzma_decompress_final,
++
++	.base			= {
++		.cra_name	= "lzma",
++		.cra_flags	= CRYPTO_ALG_TYPE_PCOMPRESS,
++		.cra_ctxsize	= sizeof(struct unlzma_ctx),
++		.cra_module	= THIS_MODULE,
++		.cra_init	= unlzma_init,
++		.cra_exit	= unlzma_exit,
++	}
++};
++
++static int __init
++unlzma_mod_init(void)
++{
++	return crypto_register_pcomp(&unlzma_alg);
++}
++
++static void __exit
++unlzma_mod_exit(void)
++{
++	crypto_unregister_pcomp(&unlzma_alg);
++}
++
++module_init(unlzma_mod_init);
++module_exit(unlzma_mod_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("LZMA Decompression Algorithm");
++MODULE_AUTHOR("Felix Fietkau <[email protected]>");
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -728,6 +728,12 @@ config CRYPTO_ZLIB
+ 	help
+ 	  This is the zlib algorithm.
+ 
++config CRYPTO_UNLZMA
++	tristate "LZMA decompression"
++	select CRYPTO_PCOMP
++	help
++	  This is the lzma decompression module.
++
+ config CRYPTO_LZO
+ 	tristate "LZO compression algorithm"
+ 	select CRYPTO_ALGAPI
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -73,6 +73,7 @@ obj-$(CONFIG_CRYPTO_SEED) += seed.o
+ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
+ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
++obj-$(CONFIG_CRYPTO_UNLZMA) += unlzma.o
+ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
+ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
+ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
+--- /dev/null
++++ b/crypto/unlzma.h
+@@ -0,0 +1,80 @@
++/* LZMA uncompression module for pcomp
++ * Copyright (C) 2009  Felix Fietkau <[email protected]>
++ *
++ * Based on:
++ *  Initial Linux kernel adaptation
++ *  Copyright (C) 2006  Alain < [email protected] >
++ *
++ *  Based on small lzma deflate implementation/Small range coder
++ *  implementation for lzma.
++ *  Copyright (C) 2006  Aurelien Jacobs < [email protected] >
++ *
++ *  Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
++ *  Copyright (C) 1999-2005  Igor Pavlov
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++#ifndef __UNLZMA_H
++#define __UNLZMA_H
++
++struct lzma_header {
++	__u8 pos;
++	__le32 dict_size;
++} __attribute__ ((packed)) ;
++
++
++#define RC_TOP_BITS 24
++#define RC_MOVE_BITS 5
++#define RC_MODEL_TOTAL_BITS 11
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++#define LZMA_NUM_POS_BITS_MAX 4
++
++#define LZMA_LEN_NUM_LOW_BITS 3
++#define LZMA_LEN_NUM_MID_BITS 3
++#define LZMA_LEN_NUM_HIGH_BITS 8
++
++#define LZMA_LEN_CHOICE 0
++#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
++#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
++#define LZMA_LEN_MID (LZMA_LEN_LOW \
++		      + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
++#define LZMA_LEN_HIGH (LZMA_LEN_MID \
++		       +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
++#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
++
++#define LZMA_NUM_STATES 12
++#define LZMA_NUM_LIT_STATES 7
++
++#define LZMA_START_POS_MODEL_INDEX 4
++#define LZMA_END_POS_MODEL_INDEX 14
++#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
++
++#define LZMA_NUM_POS_SLOT_BITS 6
++#define LZMA_NUM_LEN_TO_POS_STATES 4
++
++#define LZMA_NUM_ALIGN_BITS 4
++
++#define LZMA_MATCH_MIN_LEN 2
++
++#define LZMA_IS_MATCH 0
++#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
++#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
++#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
++#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
++#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
++#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
++		       + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
++#define LZMA_SPEC_POS (LZMA_POS_SLOT \
++		       +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
++#define LZMA_ALIGN (LZMA_SPEC_POS \
++		    + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
++#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
++#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
++#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
++
++#endif
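
The five-byte lzma_header read at the start of do_unlzma() above packs the three LZMA model parameters into a single properties byte, pos = (pb * 5 + lp) * 9 + lc, which is why values >= 9 * 5 * 5 (225) are rejected and the two subtraction loops recover lc, lp and pb. A hypothetical helper doing the same decode with division, for clarity; the common 0x5d properties byte yields the defaults shown in the comment:

	/* Equivalent to the subtraction loops in do_unlzma().
	 * Example: pos = 0x5d (93) -> lc = 3, lp = 0, pb = 2. */
	static void lzma_props_decode(u8 pos, int *lc, int *lp, int *pb)
	{
		*lc = pos % 9;		/* literal context bits */
		pos /= 9;
		*lp = pos % 5;		/* literal position bits */
		*pb = pos / 5;		/* position state bits */
	}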

+ 214 - 0
target/linux/generic-2.6/patches-2.6.30/053-squashfs_lzma.patch

@@ -0,0 +1,214 @@
+--- a/fs/squashfs/Kconfig
++++ b/fs/squashfs/Kconfig
+@@ -2,7 +2,6 @@ config SQUASHFS
+ 	tristate "SquashFS 4.0 - Squashed file system support"
+ 	depends on BLOCK
+ 	select CRYPTO
+-	select CRYPTO_ZLIB
+ 	help
+ 	  Saying Y here includes support for SquashFS 4.0 (a Compressed
+ 	  Read-Only File System).  Squashfs is a highly compressed read-only
+@@ -37,6 +36,26 @@ config SQUASHFS_EMBEDDED
+ 
+ 	  If unsure, say N.
+ 
++config SQUASHFS_SUPPORT_ZLIB
++	bool
++	prompt "Support ZLIB compression" if SQUASHFS_SUPPORT_LZMA
++	depends on SQUASHFS
++	select CRYPTO_ZLIB
++	default y
++	help
++	  ZLIB is the default compression used in squashfs. If you are
++	  using LZMA compression instead, you can remove support for ZLIB
++	  entirely.
++
++config SQUASHFS_SUPPORT_LZMA
++	bool "Support LZMA compression"
++	depends on SQUASHFS
++	select CRYPTO_UNLZMA
++	help
++	  By default SquashFS uses ZLIB compression, however (if your tools
++	  support it) you can use LZMA instead, which saves space.
++
++
+ config SQUASHFS_FRAGMENT_CACHE_SIZE
+ 	int "Number of fragments cached" if SQUASHFS_EMBEDDED
+ 	depends on SQUASHFS
+--- a/fs/squashfs/squashfs_fs.h
++++ b/fs/squashfs/squashfs_fs.h
+@@ -212,6 +212,7 @@ struct meta_index {
+  * definitions for structures on disk
+  */
+ #define ZLIB_COMPRESSION	 1
++#define LZMA_COMPRESSION	 2
+ 
+ struct squashfs_super_block {
+ 	__le32			s_magic;
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -47,13 +47,65 @@
+ #include "squashfs.h"
+ 
+ 
+-#define SQUASHFS_CRYPTO_ALG	"zlib"
++static int squashfs_setup_zlib(struct squashfs_sb_info *msblk)
++{
++	int err = -EOPNOTSUPP;
++
++#ifdef CONFIG_SQUASHFS_SUPPORT_ZLIB
++	struct {
++		struct nlattr nla;
++		int val;
++	} params = {
++		.nla = {
++			.nla_len	= nla_attr_size(sizeof(int)),
++			.nla_type	= ZLIB_DECOMP_WINDOWBITS,
++		},
++		.val			= DEF_WBITS,
++	};
++
++	msblk->tfm = crypto_alloc_pcomp("zlib", 0,
++					CRYPTO_ALG_ASYNC);
++	if (IS_ERR(msblk->tfm)) {
++		ERROR("Failed to load zlib crypto module\n");
++		return PTR_ERR(msblk->tfm);
++	}
++
++	err = crypto_decompress_setup(msblk->tfm, &params, sizeof(params));
++	if (err) {
++		ERROR("Failed to set up decompression parameters\n");
++		crypto_free_pcomp(msblk->tfm);
++	}
++#endif
+ 
++	return err;
++}
++
++static int squashfs_setup_lzma(struct squashfs_sb_info *msblk)
++{
++	int err = -EOPNOTSUPP;
++
++#ifdef CONFIG_SQUASHFS_SUPPORT_LZMA
++	msblk->tfm = crypto_alloc_pcomp("lzma", 0,
++					CRYPTO_ALG_ASYNC);
++	if (IS_ERR(msblk->tfm)) {
++		ERROR("Failed to load lzma crypto module\n");
++		return PTR_ERR(msblk->tfm);
++	}
++
++	err = crypto_decompress_setup(msblk->tfm, NULL, 0);
++	if (err) {
++		ERROR("Failed to set up decompression parameters\n");
++		crypto_free_pcomp(msblk->tfm);
++	}
++#endif
++
++	return err;
++}
+ 
+ static struct file_system_type squashfs_fs_type;
+ static struct super_operations squashfs_super_ops;
+ 
+-static int supported_squashfs_filesystem(short major, short minor, short comp)
++static int supported_squashfs_filesystem(short major, short minor)
+ {
+ 	if (major < SQUASHFS_MAJOR) {
+ 		ERROR("Major/Minor mismatch, older Squashfs %d.%d "
+@@ -66,9 +118,6 @@ static int supported_squashfs_filesystem
+ 		return -EINVAL;
+ 	}
+ 
+-	if (comp != ZLIB_COMPRESSION)
+-		return -EINVAL;
+-
+ 	return 0;
+ }
+ 
+@@ -83,16 +132,6 @@ static int squashfs_fill_super(struct su
+ 	unsigned short flags;
+ 	unsigned int fragments;
+ 	u64 lookup_table_start;
+-	struct {
+-		struct nlattr nla;
+-		int val;
+-	} params = {
+-		.nla = {
+-			.nla_len	= nla_attr_size(sizeof(int)),
+-			.nla_type	= ZLIB_DECOMP_WINDOWBITS,
+-		},
+-		.val			= DEF_WBITS,
+-	};
+ 	int err;
+ 
+ 	TRACE("Entered squashfs_fill_superblock\n");
+@@ -104,21 +143,6 @@ static int squashfs_fill_super(struct su
+ 	}
+ 	msblk = sb->s_fs_info;
+ 
+-	msblk->tfm = crypto_alloc_pcomp(SQUASHFS_CRYPTO_ALG, 0,
+-					CRYPTO_ALG_ASYNC);
+-	if (IS_ERR(msblk->tfm)) {
+-		ERROR("Failed to load %s crypto module\n",
+-		      SQUASHFS_CRYPTO_ALG);
+-		err = PTR_ERR(msblk->tfm);
+-		goto failed_pcomp;
+-	}
+-
+-	err = crypto_decompress_setup(msblk->tfm, &params, sizeof(params));
+-	if (err) {
+-		ERROR("Failed to set up decompression parameters\n");
+-		goto failure;
+-	}
+-
+ 	sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+ 	if (sblk == NULL) {
+ 		ERROR("Failed to allocate squashfs_super_block\n");
+@@ -158,8 +182,21 @@ static int squashfs_fill_super(struct su
+ 
+ 	/* Check the MAJOR & MINOR versions and compression type */
+ 	err = supported_squashfs_filesystem(le16_to_cpu(sblk->s_major),
+-			le16_to_cpu(sblk->s_minor),
+-			le16_to_cpu(sblk->compression));
++			le16_to_cpu(sblk->s_minor));
++	if (err < 0)
++		goto failed_mount;
++
++	switch(le16_to_cpu(sblk->compression)) {
++	case ZLIB_COMPRESSION:
++		err = squashfs_setup_zlib(msblk);
++		break;
++	case LZMA_COMPRESSION:
++		err = squashfs_setup_lzma(msblk);
++		break;
++	default:
++		err = -EINVAL;
++		break;
++	}
+ 	if (err < 0)
+ 		goto failed_mount;
+ 
+@@ -305,21 +342,16 @@ allocate_root:
+ 	return 0;
+ 
+ failed_mount:
++	if (msblk->tfm)
++		crypto_free_pcomp(msblk->tfm);
+ 	squashfs_cache_delete(msblk->block_cache);
+ 	squashfs_cache_delete(msblk->fragment_cache);
+ 	squashfs_cache_delete(msblk->read_page);
+ 	kfree(msblk->inode_lookup_table);
+ 	kfree(msblk->fragment_index);
+ 	kfree(msblk->id_table);
+-	crypto_free_pcomp(msblk->tfm);
+-	kfree(sb->s_fs_info);
+-	sb->s_fs_info = NULL;
+ 	kfree(sblk);
+-	return err;
+-
+ failure:
+-	crypto_free_pcomp(msblk->tfm);
+-failed_pcomp:
+ 	kfree(sb->s_fs_info);
+ 	sb->s_fs_info = NULL;
+ 	return err;
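
With 053-squashfs_lzma.patch applied, the compression field of the squashfs superblock picks the pcomp algorithm at mount time: ZLIB_COMPRESSION (1) loads "zlib", LZMA_COMPRESSION (2) loads "lzma". An illustrative config fragment matching what this commit enables in config-2.6.30 above (SQUASHFS_SUPPORT_ZLIB only becomes deselectable once LZMA support is enabled):

	CONFIG_SQUASHFS=y
	CONFIG_SQUASHFS_SUPPORT_ZLIB=y
	CONFIG_SQUASHFS_SUPPORT_LZMA=y
	CONFIG_CRYPTO=y
	CONFIG_CRYPTO_ALGAPI2=y
	CONFIG_CRYPTO_PCOMP=y
	CONFIG_CRYPTO_ZLIB=y
	CONFIG_CRYPTO_UNLZMA=y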

+ 10 - 15
target/linux/ixp4xx/patches-2.6.30/205-npe_driver_separate_phy_functions.patch

@@ -81,38 +81,33 @@
  	int err;
  
  	if (!(dev = alloc_etherdev(sizeof(struct port))))
-@@ -1212,22 +1257,9 @@ static int __devinit eth_init_one(struct
+@@ -1207,18 +1252,10 @@ static int __devinit eth_init_one(struct
  	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
  	udelay(50);
  
 -	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
 -	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
 -				   PHY_INTERFACE_MODE_MII);
--	if (IS_ERR(port->phydev)) {
--		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
--		return PTR_ERR(port->phydev);
--	}
--
+-	if ((err = IS_ERR(port->phydev)))
++	err = ixp4xx_phy_connect(dev);
++	if (err)
+ 		goto err_free_mem;
+ 
 -	/* mask with MAC supported features */
 -	port->phydev->supported &= PHY_BASIC_FEATURES;
 -	port->phydev->advertising = port->phydev->supported;
 -
 -	port->phydev->irq = PHY_POLL;
 -
--	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
--	       npe_name(port->npe));
-+	err = ixp4xx_phy_connect(dev);
-+	if (err)
-+		goto err_unreg;
- 
- 	return 0;
+ 	if ((err = register_netdev(dev)))
+ 		goto err_phy_dis;
  
-@@ -1245,7 +1277,7 @@ static int __devexit eth_remove_one(stru
+@@ -1245,7 +1282,7 @@ static int __devexit eth_remove_one(stru
  	struct net_device *dev = platform_get_drvdata(pdev);
  	struct port *port = netdev_priv(dev);
  
 -	phy_disconnect(port->phydev);
 +	ixp4xx_phy_disconnect(dev);
  	unregister_netdev(dev);
+ 	phy_disconnect(port->phydev);
  	npe_port_tab[NPE_ID(port->id)] = NULL;
- 	platform_set_drvdata(pdev, NULL);