HashLib4CSharp - Comprehensive Hashing Library written in C#

So here is a version of the CRC32 computation using Span<T>:
Using Span<T> and hoisting the branches out of the tight loop:
/// <summary>
/// Slicing-by-16 CRC update over data[index .. index+length) using Span&lt;T&gt;
/// instead of unsafe pointers, with the endianness branch hoisted out of the
/// tight loop. Reads and updates CurrentCRC, which is kept bit-inverted while
/// processing (the usual CRC-32 convention).
/// </summary>
/// <param name="crcTable">16 precomputed 256-entry lookup tables (slicing-by-16).</param>
/// <param name="data">Source buffer; must not be null.</param>
/// <param name="index">Starting offset into data.</param>
/// <param name="length">Number of bytes to process.</param>
protected void LocalCrcCompute(uint[][] crcTable, byte[] data, int index, int length)
{
    if (data == null) throw new ArgumentNullHashLibException(nameof(data));
    Debug.Assert(index >= 0);
    Debug.Assert(length >= 0);
    Debug.Assert(index + length <= data.Length);

    const int unroll = 4;
    const int bytesAtOnce = 16 * unroll; // 64 bytes consumed per outer-loop pass

    var crc = ~CurrentCRC;
    // Choose the endian-specific block routine once, outside the hot loop.
    // Each routine returns the tail (fewer than bytesAtOnce bytes) it could
    // not process in full blocks.
    var leftovers = BitConverter.IsLittleEndian ? ComputeLittleEndianBlocks()
        : ComputeBigEndianBlocks();

    // remaining 1 to 63 bytes (standard algorithm)
    foreach (var b in leftovers)
        crc = (crc >> 8) ^ crcTable[0][(crc & 0xFF) ^ b];

    CurrentCRC = ~crc;

    ReadOnlySpan<byte> ComputeLittleEndianBlocks()
    {
        var dataSpan = data.AsSpan(index, length);
        while (dataSpan.Length >= bytesAtOnce)
        {
            // Reinterpret the bytes as uints; on a little-endian host the
            // in-memory byte order already matches the table indexing below.
            var dataUints = MemoryMarshal.Cast<byte, uint>(dataSpan);
            // Each unrolling consumes 4 uints (16 bytes); 4 unrollings = 64 bytes.
            for (int unrolling = 0; unrolling < unroll; unrolling++, dataUints = dataUints.Slice(4))
            {
                var one = dataUints[0] ^ crc; // fold the running CRC into the first word
                var two = dataUints[1];
                var three = dataUints[2];
                var four = dataUints[3];

                // Slicing-by-16: one table lookup per input byte, all XORed together.
                crc = crcTable[0][(four >> 24) & 0xFF] ^
                    crcTable[1][(four >> 16) & 0xFF] ^
                    crcTable[2][(four >> 8) & 0xFF] ^
                    crcTable[3][four & 0xFF] ^
                    crcTable[4][(three >> 24) & 0xFF] ^
                    crcTable[5][(three >> 16) & 0xFF] ^
                    crcTable[6][(three >> 8) & 0xFF] ^
                    crcTable[7][three & 0xFF] ^
                    crcTable[8][(two >> 24) & 0xFF] ^
                    crcTable[9][(two >> 16) & 0xFF] ^
                    crcTable[10][(two >> 8) & 0xFF] ^
                    crcTable[11][two & 0xFF] ^
                    crcTable[12][(one >> 24) & 0xFF] ^
                    crcTable[13][(one >> 16) & 0xFF] ^
                    crcTable[14][(one >> 8) & 0xFF] ^
                    crcTable[15][one & 0xFF];
            }

            dataSpan = dataSpan.Slice(bytesAtOnce);
        }
        return dataSpan;
    }

    ReadOnlySpan<byte> ComputeBigEndianBlocks()
    {
        var dataSpan = data.AsSpan(index, length);
        while (dataSpan.Length >= bytesAtOnce)
        {
            var dataUints = MemoryMarshal.Cast<byte, uint>(dataSpan);
            for (int unrolling = 0; unrolling < unroll; unrolling++, dataUints = dataUints.Slice(4))
            {
                // On a big-endian host the CRC's bytes must be swapped so the
                // XOR lines up with the memory order of the loaded word.
                var one = dataUints[0] ^ Bits.ReverseBytesUInt32(crc);
                var two = dataUints[1];
                var three = dataUints[2];
                var four = dataUints[3];

                // Same lookups as the little-endian path, with the byte
                // extraction order mirrored to match big-endian word layout.
                crc = crcTable[0][four & 0xFF] ^
                    crcTable[1][(four >> 8) & 0xFF] ^
                    crcTable[2][(four >> 16) & 0xFF] ^
                    crcTable[3][(four >> 24) & 0xFF] ^
                    crcTable[4][three & 0xFF] ^
                    crcTable[5][(three >> 8) & 0xFF] ^
                    crcTable[6][(three >> 16) & 0xFF] ^
                    crcTable[7][(three >> 24) & 0xFF] ^
                    crcTable[8][two & 0xFF] ^
                    crcTable[9][(two >> 8) & 0xFF] ^
                    crcTable[10][(two >> 16) & 0xFF] ^
                    crcTable[11][(two >> 24) & 0xFF] ^
                    crcTable[12][one & 0xFF] ^
                    crcTable[13][(one >> 8) & 0xFF] ^
                    crcTable[14][(one >> 16) & 0xFF] ^
                    crcTable[15][(one >> 24) & 0xFF];
            }

            dataSpan = dataSpan.Slice(bytesAtOnce);
        }
        return dataSpan;
    }
}

and here is the original code, which used unsafe pointers as well as a branch inside the tight loop:
Original unsafe code with pointers and a branch inside the tight loop:
/// <summary>
/// Original slicing-by-16 CRC update over data[index .. index+length) using
/// unsafe pointers, with an endianness test left inside the tight loop.
/// Reads and updates CurrentCRC (kept bit-inverted while processing).
/// </summary>
/// <param name="crcTable">16 precomputed 256-entry lookup tables.</param>
/// <param name="data">Source buffer; must not be null.</param>
/// <param name="index">Starting offset into data.</param>
/// <param name="length">Number of bytes to process.</param>
protected unsafe void LocalCrcCompute(uint[][] crcTable, byte[] data, int index,
                                      int length)
{
    if (data == null) throw new ArgumentNullHashLibException(nameof(data));
    Debug.Assert(index >= 0);
    Debug.Assert(length >= 0);
    Debug.Assert(index + length <= data.Length);

    const int unroll = 4;
    const int bytesAtOnce = 16 * unroll; // 64 bytes consumed per outer-loop pass
    var crc = ~CurrentCRC;

    fixed (byte* dataPtr = data)
    {
        var srcPtr = (uint*) (dataPtr + index);
        while (length >= bytesAtOnce)
        {
            var unrolling = 0;
            while (unrolling < unroll)
            {
                // le2me_32: "little-endian to machine-endian" — presumably a
                // byte swap on big-endian hosts and a no-op on little-endian
                // ones (name-based inference; confirm in Converters).
                var one = Converters.ReadPCardinalAsUInt32(srcPtr)
                    ^ Converters.le2me_32(crc);
                srcPtr++;
                var two = Converters.ReadPCardinalAsUInt32(srcPtr);
                srcPtr++;
                var three = Converters.ReadPCardinalAsUInt32(srcPtr);
                srcPtr++;
                var four = Converters.ReadPCardinalAsUInt32(srcPtr);
                srcPtr++;

                // Endianness branch inside the hot loop (hoisted out in later
                // revisions); it selects the byte-extraction order that
                // matches the in-memory layout of the loaded words.
                if (BitConverter.IsLittleEndian)
                {
                    crc = crcTable[0][(four >> 24) & 0xFF] ^ crcTable[1]
                        [(four >> 16) & 0xFF] ^ crcTable[2][(four >> 8) & 0xFF]
                        ^ crcTable[3][four & 0xFF] ^ crcTable[4]
                        [(three >> 24) & 0xFF] ^ crcTable[5][(three >> 16) & 0xFF]
                        ^ crcTable[6][(three >> 8) & 0xFF] ^ crcTable[7]
                        [three & 0xFF] ^ crcTable[8][(two >> 24) & 0xFF] ^ crcTable
                        [9][(two >> 16) & 0xFF] ^ crcTable[10][(two >> 8) & 0xFF]
                        ^ crcTable[11][two & 0xFF] ^ crcTable[12][(one >> 24) & 0xFF]
                        ^ crcTable[13][(one >> 16) & 0xFF] ^ crcTable[14]
                        [(one >> 8) & 0xFF] ^ crcTable[15][one & 0xFF];
                }
                else
                {
                    crc = crcTable[0][four & 0xFF] ^ crcTable[1]
                        [(four >> 8) & 0xFF] ^ crcTable[2][(four >> 16) & 0xFF]
                        ^ crcTable[3][(four >> 24) & 0xFF] ^ crcTable[4]
                        [three & 0xFF] ^ crcTable[5][(three >> 8) & 0xFF] ^ crcTable
                        [6][(three >> 16) & 0xFF] ^ crcTable[7][(three >> 24) & 0xFF]
                        ^ crcTable[8][two & 0xFF] ^ crcTable[9][(two >> 8) & 0xFF]
                        ^ crcTable[10][(two >> 16) & 0xFF] ^ crcTable[11]
                        [(two >> 24) & 0xFF] ^ crcTable[12][one & 0xFF] ^ crcTable
                        [13][(one >> 8) & 0xFF] ^ crcTable[14][(one >> 16) & 0xFF]
                        ^ crcTable[15][(one >> 24) & 0xFF];
                }

                unrolling++;
            }

            length -= bytesAtOnce;
        }

        var srcPtr2 = (byte*) srcPtr;
        // remaining 1 to 63 bytes (standard algorithm)
        while (length != 0)
        {
            crc = (crc >> 8) ^ crcTable[0][(crc & 0xFF) ^ *srcPtr2];
            srcPtr2++;
            length--;
        }

        CurrentCRC = ~crc;
    }
}

On my ancient machine, here's the relative throughput I got (timings on an old AMD system circa 2012):

                         Castagnoli PKZip  (in MB/s)
Original                    657      659
Original without branch     684      680
Span<T> without branch      676      677
 
@Skydiver , thanks a lot for this, from your benchmark I see that original without branch is faster than the Span<T> variant?
any chance you can share the code for original without branch please?
 
Yes, it is faster, but also more complex — it still uses unsafe pointers. The reason I put that statistic in there was to show that using Span<T> lets you have safer code, and the performance penalty is not that big.

See pull Request #4. The relevant code is:
C#:
/// <summary>
/// Slicing-by-16 CRC update over data[index .. index+length) using unsafe
/// pointers, with the endianness branch hoisted out of the tight loop into
/// two separate local functions. Reads and updates CurrentCRC (kept
/// bit-inverted while processing).
/// </summary>
/// <param name="crcTable">16 precomputed 256-entry lookup tables.</param>
/// <param name="data">Source buffer; must not be null.</param>
/// <param name="index">Starting offset into data.</param>
/// <param name="length">Number of bytes to process.</param>
protected unsafe void LocalCrcCompute(uint[][] crcTable, byte[] data, int index,
                                      int length)
{
    if (data == null) throw new ArgumentNullException(nameof(data));
    Debug.Assert(index >= 0);
    Debug.Assert(length >= 0);
    Debug.Assert(index + length <= data.Length);

    const int unroll = 4;
    const int bytesAtOnce = 16 * unroll; // 64 bytes consumed per outer-loop pass
    var crc = ~CurrentCRC;

    // The endianness test runs exactly once; each routine also handles its
    // own 1-63 byte tail (the tail loop is duplicated in both bodies).
    if (BitConverter.IsLittleEndian)
        ComputeLittleEndianBlocks();
    else
        ComputeBigEndianBlocks();

    CurrentCRC = ~crc;

    void ComputeLittleEndianBlocks()
    {
        fixed (byte* dataPtr = data)
        {
            var srcPtr = (uint*)(dataPtr + index);
            while (length >= bytesAtOnce)
            {
                var unrolling = 0;
                while (unrolling < unroll)
                {
                    // On little-endian hosts the loaded word can be XORed
                    // with the CRC directly — no byte swap needed.
                    var one = Converters.ReadPCardinalAsUInt32(srcPtr) ^ crc;
                    srcPtr++;
                    var two = Converters.ReadPCardinalAsUInt32(srcPtr);
                    srcPtr++;
                    var three = Converters.ReadPCardinalAsUInt32(srcPtr);
                    srcPtr++;
                    var four = Converters.ReadPCardinalAsUInt32(srcPtr);
                    srcPtr++;

                    // Slicing-by-16: one table lookup per input byte.
                    crc = crcTable[0][(four >> 24) & 0xFF] ^ crcTable[1]
                        [(four >> 16) & 0xFF] ^ crcTable[2][(four >> 8) & 0xFF]
                        ^ crcTable[3][four & 0xFF] ^ crcTable[4]
                        [(three >> 24) & 0xFF] ^ crcTable[5][(three >> 16) & 0xFF]
                        ^ crcTable[6][(three >> 8) & 0xFF] ^ crcTable[7]
                        [three & 0xFF] ^ crcTable[8][(two >> 24) & 0xFF] ^ crcTable
                        [9][(two >> 16) & 0xFF] ^ crcTable[10][(two >> 8) & 0xFF]
                        ^ crcTable[11][two & 0xFF] ^ crcTable[12][(one >> 24) & 0xFF]
                        ^ crcTable[13][(one >> 16) & 0xFF] ^ crcTable[14]
                        [(one >> 8) & 0xFF] ^ crcTable[15][one & 0xFF];

                    unrolling++;
                }

                length -= bytesAtOnce;
            }

            var srcPtr2 = (byte*)srcPtr;
            // remaining 1 to 63 bytes (standard algorithm)
            while (length != 0)
            {
                crc = (crc >> 8) ^ crcTable[0][(crc & 0xFF) ^ *srcPtr2];
                srcPtr2++;
                length--;
            }
        }
    }


    void ComputeBigEndianBlocks()
    {
        fixed (byte* dataPtr = data)
        {
            var srcPtr = (uint*)(dataPtr + index);
            while (length >= bytesAtOnce)
            {
                var unrolling = 0;
                while (unrolling < unroll)
                {
                    // On big-endian hosts the CRC's bytes are swapped so the
                    // XOR lines up with the memory order of the loaded word.
                    var one = Converters.ReadPCardinalAsUInt32(srcPtr) ^ Bits.ReverseBytesUInt32(crc);
                    srcPtr++;
                    var two = Converters.ReadPCardinalAsUInt32(srcPtr);
                    srcPtr++;
                    var three = Converters.ReadPCardinalAsUInt32(srcPtr);
                    srcPtr++;
                    var four = Converters.ReadPCardinalAsUInt32(srcPtr);
                    srcPtr++;

                    // Byte extraction order is mirrored relative to the
                    // little-endian path to match big-endian word layout.
                    crc = crcTable[0][four & 0xFF] ^ crcTable[1]
                        [(four >> 8) & 0xFF] ^ crcTable[2][(four >> 16) & 0xFF]
                        ^ crcTable[3][(four >> 24) & 0xFF] ^ crcTable[4]
                        [three & 0xFF] ^ crcTable[5][(three >> 8) & 0xFF] ^ crcTable
                        [6][(three >> 16) & 0xFF] ^ crcTable[7][(three >> 24) & 0xFF]
                        ^ crcTable[8][two & 0xFF] ^ crcTable[9][(two >> 8) & 0xFF]
                        ^ crcTable[10][(two >> 16) & 0xFF] ^ crcTable[11]
                        [(two >> 24) & 0xFF] ^ crcTable[12][one & 0xFF] ^ crcTable
                        [13][(one >> 8) & 0xFF] ^ crcTable[14][(one >> 16) & 0xFF]
                        ^ crcTable[15][(one >> 24) & 0xFF];

                    unrolling++;
                }

                length -= bytesAtOnce;
            }

            var srcPtr2 = (byte*)srcPtr;
            // remaining 1 to 63 bytes (standard algorithm)
            while (length != 0)
            {
                crc = (crc >> 8) ^ crcTable[0][(crc & 0xFF) ^ *srcPtr2];
                srcPtr2++;
                length--;
            }
        }
    }
}
 
@Skydiver, Okay I see, thanks for the pull request, I will merge pull request no 4 since I will favour performance for this case.
 
No problem. It's your library. I guess you'll just deal with any cases where someone figures out how to use your library to cause a buffer overwrite or an elevation of privilege when somebody reports it. Performance is king; that's why we still program in languages like C and C++... Security, smecurity... it's someone else's identity that gets stolen or machine that gets owned...
 
Okay got better performance with the Span<T>: 692 and 690 respectively:
C#:
// 16-byte reinterpretation window over the input buffer: lets the hot loop
// load four uints per iteration via a single MemoryMarshal.Cast<byte, Block>.
// Sequential layout with Pack = 4 pins the four fields to 16 consecutive
// bytes, so the cast is a straight view of memory with no padding.
// NOTE: field order is significant — do not reorder.
[StructLayout(LayoutKind.Sequential, Pack = 4)]
struct Block
{
    public uint one;   // bytes 0-3 of the 16-byte chunk
    public uint two;   // bytes 4-7
    public uint three; // bytes 8-11
    public uint four;  // bytes 12-15
}

/// <summary>
/// Slicing-by-16 CRC update over data[index .. index+length) using Span&lt;T&gt;
/// and the Block struct: the input is reinterpreted as a sequence of 16-byte
/// Blocks so each loop iteration reads four uints without per-word slicing.
/// Reads and updates CurrentCRC (kept bit-inverted while processing).
/// </summary>
/// <param name="crcTable">16 precomputed 256-entry lookup tables.</param>
/// <param name="data">Source buffer; must not be null.</param>
/// <param name="index">Starting offset into data.</param>
/// <param name="length">Number of bytes to process.</param>
protected void LocalCrcCompute(uint[][] crcTable, byte[] data, int index, int length)
{
    if (data == null) throw new ArgumentNullException(nameof(data));
    Debug.Assert(index >= 0);
    Debug.Assert(length >= 0);
    Debug.Assert(index + length <= data.Length);

    const int unroll = 4;
    const int bytesAtOnce = 16 * unroll; // block processing rounds to 64-byte multiples

    var crc = ~CurrentCRC;
    // Pick the endian-specific block routine once; each returns the tail it
    // did not process in full blocks.
    var leftovers = BitConverter.IsLittleEndian ? ComputeLittleEndianBlocks()
        : ComputeBigEndianBlocks();

    // remaining 1 to 63 bytes (standard algorithm)
    foreach (var b in leftovers)
        crc = (crc >> 8) ^ crcTable[0][(crc & 0xFF) ^ b];

    CurrentCRC = ~crc;

    ReadOnlySpan<byte> ComputeLittleEndianBlocks()
    {
        var dataSpan = data.AsSpan(index, length);
        // bytesScanned is rounded down to a multiple of bytesAtOnce (64),
        // even though each Block covers only 16 bytes — so the tail can be
        // up to 63 bytes; the caller's byte-wise loop handles it correctly.
        int blockCount = length / bytesAtOnce;
        int bytesScanned = blockCount * bytesAtOnce;
        var blocks = MemoryMarshal.Cast<byte, Block>(dataSpan.Slice(0, bytesScanned));
        foreach(var block in blocks)
        {
            var one = block.one ^ crc; // fold the running CRC into the first word

            // Slicing-by-16: one table lookup per input byte.
            crc = crcTable[0][(block.four >> 24) & 0xFF] ^
                crcTable[1][(block.four >> 16) & 0xFF] ^
                crcTable[2][(block.four >> 8) & 0xFF] ^
                crcTable[3][block.four & 0xFF] ^
                crcTable[4][(block.three >> 24) & 0xFF] ^
                crcTable[5][(block.three >> 16) & 0xFF] ^
                crcTable[6][(block.three >> 8) & 0xFF] ^
                crcTable[7][block.three & 0xFF] ^
                crcTable[8][(block.two >> 24) & 0xFF] ^
                crcTable[9][(block.two >> 16) & 0xFF] ^
                crcTable[10][(block.two >> 8) & 0xFF] ^
                crcTable[11][block.two & 0xFF] ^
                crcTable[12][(one >> 24) & 0xFF] ^
                crcTable[13][(one >> 16) & 0xFF] ^
                crcTable[14][(one >> 8) & 0xFF] ^
                crcTable[15][one & 0xFF];
        }
        return dataSpan.Slice(bytesScanned);
    }

    ReadOnlySpan<byte> ComputeBigEndianBlocks()
    {
        var dataSpan = data.AsSpan(index, length);
        int blockCount = length / bytesAtOnce;
        int bytesScanned = blockCount * bytesAtOnce;
        var blocks = MemoryMarshal.Cast<byte, Block>(dataSpan.Slice(0, bytesScanned));
        foreach (var block in blocks)
        {
            // Swap the CRC's bytes so the XOR matches big-endian word layout.
            var one = block.one ^ Bits.ReverseBytesUInt32(crc);

            // Byte extraction order mirrored relative to the LE path.
            crc = crcTable[0][block.four & 0xFF] ^
                crcTable[1][(block.four >> 8) & 0xFF] ^
                crcTable[2][(block.four >> 16) & 0xFF] ^
                crcTable[3][(block.four >> 24) & 0xFF] ^
                crcTable[4][block.three & 0xFF] ^
                crcTable[5][(block.three >> 8) & 0xFF] ^
                crcTable[6][(block.three >> 16) & 0xFF] ^
                crcTable[7][(block.three >> 24) & 0xFF] ^
                crcTable[8][block.two & 0xFF] ^
                crcTable[9][(block.two >> 8) & 0xFF] ^
                crcTable[10][(block.two >> 16) & 0xFF] ^
                crcTable[11][(block.two >> 24) & 0xFF] ^
                crcTable[12][one & 0xFF] ^
                crcTable[13][(one >> 8) & 0xFF] ^
                crcTable[14][(one >> 16) & 0xFF] ^
                crcTable[15][(one >> 24) & 0xFF];
        }
        return dataSpan.Slice(bytesScanned);
    }
}

Pull request coming shortly... :)
 
@Skydiver, thanks for your pull request, I have merged it.
and yes, it did improve performance.
I am open to any more suggestions/contributions from you so long as it doesn't hurt performance. :)
 
In which case you'll like the pull request coming later today. After I made the perf for CRC-32 faster, I noticed that it was now running faster than the Adler32. Adler32 was running at about 685 on my PC. When Mark Adler invented it, he wanted something much faster than CRC32. I've got it running at around 1100 now...
 
For those following the thread, but not GitHub, the original Adler32 code looked something like this:
Original:
/// <summary>
/// Adler-32 update over data[index .. index+length): _a accumulates the byte
/// sum, _b accumulates the running sum of _a, both modulo ModAdler.
/// Original version — the inner block size is clamped with a branch on every
/// outer pass.
/// </summary>
/// <param name="data">Source buffer; must not be null.</param>
/// <param name="index">Starting offset into data.</param>
/// <param name="length">Number of bytes to process.</param>
public override void TransformBytes(byte[] data, int index, int length)
{
    if (data == null) throw new ArgumentNullException(nameof(data));
    Debug.Assert(index >= 0);
    Debug.Assert(length >= 0);
    Debug.Assert(index + length <= data.Length);

    // Work on locals; write back to the fields once at the end.
    var a = _a;
    var b = _b;

    while (length > 0)
    {
        // We can defer the modulo operation:
        // a maximally grows from 65521 to 65521 + 255 * 3800
        // b maximally grows by 3800 * median(a) = 2090079800 < 2^31
        var n = 3800;
        if (n > length) // clamp branch — removed in the later revision
            n = length;

        length -= n;

        while (n - 1 >= 0)
        {
            a += data[index];
            b += a;
            index++;
            n--;
        }

        // One deferred reduction per block keeps the sums in range.
        a %= ModAdler;
        b %= ModAdler;
    }

    _a = a;
    _b = b;
}
to give a throughput of about 685 MB/s.

Simply restructuring the code to remove the branch that clamps the value of n, like this:
Do big blocks first.:
/// <summary>
/// Adler-32 update over data[index .. index+length) with the per-pass clamp
/// branch removed: all full 3800-byte blocks are processed first, then one
/// branch-free loop handles the remainder.
/// </summary>
/// <param name="data">Source buffer; must not be null.</param>
/// <param name="index">Starting offset into data.</param>
/// <param name="length">Number of bytes to process.</param>
public override void TransformBytes(byte[] data, int index, int length)
{
    if (data == null) throw new ArgumentNullException(nameof(data));
    Debug.Assert(index >= 0);
    Debug.Assert(length >= 0);
    Debug.Assert(index + length <= data.Length);

    // Work on locals; write back to the fields once at the end.
    var a = _a;
    var b = _b;

    // We can defer the modulo operation:
    // a maximally grows from 65521 to 65521 + 255 * 3800
    // b maximally grows by 3800 * median(a) = 2090079800 < 2^31
    const int bigBlockSize = 3800;

    var bigBlockCount = length / bigBlockSize;
    for(int i = 0; i < bigBlockCount; i++)
    {
        length -= bigBlockSize;

        for(int n = 0; n < bigBlockSize; n++)
        {
            a += data[index++];
            b += a;
        }

        // One deferred reduction per full block.
        a %= ModAdler;
        b %= ModAdler;
    }

    // Remaining 0 to 3799 bytes — small enough that a single reduction
    // afterwards keeps the sums in range.
    for(int n = 0; n < length; n++)
    {
        a += data[index++];
        b += a;
    }

    a %= ModAdler;
    b %= ModAdler;

    _a = a;
    _b = b;
}
to bump up the throughput to about 720 MB/s.

Introducing the use of ReadOnlySpan<byte> to speed things up:
Use ReadOnlySpan:
/// <summary>
/// Adler-32 update over data[index .. index+length) using ReadOnlySpan&lt;byte&gt;:
/// a sliding span replaces manual index arithmetic, with full 3800-byte
/// blocks processed first and the remainder handled branch-free.
/// </summary>
/// <param name="data">Source buffer; must not be null.</param>
/// <param name="index">Starting offset into data.</param>
/// <param name="length">Number of bytes to process.</param>
public override void TransformBytes(byte[] data, int index, int length)
{
    if (data == null) throw new ArgumentNullException(nameof(data));
    Debug.Assert(index >= 0);
    Debug.Assert(length >= 0);
    Debug.Assert(index + length <= data.Length);

    // Work on locals; write back to the fields once at the end.
    var a = _a;
    var b = _b;

    // We can defer the modulo operation:
    // a maximally grows from 65521 to 65521 + 255 * 3800
    // b maximally grows by 3800 * median(a) = 2090079800 < 2^31
    const int bigBlockSize = 3800;

    // Equivalent to data.AsSpan(index, length); bounds-checked once here.
    var buffer = new ReadOnlySpan<byte>(data, index, length);
    var bigBlockCount = length / bigBlockSize;
    for (int i = 0; i < bigBlockCount; i++)
    {
        foreach (var value in buffer.Slice(0, bigBlockSize))
        {
            a += value;
            b += a;
        }

        // One deferred reduction per full block.
        a %= ModAdler;
        b %= ModAdler;

        // Advance the window past the block just consumed.
        buffer = buffer.Slice(bigBlockSize);
    }

    // Remaining 0 to 3799 bytes.
    foreach (var value in buffer)
    {
        a += value;
        b += a;
    }

    a %= ModAdler;
    b %= ModAdler;

    _a = a;
    _b = b;
}
to bump up the throughput to 925 MB/s.

Changing for (int i = 0; i < bigBlockCount; i++) to while (bigBlockCount-- > 0) does another bump up to about 1100 MB/s.

Then for 64-bit machines, the last bit of optimization is to replace the inner foreach highlighted above with code that reads 64 bits at a time from memory:
Read 64-bits at a time:
// Process the block 8 bytes at a time: load one 64-bit word, then feed its
// bytes into the rolling sums lowest-address-first.
// NOTE(review): extracting bytes as (word >> k) & 0xFF assumes the word's
// low byte is the first byte in memory, i.e. a little-endian host — confirm
// this path is gated on BitConverter.IsLittleEndian or has a BE fallback.
// Also requires bigBlockSize to be a multiple of 8 (3800 = 475 * 8, so OK).
foreach (var word in MemoryMarshal.Cast<byte, ulong>(buffer.Slice(0, bigBlockSize)))
{
    var lo = (uint) word; // low 32 bits = first four bytes on little-endian
    a += lo & 0xFF;
    b += a;

    a += (lo >> 8) & 0xFF;
    b += a;

    a += (lo >> 16) & 0xFF;
    b += a;

    a += (lo >> 24) & 0xFF;
    b += a;

    var hi = (uint) (word >> 32); // high 32 bits = last four bytes
    a += hi & 0xFF;
    b += a;

    a += (hi >> 8) & 0xFF;
    b += a;

    a += (hi >> 16) & 0xFF;
    b += a;

    a += (hi >> 24) & 0xFF;
    b += a;
}
To give another bump up to 1300 MB/s.

Now Adler-32 is correctly running much faster than CRC-32 which was the goal of the original designer and author.
 
@Skydiver , if this is ok by you, I will very much appreciate it if you can take a look at my Blake3 implementation, I have tried my best to get reasonable speed but to no avail.
Thanks.
 
Do you not think you are asking for a bit much for free?

Skydiver is an ex-Microsoft employee, and known for his craft as a professional software programmer. As a friend of mine, I can also vouch for his kind nature and willingness to help other people (often for free). Now I feel you are starting to milk the system — that system being this forum — and you appear to be taking advantage of the generosity of the contributions you've received already. You remind me of the Oliver Twist line: "Please, Sir, can I have some more?" As a token of generosity, if you so wish to ask for more, you should consider offering a personal donation to Skydiver for his continued services for your project. And herein is why I suggest so:

You should note that this forum frowns upon people asking for code right up-front. And please remember that this website works on the basis that you show us what you've tried first, and we will show you some fixes for your source code thereafter. While we are here to help you. Providing you are chiming in and trying to improve your own source code too and not eliciting others to do all the work for you. There are limitations to how much help I am willing to allow any contributing member of this forum be subjected to such exploitation of their own generosity. Each of us are developers by craft and do this for a living. Our contributions are free and each user can contribute as much as they desire. While you may not have meant to; but please do not exploit that generosity.

While I am being blunt here, I say this with kindness too and with full understanding that you really do just want to improve your library. And I have no problem with that. And that's fine. However, I should also point out that this is a Vendor announcement forum, and not a support forum. In which case, you should consider starting a new topic for help with your Crypto Blake3 questions. You may also link to this topic from your new topic if its relevant.

Instead of closing this topic, I will leave it open so you can keep users of your library updated with new releases etc. I ask that you not use this thread for code improvement requests. As these should be posted in the support forums provided.

Hope you understand.
 
@Sheepings , thanks for your honest opinion, while I do very much appreciate @Skydiver for his contributions in the codebase, I wouldn't really be able to pay him for his services considering his expertise.
and no, I am not trying to portray the Oliver Twist ideology here; sorry if my request came off that way.
My original ideology while sharing the code here was for the benefit of the C# community and hopefully get meaningful contributions to the project as it's opensource which @Skydiver has been very helpful with.
based on your request, I won't ask for anymore code improvements requests here again and to @Skydiver I apologize if my request came off the wrong way, and thanks for your help so far, I forever remain grateful.
Thanks.
 
Just so you are clear. The forums in which I refer to are these which this topic resides. Vendor announcements are for announcing projects. (just like yours.) And of course we are grateful for your contributions to share with the community. This forum is for you to update people watching this topic with new announcement of when you've made an update to your product. So I hope that you understand that troubleshooting issues in this forum is the wrong place for such discussions. I'm not denying you support by using the appropriate forums. Just try to remember what the purpose of the vendor announcements are for, and its not for code support. You can however use one of the other suitable forums if you have questions on improving your library.

As I said, you also need to show what you've attempted when posting to those other support forums.

Thanks for your understanding. (y)
 

Latest posts

Back
Top Bottom