==== cnpkg/cnpkg_hidden_back.h ====

// Backward pass for a single cell of a hidden layer. Computes the sensitivity.
// Block size is usually [16 N] where N is found by experiment. See CNS manual.
// N might need to be reduced for cards below GTX 285/295.

#BLOCKSIZE 16 16

// Get handles to the next level and weight layer. This allows us to use the 'HANDLE' versions of macros, which
// are somewhat faster.

SENS_HANDLE       hn = GET_COMPUTED_SENS_HANDLE(ZN );
WEIGHT_VAL_HANDLE hw = GET_WEIGHT_VAL_HANDLE   (ZNW);

// Get dimensions of weight array.

int ySize  = WEIGHT_VAL_HANDLE_Y_SIZE (hw);
int xSize  = WEIGHT_VAL_HANDLE_X_SIZE (hw);
int dSize  = WEIGHT_VAL_HANDLE_D_SIZE (hw);
int nfSize = WEIGHT_VAL_HANDLE_NF_SIZE(hw);

// Find our projective field in the next level. Note that we are doing a 'full' correlation here, so we need to
// know the full projective field, as well as its valid portion. Where they differ, some part of the weight
// array will not be used.

int vy1, vy2, y1, dummy;
int vx1, vx2, x1;
int vd1, vd2, d1;

FIND_LEVEL_Y_NEAREST(ZN, ySize, vy1, vy2, y1, dummy);
FIND_LEVEL_X_NEAREST(ZN, xSize, vx1, vx2, x1, dummy);
FIND_LEVEL_D_NEAREST(ZN, dSize, vd1, vd2, d1, dummy);

// Compute the dot product of the projective field's sensitivity map and the weight array (in 4-D).

float s = 0.0f;

for (int nf = 0; nf < nfSize; nf++) {
    for (int d = vd1, k = vd1 - d1; d <= vd2; d++, k++) {         // Because the forward pass used the convolution
        for (int x = vx1, j = vx1 - x1; x <= vx2; x++, j++) {     // convention, we have to use the correlation
            for (int y = vy1, i = vy1 - y1; y <= vy2; y++, i++) { // convention here. We don't flip the weight array.
                float n = READ_SENS_HANDLE      (hn, nf    , y, x, d);
                float w = READ_WEIGHT_VAL_HANDLE(hw, THIS_F, i, j, k, nf);
                s += n * w;
            }
        }
    }
}

// Multiply by the derivative of the forward-propagating value and store.

s *= DSigmoid(READ_VAL);

WRITE_SENS(s);

==== cnpkg/demo/cnpkg_run.m ====

% cnpkg_run is an example script that shows how to use cnpkg. We train a
% network to perform denoising using a 2-D image. Note that cnpkg supports 3-D
% networks; here the "depth" dimension has size 1.
%
% See also: cnpkg_buildmodel.

%***********************************************************************************************************************

p = struct;

% Network parameters. We have 1 input level, 2 hidden levels, and 1 output level.

p.fCount = [3 10 10 3];        % Number of feature maps in each level.
p.fSize  = [0  7  7 1];        % Size of weights in Y and X for each level.
p.fDepth = [0  1  1 1];        % Size of weights in D (depth) for each level.
p.eta    = [0  1  1 1] * 1e-2; % Learning rate for weights and biases for each level.

miniOutSize = [3 3 1]; % Spatial size of output level for training [Y*X*D].

im = imread('autumn.tif');

if ~exist('nEpoch', 'var'), nEpoch = 100  ; end
if ~exist('nIter' , 'var'), nIter  = 50000; end

% Prepare training set.

im    = single(im) / single(intmax(class(im)));
input = im + single(0.1 * randn(size(im))); % Add noise.

border = sum(p.fSize(2 : end) - 1) / 2; % Input is larger than output due to edge loss from the convolutions.
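% (Worked example, added for illustration): with p.fSize = [0 7 7 1] as above,
% each 7x7 'valid' convolution trims (7 - 1) / 2 = 3 pixels from every side of
% the image, so border = ((7-1) + (7-1) + (1-1)) / 2 = 6 pixels.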
label     = im   (border + 1 : end - border, border + 1 : end - border, :);
dispInput = input(border + 1 : end - border, border + 1 : end - border, :);
dispInput = min(max(dispInput, 0), 1);

% Create two models, one for training and one for testing.

m1 = cnpkg_buildmodel(p, miniOutSize);
m1.input = permute(input, [3 1 2]); % Convert to [F*Y*X*D].
m1.label = permute(label, [3 1 2]); % Convert to [F*Y*X*D].

m2 = cnpkg_buildmodel(p, [size(label, 1), size(label, 2), 1]);
m2.input = permute(input, [3 1 2]);
m2.label = permute(label, [3 1 2]);
m2.batch = [0 0 0];

% Initialize weights.

for i = 2 : numel(p.fCount)
    siz = cell2mat(m1.layers{m1.zw(i)}.size);
    m1.layers{m1.zw(i)}.val = single(0.5 * randn(siz) / sqrt(prod(siz(1 : 4))));
    m1.layers{m1.zb(i)}.val = single(0.5 * randn(siz(5), 1));
end

% Loop over epochs.

eloss = zeros(1, nEpoch);

fprintf('training (%u epochs, %u iterations per epoch):\n', nEpoch, nIter);

for epoch = 1 : nEpoch

    % Randomly select minibatches.
    m1.batch = zeros(nIter, 3);
    m1.batch(:, 1) = randi(size(label, 1) - miniOutSize(1) + 1, [nIter 1]) - 1;
    m1.batch(:, 2) = randi(size(label, 2) - miniOutSize(2) + 1, [nIter 1]) - 1;
    m1.batch(:, 3) = 0;

    % Iterate the training model once per minibatch. Each iteration consists of a forward pass, a backward pass, and
    % a weight update.
    t1 = clock;
    cns('init', m1, 'gpu', 'mean');
    cns('run', nIter);

    % Retrieve weights, transfer them to testing model.
    for i = 2 : numel(p.fCount)
        m1.layers{m1.zw(i)}.val = cns('get', m1.zw(i), 'val');
        m1.layers{m1.zb(i)}.val = cns('get', m1.zb(i), 'val');
        m2.layers{m2.zw(i)}.val = m1.layers{m1.zw(i)}.val;
        m2.layers{m2.zb(i)}.val = m1.layers{m1.zb(i)}.val;
    end
    t1 = etime(clock, t1);

    % Iterate the testing model once and plot status.
    t2 = clock;
    cns('init', m2, 'gpu', 'mean');
    cns('step', 1, m2.layers{m2.zx(end)}.stepNo(1)); % Don't need to backpropagate, etc.
    output = permute(cns('get', m2.zx(end), 'val'), [2 3 1]);
    t2 = etime(clock, t2);

    loss   = mean(0.5 * (label - output) .^ 2, 3);
    output = min(max(output, 0), 1);

    eloss(epoch) = mean(loss(:));

    fprintf('completed epoch #%u of %u (train: %f sec, test: %f sec)\n', epoch, nEpoch, t1, t2);

    figure(1); clf;
    plot(eloss(1 : epoch));
    title('loss'); xlabel('epoch');

    figure(2); clf;
    ax = [];
    ax(1) = subplot(221); image  (label    ); title('clean image'   ); axis image off;
    ax(2) = subplot(222); image  (dispInput); title('noisy image'   ); axis image off;
    ax(3) = subplot(223); imagesc(loss     ); title('loss image'    ); axis image off; colorbar;
    ax(4) = subplot(224); image  (output   ); title('denoised image'); axis image off;
    linkaxes(ax);

end

% Release GPU resources.
cns('done');

==== cnpkg/demo/cnpkg_buildmodel.m ====

function m = cnpkg_buildmodel(p, outSize)

% M = cnpkg_buildmodel(P, OUTSIZE) returns a CNS network model structure for a
% 3-D convolutional network which can be used as input to cns('init'). P is a
% small set of parameters that define the network, and OUTSIZE is the spatial
% size [Y*X*D] of the output level. See the example in cnpkg_run for details.
%
% See also: cnpkg_run.

%***********************************************************************************************************************

n = numel(p.fCount); % Number of 'X' levels.

zx = [1, 4 + (0 : n - 2) * 3]; % CNS layer numbers for all levels.
zw = [0, 2 + (0 : n - 2) * 3]; % CNS layer numbers for all weight layers.
zb = [0, 3 + (0 : n - 2) * 3]; % CNS layer numbers for all bias layers.
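% (Worked example, added for illustration): with p.fCount = [3 10 10 3] we have
% n = 4, so zx = [1 4 7 10], zw = [0 2 5 8], zb = [0 3 6 9]. After the input
% level (CNS layer 1), each level i occupies three consecutive CNS layers:
% weights, biases, then the 'X' layer itself. The leading zeros are
% placeholders; the input level has no weights or biases.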
m.package = 'cnpkg';

% Set up the input level.

m.layers{zx(1)}.z       = zx(1); % For display only.
m.layers{zx(1)}.name    = 'x1';
m.layers{zx(1)}.type    = 'input';
m.layers{zx(1)}.stepNo  = 1;     % Step number for forward pass.
m.layers{zx(1)}.size{1} = p.fCount(1); % Sizes of spatial dimensions to be determined by cns_mapdim (below).

% Set up the hidden levels.

for i = 2 : n - 1
    m.layers{zx(i)}.z       = zx(i); % For display only.
    m.layers{zx(i)}.name    = sprintf('x%u', i);
    m.layers{zx(i)}.type    = 'hidden';
    m.layers{zx(i)}.stepNo  = [i, 2 * n - i + 1]; % Step numbers for forward and backward passes.
    m.layers{zx(i)}.kernel  = {'', 'back'};
    m.layers{zx(i)}.zp      = zx(i - 1); % Pointer to previous level (for forward pass).
    m.layers{zx(i)}.zw      = zw(i);     % Pointer to previous weight layer (for forward pass).
    m.layers{zx(i)}.zb      = zb(i);     % Pointer to previous bias layer (for forward pass).
    m.layers{zx(i)}.znw     = zw(i + 1); % Pointer to next weight layer (for backward pass).
    m.layers{zx(i)}.zn      = zx(i + 1); % Pointer to next level (for backward pass).
    m.layers{zx(i)}.size{1} = p.fCount(i); % Sizes of spatial dimensions to be determined by cns_mapdim (below).
end

% Set up the output level.

m.layers{zx(n)}.z       = zx(n); % For display only.
m.layers{zx(n)}.name    = sprintf('x%u', n);
m.layers{zx(n)}.type    = 'output';
m.layers{zx(n)}.stepNo  = [n, n + 1]; % Step numbers for forward and backward passes.
m.layers{zx(n)}.kernel  = {'', 'back'};
m.layers{zx(n)}.zp      = zx(n - 1); % Pointer to previous level (for forward pass).
m.layers{zx(n)}.zw      = zw(n);     % Pointer to previous weight layer (for forward pass).
m.layers{zx(n)}.zb      = zb(n);     % Pointer to previous bias layer (for forward pass).
m.layers{zx(n)}.size{1} = p.fCount(n); % Sizes of spatial dimensions to be determined by cns_mapdim (below).

% Set up weight and bias layers.

for i = 2 : n

    m.layers{zw(i)}.z       = zw(i); % For display only.
    m.layers{zw(i)}.name    = sprintf('w%u', i);
    m.layers{zw(i)}.type    = 'weight';
    m.layers{zw(i)}.stepNo  = 2 * n;     % All weights are updated in parallel after the backward pass.
    m.layers{zw(i)}.zp      = zx(i - 1); % Pointer to previous level (for update).
    m.layers{zw(i)}.zn      = zx(i);     % Pointer to next level (for update).
    m.layers{zw(i)}.eta     = p.eta(i);
    m.layers{zw(i)}.size{1} = p.fCount(i - 1);
    m.layers{zw(i)}.size{2} = p.fSize (i);
    m.layers{zw(i)}.size{3} = p.fSize (i);
    m.layers{zw(i)}.size{4} = p.fDepth(i);
    m.layers{zw(i)}.size{5} = p.fCount(i);

    m.layers{zb(i)}.z       = zb(i); % For display only.
    m.layers{zb(i)}.name    = sprintf('b%u', i);
    m.layers{zb(i)}.type    = 'bias';
    m.layers{zb(i)}.stepNo  = 2 * n; % All biases are updated in parallel after the backward pass.
    m.layers{zb(i)}.zn      = zx(i); % Pointer to next level (for update).
    m.layers{zb(i)}.eta     = p.eta(i);
    m.layers{zb(i)}.size{1} = p.fCount(i);

end

% Set the spatial size of the output level and assign each output cell to a position in the real-valued, common
% coordinate system.

m = cns_mapdim(m, zx(n), 2, 'pixels', outSize(1));
m = cns_mapdim(m, zx(n), 3, 'pixels', outSize(2));
m = cns_mapdim(m, zx(n), 4, 'pixels', outSize(3));

% Now grow the network downwards from output to input, creating each level with the appropriate size, and the
% appropriate arrangement of its cells in the common coordinate system, such that it will generate the level above via
% valid convolution. Each level will be at least as large as the one above it.
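% (Worked example, added for illustration): with the demo parameters from
% cnpkg_run (outSize = [3 3 1], 7x7 weights into both hidden levels, 1x1
% weights into the output level), the four levels come out 15x15, 9x9, 3x3,
% and 3x3 in Y and X: each 7x7 'valid' convolution needs an input 6 cells
% larger than its output, while the final 1x1 weights do not shrink the map.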
for i = n - 1 : -1 : 1
    m = cns_mapdim(m, zx(i), 2, 'int-td', zx(i + 1), p.fSize (i + 1), 1);
    m = cns_mapdim(m, zx(i), 3, 'int-td', zx(i + 1), p.fSize (i + 1), 1);
    m = cns_mapdim(m, zx(i), 4, 'int-td', zx(i + 1), p.fDepth(i + 1), 1);
end

% This flag turns off double-buffering in CNS, which is not needed for this model. See the CNS manual.

m.independent = true;

% CNS doesn't need these, but we'll store them in the model structure for our own use.

m.zx = zx;
m.zw = zw;
m.zb = zb;

return;

==== cnpkg/cnpkg_level.m ====

classdef cnpkg_level < cnpkg_base
methods (Static)

% Abstract type encompassing all 'X' layers: input, hidden, and output. Does not include weight and bias layers.

%-----------------------------------------------------------------------------------------------------------------------

function p = CNSProps

p.abstract = true;

p.dnames = {'f' 'y' 'x' 'd'};      % Levels are 4-dimensional.
p.dmap   = [false true true true]; % The spatial dimensions will be mapped to common real-valued coordinates.
p.dims   = {1 1 2 2};
p.dparts = {2 1 1 2};              % Y and X are inner dimensions and should be iterated over using inner loops.

end

%-----------------------------------------------------------------------------------------------------------------------

function f = CNSFields

f.val = {'cv', 'cache', 'dflt', 0}; % Holds the (forward-propagating) value of each cell.

end

%-----------------------------------------------------------------------------------------------------------------------

end
end

==== cnpkg/cnpkg_bias.h ====

// Kernel for a single cell of a bias layer.
// Block size is usually [16 N] where N is found by experiment. See CNS manual.
// N might need to be reduced for cards below GTX 285/295.

#BLOCKSIZE 16 16

// Get handle to the next level. This allows us to use the 'HANDLE' versions of macros, which are somewhat faster.

COMPUTED_SENS_HANDLE hn = GET_COMPUTED_SENS_HANDLE(ZN);

// Get spatial dimensions of the next level.

int ySize = COMPUTED_SENS_HANDLE_Y_SIZE(hn);
int xSize = COMPUTED_SENS_HANDLE_X_SIZE(hn);
int dSize = COMPUTED_SENS_HANDLE_D_SIZE(hn);

// Average over the entire next level (for this feature).

float e = 0.0f;

for (int d = 0; d < dSize; d++) {
    for (int x = 0; x < xSize; x++) {
        for (int y = 0; y < ySize; y++) {
            e += READ_COMPUTED_SENS_HANDLE(hn, THIS_NF, y, x, d);
        }
    }
}

e /= (float)(ySize * xSize * dSize);

// Perform the bias update.

float b = READ_VAL;
b += ETA * e;
WRITE_VAL(b);

==== cnpkg/cnpkg_base.m ====

classdef cnpkg_base < cns_base
methods (Static)

% Base type for all layers (including weight and bias layers).

%-----------------------------------------------------------------------------------------------------------------------

function p = CNSProps

% Because the base type is abstract, we don't have to define its dimensionality. That allows subtypes to have
% different dimensionalities.

p.abstract = true;

end

%-----------------------------------------------------------------------------------------------------------------------

end
end

==== cnpkg/Contents.m ====

% cnpkg: 3-D Convolutional Network Package for CNS
%
% Click here for help.

==== cnpkg/cnpkg.h ====

// Contains helper functions that any kernel can use.

// Activation function y = f(x).

INLINE float Sigmoid(float x) {
    return 1.0f / (1.0f + expf(-x));
}

// Derivative dy/dx of the activation function (in terms of y).

INLINE float DSigmoid(float y) {
    return y * (1.0f - y);
}
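// A quick check of the identity above (added note): if y = 1 / (1 + exp(-x)),
// then dy/dx = exp(-x) / (1 + exp(-x))^2 = y * (1 - y). Expressing the
// derivative in terms of y lets the backward kernels reuse each cell's stored
// output value instead of recomputing (or storing) its input x.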
==== cnpkg/cnpkg_computed.h ====

// Kernel for a single cell of a hidden or output layer (forward pass).
// Block size is usually [16 N] where N is found by experiment. See CNS manual.
// N might need to be reduced for cards below GTX 285/295.

#BLOCKSIZE 16 16

// Get handles to the previous level and weight layer. This allows us to use the 'HANDLE' versions of macros,
// which are somewhat faster.

VAL_HANDLE        hp = GET_LEVEL_VAL_HANDLE (ZP);
WEIGHT_VAL_HANDLE hw = GET_WEIGHT_VAL_HANDLE(ZW);

// Get dimensions of weight array.

int fSize = WEIGHT_VAL_HANDLE_F_SIZE(hw);
int ySize = WEIGHT_VAL_HANDLE_Y_SIZE(hw);
int xSize = WEIGHT_VAL_HANDLE_X_SIZE(hw);
int dSize = WEIGHT_VAL_HANDLE_D_SIZE(hw);

// Find our receptive field in the previous level.

int y1, x1, d1, dummy;

FIND_LEVEL_Y_NEAREST(ZP, ySize, y1, dummy);
FIND_LEVEL_X_NEAREST(ZP, xSize, x1, dummy);
FIND_LEVEL_D_NEAREST(ZP, dSize, d1, dummy);

// Compute the dot product of the receptive field and the weight array (in 4-D).

float v = 0.0f;

for (int f = 0; f < fSize; f++) {
    for (int k = dSize - 1, d = d1; k >= 0; k--, d++) {     // We use the convolution convention for the spatial
        for (int j = xSize - 1, x = x1; j >= 0; j--, x++) { // dimensions, flipping the weight array.
            for (int i = ySize - 1, y = y1; i >= 0; i--, y++) {
                float p = READ_VAL_HANDLE       (hp, f, y, x, d);
                float w = READ_WEIGHT_VAL_HANDLE(hw, f, i, j, k, THIS_F);
                v += p * w;
            }
        }
    }
}

// Add bias, apply nonlinearity, and save.

v += READ_BIAS_VAL(ZB, THIS_F);
v  = Sigmoid(v);

WRITE_VAL(v);

==== cnpkg/cnpkg_output_back.h ====

// Backward pass for a single cell of an output layer. Computes the sensitivity.
// Block size is usually [16 N] where N is found by experiment. See CNS manual.
// N might need to be reduced for cards below GTX 285/295.

#BLOCKSIZE 16 16

// Get minibatch offset into label block for this iteration.

int yoff = READ_BATCH(ITER_NO, 0);
int xoff = READ_BATCH(ITER_NO, 1);
int doff = READ_BATCH(ITER_NO, 2);

// Get label value.

float c = READ_LABEL(THIS_F, THIS_Y + yoff, THIS_X + xoff, THIS_D + doff);

float v = READ_VAL;
float s = DSigmoid(v) * (c - v);

WRITE_SENS(s);
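// Sign convention (added note): for the squared error E = 0.5 * (c - v)^2 used
// by the demo, s = DSigmoid(v) * (c - v) is -dE/dnet, the negative gradient
// with respect to this cell's pre-nonlinearity input. This is why the weight
// and bias kernels apply their updates with '+= ETA * e' rather than '-='.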
==== cnpkg/cnpkg_bias.m ====

classdef cnpkg_bias < cnpkg_base
methods (Static)

% Bias type. Each weight array in a weight layer has a corresponding scalar bias value in a bias layer. The bias is
% also updated by backpropagation.

%-----------------------------------------------------------------------------------------------------------------------

function p = CNSProps

p.dnames = {'nf'}; % Bias layers are 1-dimensional.
p.dims   = {1};
p.dparts = {1};

end

%-----------------------------------------------------------------------------------------------------------------------

function f = CNSFields

f.zn  = {'lz', 'type', 'computed'}; % Pointer to the next 'X' level (that these biases help generate).
f.eta = {'lp', 'private'};          % Learning rate for these biases.
f.val = {'cv', 'cache'};            % Holds the bias values.

end

%-----------------------------------------------------------------------------------------------------------------------

end
end

==== cnpkg/cnpkg.m ====

classdef cnpkg < cns_package
methods (Static)

% Contains definitions that apply to an entire model.

%-----------------------------------------------------------------------------------------------------------------------

function f = CNSFields

% These 4-D data blocks will contain the full input image and its corresponding ground truth output.

f.input = {'ma', 'dnames', {'f' 'y' 'x' 'd'}, 'dims', {1 1 2 2}, 'dparts', {2 1 1 2}};
f.label = {'ma', 'dnames', {'f' 'y' 'x' 'd'}, 'dims', {1 1 2 2}, 'dparts', {2 1 1 2}};

% Each iteration of the network will use a different subcube of the input (and corresponding output). Such a subcube
% is called a 'minibatch'. The following 2-D data block will contain the Y, X, and D offsets that define the
% minibatch for each iteration.

f.batch = {'ma', 'cache', 'dnames', {'i' 'c'}, 'dims', {1 2}, 'dparts', {1 1}, 'int'};

end

%-----------------------------------------------------------------------------------------------------------------------

end
end

==== cnpkg/cnpkg_computed.m ====

classdef cnpkg_computed < cnpkg_level
methods (Static)

% Abstract type encompassing 'X' layers that perform computations, i.e., hidden and output layers.

%-----------------------------------------------------------------------------------------------------------------------

function p = CNSProps

p.abstract = true;
p.kernels  = {'back'};

end

%-----------------------------------------------------------------------------------------------------------------------

function f = CNSFields

f.zp   = {'lz', 'type', 'level'};    % Pointer to the previous level.
f.zw   = {'lz', 'type', 'weight'};   % Pointer to the weights used to generate this level from the previous level.
f.zb   = {'lz', 'type', 'bias'};     % Pointer to the biases used to generate this level from the previous level.
f.sens = {'cv', 'cache', 'dflt', 0}; % Holds the backward-propagating error term ('sensitivity') for each cell.

end

%-----------------------------------------------------------------------------------------------------------------------

end
end

==== cnpkg/cnpkg_weight.h ====

// Kernel for a single cell of a weight layer.
// Block size is usually [16 N] where N is found by experiment. See CNS manual.
// N might need to be reduced for cards below GTX 285/295.

#BLOCKSIZE 16 16

// Get handles to the previous and next levels. This allows us to use the 'HANDLE' versions of macros, which are
// somewhat faster.

LEVEL_VAL_HANDLE     hp = GET_LEVEL_VAL_HANDLE    (ZP);
COMPUTED_SENS_HANDLE hn = GET_COMPUTED_SENS_HANDLE(ZN);

// Get spatial dimensions of the next level. It will always be smaller than the previous level.

int nySize = COMPUTED_SENS_HANDLE_Y_SIZE(hn);
int nxSize = COMPUTED_SENS_HANDLE_X_SIZE(hn);
int ndSize = COMPUTED_SENS_HANDLE_D_SIZE(hn);

// Find the corresponding subcube in the previous level. This depends on which weight cell we are updating.
// Note we are performing a valid correlation.

int py1 = Y_SIZE - 1 - THIS_Y;
int px1 = X_SIZE - 1 - THIS_X;
int pd1 = D_SIZE - 1 - THIS_D;

// Compute the dot product of the next level and its corresponding subcube in the previous level (in 3-D).
// Normalize by number of elements.
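// (Worked example, added for illustration): with Y_SIZE = 7, the weight at
// THIS_Y = 0 reads the previous level starting at py1 = 6, i.e. 6 cells below
// each output cell. This reproduces the flipped pairing used by the forward
// pass's convolution, so each weight accumulates products from exactly the
// value/sensitivity pairs it connected.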
float e = 0.0f;

for (int nd = 0, pd = pd1; nd < ndSize; nd++, pd++) {     // This is a correlation. Nothing gets flipped.
    for (int nx = 0, px = px1; nx < nxSize; nx++, px++) {
        for (int ny = 0, py = py1; ny < nySize; ny++, py++) {
            float p = READ_LEVEL_VAL_HANDLE    (hp, THIS_F , py, px, pd);
            float n = READ_COMPUTED_SENS_HANDLE(hn, THIS_NF, ny, nx, nd);
            e += p * n;
        }
    }
}

e /= (float)(nySize * nxSize * ndSize);

// Perform the weight update.

float w = READ_VAL;
w += ETA * e;
WRITE_VAL(w);

==== cnpkg/cnpkg_weight.m ====

classdef cnpkg_weight < cnpkg_base
methods (Static)

% Weight type. A weight layer holds a set of weights used to generate one 'X' level from another. The goal of
% backpropagation is to find optimal weights. (See also the bias type.)

%-----------------------------------------------------------------------------------------------------------------------

function p = CNSProps

p.dnames = {'f' 'y' 'x' 'd' 'nf'}; % Weight layers are 5-dimensional.
p.dims   = {1 1 2 2 1};
p.dparts = {2 1 1 2 3};            % Inner dimensions are the same as for 'X' levels.

end

%-----------------------------------------------------------------------------------------------------------------------

function f = CNSFields

f.zp  = {'lz', 'type', 'level'};    % Pointer to the previous 'X' level.
f.zn  = {'lz', 'type', 'computed'}; % Pointer to the next 'X' level (that these weights help generate).
f.eta = {'lp', 'private'};          % Learning rate for this set of weights.
f.val = {'cv', 'cache'};            % Holds the weight values.

end

%-----------------------------------------------------------------------------------------------------------------------

end
end

==== cnpkg/doc/index.html ====
cnpkg: 3-D Convolutional Network Package for CNS

Authors:  Jim Mutch and Srini Turaga
Homepage: http://cbcl.mit.edu/jmutch/cns
Once the package has been installed, make sure it's working by running the following demo script:

   >> cnpkg_run

The CNS manual contains a troubleshooting section if you encounter problems.
cnpkg currently consists of:

   demo/cnpkg_run.m        - demo script that trains a denoising network
   demo/cnpkg_buildmodel.m - builds the CNS model structure for the demo
   cnpkg*.m                - classdef files defining the package's cell types
   cnpkg*.h                - the corresponding CNS kernels

Revision history:
Rev# | Date       | Changes
r14  | 2010-11-13 | Updated to use #BLOCKSIZE.
r15  | 2010-11-13 | No longer uses PHASE_NO macro.
r17  | 2011-01-28 | Updated to use FIND_type_dim_... macros.
r19  | 2011-02-17 | Cell types are now defined using classdef.
r22  | 2012-04-10 | Tested under CUDA 4.1.