update leaky parallel docstrings
jeshraghian committed Nov 19, 2023
1 parent 67e5a85 commit 21e9379
Showing 1 changed file with 13 additions and 7 deletions.
20 changes: 13 additions & 7 deletions snntorch/_neurons/leakyparallel.py
@@ -23,6 +23,7 @@ class LeakyParallel(nn.Module):
* :math:`β` - Membrane potential decay rate
Several differences between `LeakyParallel` and `Leaky` include:
* Negative hidden states are clipped due to the forced ReLU operation in RNN
* Linear weights are included in addition to recurrent weights
* `beta` is clipped between [0,1] and cloned to `weight_hh_l` only upon layer initialization. It is unused otherwise
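As a rough illustration of the last two points, here is a minimal sketch, not the library's exact code: the `nn.RNN` call, the literal sizes, and the in-place copy are assumptions based on the description above. It shows a `beta` value being clipped to [0, 1] and copied onto the diagonal of the recurrent weight matrix once at initialization, with the `relu` nonlinearity being what clips negative hidden states:

    import torch
    import torch.nn as nn

    hidden_size = 128
    beta = torch.tensor(1.2)

    # relu nonlinearity forces hidden states to be non-negative
    rnn = nn.RNN(input_size=784, hidden_size=hidden_size, nonlinearity="relu")

    with torch.no_grad():
        beta_clipped = beta.clamp(0.0, 1.0)  # clip beta to [0, 1]
        # clone the clipped decay rate onto the diagonal, once, at init
        rnn.weight_hh_l0.copy_(torch.diag(beta_clipped.expand(hidden_size)))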
@@ -38,15 +39,20 @@ class LeakyParallel(nn.Module):
import snntorch as snn
beta = 0.5
num_inputs = 784
num_hidden = 128
num_outputs = 10
batch_size = 128
x = torch.rand((num_steps, batch_size, num_inputs))
# Define Network
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # initialize layers
-        self.lif1 = snn.LeakyParallel(input_size=784, hidden_size=128)
-        self.lif2 = snn.LeakyParallel(input_size=128, hidden_size=10, beta=beta)
+        self.lif1 = snn.LeakyParallel(input_size=num_inputs, hidden_size=num_hidden) # randomly initialize recurrent weights
+        self.lif2 = snn.LeakyParallel(input_size=num_hidden, hidden_size=num_outputs, beta=beta, learn_beta=True) # learnable recurrent weights initialized at beta
    def forward(self, x):
        spk1 = self.lif1(x)
@@ -94,11 +100,13 @@ def forward(self, x):
to False
:type learn_threshold: bool, optional
-:param weight_hh_enable: Option to set the hidden matrix to be dense or diagonal. Diagonal (i.e., False) adheres to how a LIF neuron works. Dense (True) would allow the membrane potential of one LIF neuron to influence all others, and follow the RNN default implementation. Defaults to False
+:param weight_hh_enable: Option to set the hidden matrix to be dense or
+    diagonal. Diagonal (i.e., False) adheres to how a LIF neuron works.
+    Dense (True) would allow the membrane potential of one LIF neuron to
+    influence all others, and follow the RNN default implementation. Defaults to False
:type weight_hh_enable: bool, optional
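A hedged sketch of what the diagonal option amounts to, for illustration only; the direct manipulation of `nn.RNN` weights here is an assumption, not the class's internal code. Masking the dense recurrent matrix with an identity leaves each neuron's membrane potential driven only by its own past state:

    import torch
    import torch.nn as nn

    hidden_size = 128
    rnn = nn.RNN(input_size=784, hidden_size=hidden_size, nonlinearity="relu")

    with torch.no_grad():
        # weight_hh_enable=False: keep only the diagonal, so neuron i feeds
        # back only into itself; True keeps the dense RNN default.
        rnn.weight_hh_l0.mul_(torch.eye(hidden_size))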
Inputs: \\input_
- **input_** of shape `(L, H_{in})` for unbatched input,
or `(L, N, H_{in})` containing the features of the input sequence.
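For example, the whole sequence is passed in a single call, batched or unbatched; a usage sketch follows, with the output shapes inferred from the description above rather than quoted from the docstring, so treat them as assumptions:

    import torch
    import snntorch as snn

    num_steps, batch_size, num_inputs, num_hidden = 25, 128, 784, 128
    lif = snn.LeakyParallel(input_size=num_inputs, hidden_size=num_hidden)

    x_batched = torch.rand(num_steps, batch_size, num_inputs)  # (L, N, H_in)
    spk = lif(x_batched)                                        # expected (L, N, hidden_size)

    x_unbatched = torch.rand(num_steps, num_inputs)             # (L, H_in)
    spk_single = lif(x_unbatched)                               # expected (L, hidden_size)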
@@ -186,9 +194,7 @@ def __init__(
def forward(self, input_):
    mem = self.rnn(input_)
    # mem[0] contains relu'd outputs, mem[1] contains final hidden state
-    mem_shift = mem[0] - self.threshold
-    # print(mem[0])
-    # print(self.rnn.weight_hh_l0)
+    mem_shift = mem[0] - self.threshold # self.rnn.weight_hh_l0
    spk = self.spike_grad(mem_shift)
    spk = spk * self.graded_spikes_factor
    return spk