author	Alexey Dobriyan <adobriyan@gmail.com>	2005-05-26 12:59:42 -0700
committer	David S. Miller <davem@davemloft.net>	2005-05-26 12:59:42 -0700
commit	c8b35d2a29ec3c93e3b9c1e70d649a77a214b1c1 (patch)
tree	cbfd8aa83c964984bc6ffd0f3559d711f157bc40 /net
parent	c6b3365391c626206f6789354794a81a010cb7a1 (diff)
[TOKENRING]: net/802/tr.c: s/struct rif_cache_s/struct rif_cache/
"_s" suffix is certainly of hungarian origin. Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/802/tr.c	22
1 file changed, 11 insertions, 11 deletions
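
For context, a minimal sketch (not part of the patch) of why the tag rename is safe: in C a struct tag lives in its own namespace, so the "_s" suffix carries no information, and a self-referential "next" pointer resolves just as well against the plain tag. The field names mirror the entry shown in the diff below; the SKETCH_* constants and table size are assumptions made only for this sketch, not the kernel's values.

/* Illustrative sketch only, not the patched kernel code. */
#define SKETCH_TR_ALEN      6   /* assumed Token Ring hardware address length */
#define SKETCH_TABLE_SIZE  32   /* table size picked for the sketch */

struct rif_cache {
	unsigned char addr[SKETCH_TR_ALEN];
	struct rif_cache *next;     /* same tag usable inside its own definition */
	unsigned long last_used;
};

static struct rif_cache *rif_table[SKETCH_TABLE_SIZE];
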
diff --git a/net/802/tr.c b/net/802/tr.c
index 3bfb86260f27..a755e880f4ba 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -47,12 +47,12 @@ static void rif_check_expire(unsigned long dummy);
* Each RIF entry we learn is kept this way
*/
-struct rif_cache_s {
+struct rif_cache {
unsigned char addr[TR_ALEN];
int iface;
__be16 rcf;
__be16 rseg[8];
- struct rif_cache_s *next;
+ struct rif_cache *next;
unsigned long last_used;
unsigned char local_ring;
};
@@ -64,7 +64,7 @@ struct rif_cache_s {
* up a lot.
*/
-static struct rif_cache_s *rif_table[RIF_TABLE_SIZE];
+static struct rif_cache *rif_table[RIF_TABLE_SIZE];
static DEFINE_SPINLOCK(rif_lock);
@@ -249,7 +249,7 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
{
int slack;
unsigned int hash;
- struct rif_cache_s *entry;
+ struct rif_cache *entry;
unsigned char *olddata;
static const unsigned char mcast_func_addr[]
= {0xC0,0x00,0x00,0x04,0x00,0x00};
@@ -337,7 +337,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
unsigned int hash, rii_p = 0;
- struct rif_cache_s *entry;
+ struct rif_cache *entry;
spin_lock_bh(&rif_lock);
@@ -373,7 +373,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
* FIXME: We ought to keep some kind of cache size
* limiting and adjust the timers to suit.
*/
- entry=kmalloc(sizeof(struct rif_cache_s),GFP_ATOMIC);
+ entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
if(!entry)
{
@@ -435,7 +435,7 @@ static void rif_check_expire(unsigned long dummy)
spin_lock_bh(&rif_lock);
for(i =0; i < RIF_TABLE_SIZE; i++) {
- struct rif_cache_s *entry, **pentry;
+ struct rif_cache *entry, **pentry;
pentry = rif_table+i;
while((entry=*pentry) != NULL) {
@@ -467,10 +467,10 @@ static void rif_check_expire(unsigned long dummy)
#ifdef CONFIG_PROC_FS
-static struct rif_cache_s *rif_get_idx(loff_t pos)
+static struct rif_cache *rif_get_idx(loff_t pos)
{
int i;
- struct rif_cache_s *entry;
+ struct rif_cache *entry;
loff_t off = 0;
for(i = 0; i < RIF_TABLE_SIZE; i++)
@@ -493,7 +493,7 @@ static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
int i;
- struct rif_cache_s *ent = v;
+ struct rif_cache *ent = v;
++*pos;
@@ -522,7 +522,7 @@ static void rif_seq_stop(struct seq_file *seq, void *v)
static int rif_seq_show(struct seq_file *seq, void *v)
{
int j, rcf_len, segment, brdgnmb;
- struct rif_cache_s *entry = v;
+ struct rif_cache *entry = v;
if (v == SEQ_START_TOKEN)
seq_puts(seq,