-rw-r--r--  Documentation/DocBook/mac80211.tmpl | 6
-rw-r--r--  Documentation/feature-removal-schedule.txt | 37
-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  arch/alpha/kernel/entry.S | 3
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 2
-rw-r--r--  arch/ia64/ia32/ia32_entry.S | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/mips/kernel/linux32.c | 34
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 2
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 2
-rw-r--r--  arch/sparc/kernel/smp_64.c | 4
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 2
-rw-r--r--  arch/sparc/kernel/time_64.c | 2
-rw-r--r--  arch/um/drivers/net_kern.c | 39
-rw-r--r--  arch/um/include/shared/net_kern.h | 2
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 22
-rw-r--r--  arch/x86/include/asm/ia32.h | 7
-rw-r--r--  arch/x86/include/asm/sys_ia32.h | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 21
-rw-r--r--  drivers/firewire/fw-card.c | 149
-rw-r--r--  drivers/firewire/fw-cdev.c | 1044
-rw-r--r--  drivers/firewire/fw-device.c | 203
-rw-r--r--  drivers/firewire/fw-device.h | 23
-rw-r--r--  drivers/firewire/fw-iso.c | 227
-rw-r--r--  drivers/firewire/fw-ohci.c | 260
-rw-r--r--  drivers/firewire/fw-sbp2.c | 57
-rw-r--r--  drivers/firewire/fw-topology.c | 29
-rw-r--r--  drivers/firewire/fw-topology.h | 19
-rw-r--r--  drivers/firewire/fw-transaction.c | 185
-rw-r--r--  drivers/firewire/fw-transaction.h | 138
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 235
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 12
-rw-r--r--  drivers/gpu/drm/drm_info.c | 328
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 721
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 15
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 116
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 898
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c | 257
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c | 334
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 406
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 148
-rw-r--r--  drivers/ieee1394/csr.c | 8
-rw-r--r--  drivers/ieee1394/dv1394.c | 2
-rw-r--r--  drivers/ieee1394/eth1394.c | 4
-rw-r--r--  drivers/ieee1394/highlevel.c | 2
-rw-r--r--  drivers/ieee1394/nodemgr.c | 4
-rw-r--r--  drivers/ieee1394/nodemgr.h | 2
-rw-r--r--  drivers/ieee1394/raw1394.c | 14
-rw-r--r--  drivers/ieee1394/sbp2.c | 9
-rw-r--r--  drivers/ieee1394/video1394.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 2
-rw-r--r--  drivers/media/dvb/firewire/firedtv-avc.c | 10
-rw-r--r--  drivers/mtd/mtdsuper.c | 7
-rw-r--r--  drivers/net/3c503.c | 3
-rw-r--r--  drivers/net/Kconfig | 8
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/ac3200.c | 22
-rw-r--r--  drivers/net/appletalk/cops.c | 45
-rw-r--r--  drivers/net/appletalk/ltpc.c | 38
-rw-r--r--  drivers/net/at1700.c | 19
-rw-r--r--  drivers/net/benet/be_main.c | 28
-rw-r--r--  drivers/net/cs89x0.c | 28
-rw-r--r--  drivers/net/cxgb3/adapter.h | 4
-rw-r--r--  drivers/net/cxgb3/common.h | 4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 12
-rw-r--r--  drivers/net/cxgb3/sge.c | 164
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 80
-rw-r--r--  drivers/net/depca.c | 19
-rw-r--r--  drivers/net/eepro.c | 17
-rw-r--r--  drivers/net/eexpress.c | 17
-rw-r--r--  drivers/net/eth16i.c | 18
-rw-r--r--  drivers/net/ethoc.c | 1112
-rw-r--r--  drivers/net/ewrk3.c | 19
-rw-r--r--  drivers/net/gianfar.c | 48
-rw-r--r--  drivers/net/ibmlana.c | 17
-rw-r--r--  drivers/net/irda/donauboe.c | 12
-rw-r--r--  drivers/net/lance.c | 19
-rw-r--r--  drivers/net/lp486e.c | 17
-rw-r--r--  drivers/net/ni52.c | 21
-rw-r--r--  drivers/net/ni65.c | 75
-rw-r--r--  drivers/net/seeq8005.c | 17
-rw-r--r--  drivers/net/smc-ultra.c | 5
-rw-r--r--  drivers/net/smc-ultra32.c | 23
-rw-r--r--  drivers/net/smc9194.c | 17
-rw-r--r--  drivers/net/smsc911x.c | 5
-rw-r--r--  drivers/net/tokenring/madgemc.c | 11
-rw-r--r--  drivers/net/tokenring/proteon.c | 9
-rw-r--r--  drivers/net/tokenring/skisa.c | 9
-rw-r--r--  drivers/net/tokenring/smctr.c | 16
-rw-r--r--  drivers/net/ucc_geth.c | 21
-rw-r--r--  drivers/net/wan/sdla.c | 36
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 1
-rw-r--r--  drivers/net/wireless/ar9170/Kconfig | 17
-rw-r--r--  drivers/net/wireless/ar9170/Makefile | 3
-rw-r--r--  drivers/net/wireless/ar9170/ar9170.h | 209
-rw-r--r--  drivers/net/wireless/ar9170/cmd.c | 129
-rw-r--r--  drivers/net/wireless/ar9170/cmd.h | 91
-rw-r--r--  drivers/net/wireless/ar9170/eeprom.h | 179
-rw-r--r--  drivers/net/wireless/ar9170/hw.h | 417
-rw-r--r--  drivers/net/wireless/ar9170/led.c | 171
-rw-r--r--  drivers/net/wireless/ar9170/mac.c | 452
-rw-r--r--  drivers/net/wireless/ar9170/main.c | 1671
-rw-r--r--  drivers/net/wireless/ar9170/phy.c | 1240
-rw-r--r--  drivers/net/wireless/ar9170/usb.c | 748
-rw-r--r--  drivers/net/wireless/ar9170/usb.h | 74
-rw-r--r--  drivers/net/wireless/arlan-main.c | 21
-rw-r--r--  drivers/net/wireless/ath5k/ath5k.h | 35
-rw-r--r--  drivers/net/wireless/ath5k/attach.c | 2
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 46
-rw-r--r--  drivers/net/wireless/ath5k/base.h | 2
-rw-r--r--  drivers/net/wireless/ath5k/desc.c | 4
-rw-r--r--  drivers/net/wireless/ath5k/eeprom.c | 774
-rw-r--r--  drivers/net/wireless/ath5k/eeprom.h | 128
-rw-r--r--  drivers/net/wireless/ath5k/initvals.c | 4
-rw-r--r--  drivers/net/wireless/ath5k/led.c | 2
-rw-r--r--  drivers/net/wireless/ath5k/phy.c | 1170
-rw-r--r--  drivers/net/wireless/ath5k/reg.h | 19
-rw-r--r--  drivers/net/wireless/ath5k/reset.c | 35
-rw-r--r--  drivers/net/wireless/ath9k/ahb.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/ani.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/ani.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/ath9k.h | 9
-rw-r--r--  drivers/net/wireless/ath9k/beacon.c | 56
-rw-r--r--  drivers/net/wireless/ath9k/calib.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/calib.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/debug.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/debug.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/eeprom.c | 308
-rw-r--r--  drivers/net/wireless/ath9k/eeprom.h | 5
-rw-r--r--  drivers/net/wireless/ath9k/hw.c | 14
-rw-r--r--  drivers/net/wireless/ath9k/hw.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/mac.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/main.c | 47
-rw-r--r--  drivers/net/wireless/ath9k/pci.c | 20
-rw-r--r--  drivers/net/wireless/ath9k/phy.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/phy.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/rc.c | 23
-rw-r--r--  drivers/net/wireless/ath9k/rc.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/recv.c | 9
-rw-r--r--  drivers/net/wireless/ath9k/reg.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/regd.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/regd.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/regd_common.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c | 78
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/b43/xmit.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-hw.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 179
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 54
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h | 52
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 38
-rw-r--r--  drivers/net/wireless/libertas/radiotap.h | 10
-rw-r--r--  drivers/net/wireless/libertas/rx.c | 12
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 4
-rw-r--r--  drivers/net/wireless/p54/Kconfig | 39
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 16
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 2
-rw-r--r--  drivers/net/wireless/wavelan.c | 79
-rw-r--r--  drivers/net/wireless/wavelan.p.h | 9
-rw-r--r--  drivers/ssb/Kconfig | 16
-rw-r--r--  drivers/ssb/b43_pci_bridge.c | 1
-rw-r--r--  fs/9p/v9fs_vfs.h | 4
-rw-r--r--  fs/9p/vfs_dentry.c | 4
-rw-r--r--  fs/9p/vfs_super.c | 5
-rw-r--r--  fs/Kconfig | 56
-rw-r--r--  fs/Makefile | 6
-rw-r--r--  fs/adfs/adfs.h | 2
-rw-r--r--  fs/adfs/dir.c | 2
-rw-r--r--  fs/affs/affs.h | 3
-rw-r--r--  fs/affs/amigaffs.c | 8
-rw-r--r--  fs/affs/namei.c | 4
-rw-r--r--  fs/afs/dir.c | 2
-rw-r--r--  fs/anon_inodes.c | 2
-rw-r--r--  fs/attr.c | 3
-rw-r--r--  fs/autofs/root.c | 2
-rw-r--r--  fs/autofs4/inode.c | 2
-rw-r--r--  fs/autofs4/root.c | 4
-rw-r--r--  fs/block_dev.c | 146
-rw-r--r--  fs/buffer.c | 145
-rw-r--r--  fs/cifs/cifsfs.c | 3
-rw-r--r--  fs/cifs/cifsfs.h | 4
-rw-r--r--  fs/cifs/dir.c | 4
-rw-r--r--  fs/coda/dir.c | 2
-rw-r--r--  fs/compat.c | 28
-rw-r--r--  fs/configfs/dir.c | 2
-rw-r--r--  fs/dcache.c | 48
-rw-r--r--  fs/devpts/inode.c | 188
-rw-r--r--  fs/dlm/dir.c | 18
-rw-r--r--  fs/dlm/dlm_internal.h | 2
-rw-r--r--  fs/dlm/lock.c | 60
-rw-r--r--  fs/dlm/lockspace.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 181
-rw-r--r--  fs/dlm/user.c | 24
-rw-r--r--  fs/drop_caches.c | 2
-rw-r--r--  fs/ecryptfs/dentry.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 2
-rw-r--r--  fs/ext2/balloc.c | 8
-rw-r--r--  fs/ext2/ialloc.c | 10
-rw-r--r--  fs/ext2/inode.c | 2
-rw-r--r--  fs/ext2/super.c | 1
-rw-r--r--  fs/ext2/xattr.c | 8
-rw-r--r--  fs/ext3/balloc.c | 8
-rw-r--r--  fs/ext3/ialloc.c | 12
-rw-r--r--  fs/ext3/inode.c | 6
-rw-r--r--  fs/ext3/namei.c | 6
-rw-r--r--  fs/ext3/super.c | 48
-rw-r--r--  fs/ext3/xattr.c | 6
-rw-r--r--  fs/ext4/balloc.c | 2
-rw-r--r--  fs/ext4/ext4.h | 2
-rw-r--r--  fs/ext4/ialloc.c | 12
-rw-r--r--  fs/ext4/inode.c | 40
-rw-r--r--  fs/ext4/mballoc.c | 46
-rw-r--r--  fs/ext4/namei.c | 6
-rw-r--r--  fs/ext4/super.c | 54
-rw-r--r--  fs/ext4/xattr.c | 6
-rw-r--r--  fs/fat/namei_msdos.c | 2
-rw-r--r--  fs/fat/namei_vfat.c | 4
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/fuse/fuse_i.h | 2
-rw-r--r--  fs/gfs2/ops_dentry.c | 2
-rw-r--r--  fs/gfs2/super.h | 2
-rw-r--r--  fs/hfs/hfs_fs.h | 2
-rw-r--r--  fs/hfs/sysdep.c | 2
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 2
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hostfs/hostfs_kern.c | 4
-rw-r--r--  fs/hpfs/dentry.c | 2
-rw-r--r--  fs/inode.c | 6
-rw-r--r--  fs/isofs/inode.c | 2
-rw-r--r--  fs/jfs/acl.c | 2
-rw-r--r--  fs/jfs/inode.c | 6
-rw-r--r--  fs/jfs/jfs_dtree.c | 18
-rw-r--r--  fs/jfs/jfs_extent.c | 10
-rw-r--r--  fs/jfs/jfs_inode.c | 4
-rw-r--r--  fs/jfs/jfs_inode.h | 2
-rw-r--r--  fs/jfs/jfs_xtree.c | 14
-rw-r--r--  fs/jfs/namei.c | 10
-rw-r--r--  fs/jfs/xattr.c | 12
-rw-r--r--  fs/libfs.c | 5
-rw-r--r--  fs/namei.c | 48
-rw-r--r--  fs/namespace.c | 3
-rw-r--r--  fs/ncpfs/dir.c | 4
-rw-r--r--  fs/nfs/dir.c | 4
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfsd/vfs.c | 4
-rw-r--r--  fs/notify/inotify/inotify.c | 16
-rw-r--r--  fs/ocfs2/dcache.c | 2
-rw-r--r--  fs/ocfs2/dcache.h | 2
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/pipe.c | 7
-rw-r--r--  fs/proc/base.c | 6
-rw-r--r--  fs/proc/generic.c | 2
-rw-r--r--  fs/proc/proc_sysctl.c | 4
-rw-r--r--  fs/proc/root.c | 3
-rw-r--r--  fs/quota/Kconfig | 59
-rw-r--r--  fs/quota/Makefile | 14
-rw-r--r--  fs/quota/dquot.c (renamed from fs/dquot.c) | 572
-rw-r--r--  fs/quota/quota.c (renamed from fs/quota.c) | 37
-rw-r--r--  fs/quota/quota_tree.c (renamed from fs/quota_tree.c) | 132
-rw-r--r--  fs/quota/quota_tree.h (renamed from fs/quota_tree.h) | 0
-rw-r--r--  fs/quota/quota_v1.c (renamed from fs/quota_v1.c) | 48
-rw-r--r--  fs/quota/quota_v2.c (renamed from fs/quota_v2.c) | 3
-rw-r--r--  fs/quota/quotaio_v1.h (renamed from fs/quotaio_v1.h) | 0
-rw-r--r--  fs/quota/quotaio_v2.h (renamed from fs/quotaio_v2.h) | 0
-rw-r--r--  fs/ramfs/file-nommu.c | 6
-rw-r--r--  fs/reiserfs/bitmap.c | 14
-rw-r--r--  fs/reiserfs/inode.c | 10
-rw-r--r--  fs/reiserfs/namei.c | 6
-rw-r--r--  fs/reiserfs/stree.c | 14
-rw-r--r--  fs/reiserfs/super.c | 60
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/smbfs/dir.c | 4
-rw-r--r--  fs/super.c | 17
-rw-r--r--  fs/sync.c | 2
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/sysv/namei.c | 2
-rw-r--r--  fs/sysv/sysv.h | 2
-rw-r--r--  fs/ubifs/super.c | 3
-rw-r--r--  fs/udf/balloc.c | 14
-rw-r--r--  fs/udf/ialloc.c | 8
-rw-r--r--  fs/ufs/balloc.c | 12
-rw-r--r--  fs/ufs/ialloc.c | 8
-rw-r--r--  fs/ufs/inode.c | 39
-rw-r--r--  fs/ufs/namei.c | 2
-rw-r--r--  fs/ufs/super.c | 11
-rw-r--r--  fs/ufs/ufs.h | 2
-rw-r--r--  include/drm/drmP.h | 77
-rw-r--r--  include/drm/drm_pciids.h | 2
-rw-r--r--  include/linux/buffer_head.h | 7
-rw-r--r--  include/linux/compat.h | 8
-rw-r--r--  include/linux/dcache.h | 2
-rw-r--r--  include/linux/firewire-cdev.h | 218
-rw-r--r--  include/linux/fs.h | 220
-rw-r--r--  include/linux/ieee80211.h | 17
-rw-r--r--  include/linux/if_frad.h | 1
-rw-r--r--  include/linux/ncp_fs.h | 2
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/linux/netfilter/x_tables.h | 23
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/nl80211.h | 88
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/quota.h | 11
-rw-r--r--  include/linux/quotaops.h | 119
-rw-r--r--  include/net/cfg80211.h | 203
-rw-r--r--  include/net/ethoc.h | 22
-rw-r--r--  include/net/ieee80211_radiotap.h | 4
-rw-r--r--  include/net/mac80211.h | 84
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 14
-rw-r--r--  include/net/netfilter/nf_conntrack_helper.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h | 7
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 7
-rw-r--r--  include/net/netfilter/nf_conntrack_tuple.h | 6
-rw-r--r--  include/net/netlink.h | 1
-rw-r--r--  include/net/netns/conntrack.h | 5
-rw-r--r--  kernel/cgroup.c | 5
-rw-r--r--  lib/nlattr.c | 27
-rw-r--r--  net/appletalk/ddp.c | 6
-rw-r--r--  net/ax25/af_ax25.c | 12
-rw-r--r--  net/core/dev.c | 9
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 18
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 27
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 63
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/ipv6/ip6_input.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 27
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 6
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 6
-rw-r--r--  net/mac80211/agg-rx.c | 8
-rw-r--r--  net/mac80211/agg-tx.c | 232
-rw-r--r--  net/mac80211/cfg.c | 245
-rw-r--r--  net/mac80211/debugfs.c | 24
-rw-r--r--  net/mac80211/ibss.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 78
-rw-r--r--  net/mac80211/iface.c | 14
-rw-r--r--  net/mac80211/main.c | 25
-rw-r--r--  net/mac80211/mlme.c | 259
-rw-r--r--  net/mac80211/pm.c | 78
-rw-r--r--  net/mac80211/rate.c | 6
-rw-r--r--  net/mac80211/rate.h | 4
-rw-r--r--  net/mac80211/rx.c | 29
-rw-r--r--  net/mac80211/scan.c | 77
-rw-r--r--  net/mac80211/sta_info.c | 17
-rw-r--r--  net/mac80211/sta_info.h | 7
-rw-r--r--  net/mac80211/tx.c | 579
-rw-r--r--  net/mac80211/util.c | 126
-rw-r--r--  net/mac80211/wep.c | 21
-rw-r--r--  net/mac80211/wext.c | 33
-rw-r--r--  net/mac80211/wpa.c | 28
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 129
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 94
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 15
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 57
-rw-r--r--  net/netfilter/xt_connlimit.c | 6
-rw-r--r--  net/netfilter/xt_physdev.c | 21
-rw-r--r--  net/netrom/af_netrom.c | 17
-rw-r--r--  net/rose/af_rose.c | 4
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/sunrpc/rpc_pipe.c | 2
-rw-r--r--  net/wireless/Kconfig | 50
-rw-r--r--  net/wireless/Makefile | 3
-rw-r--r--  net/wireless/core.c | 30
-rw-r--r--  net/wireless/core.h | 5
-rw-r--r--  net/wireless/mlme.c | 46
-rw-r--r--  net/wireless/nl80211.c | 824
-rw-r--r--  net/wireless/nl80211.h | 38
-rw-r--r--  net/wireless/reg.c | 65
-rw-r--r--  net/wireless/scan.c | 27
-rw-r--r--  net/wireless/wext-compat.c | 11
-rw-r--r--  net/x25/af_x25.c | 6
-rw-r--r--  net/xfrm/xfrm_state.c | 2
408 files changed, 18120 insertions, 6386 deletions
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index 8af6d9626878..fbeaffc1dcc3 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -227,6 +227,12 @@ usage should require reading the full document.
!Pinclude/net/mac80211.h Powersave support
</chapter>
+ <chapter id="beacon-filter">
+ <title>Beacon filter support</title>
+!Pinclude/net/mac80211.h Beacon filter support
+!Finclude/net/mac80211.h ieee80211_beacon_loss
+ </chapter>
+
<chapter id="qos">
<title>Multiple queues and QoS support</title>
<para>TBD</para>
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index e47c0ff8ba7a..02ea3773535e 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,20 +6,47 @@ be removed from this file.
---------------------------
-What: old static regulatory information and ieee80211_regdom module parameter
-When: 2.6.29
+What: The ieee80211_regdom module parameter
+When: March 2010 / desktop catchup
+
+Why: This was inherited by the CONFIG_WIRELESS_OLD_REGULATORY code,
+ and currently serves as an option for users to define an
+ ISO / IEC 3166 alpha2 code for the country they are currently
+ present in. Although there are userspace API replacements for this
+ through nl80211 distributions haven't yet caught up with implementing
+ decent alternatives through standard GUIs. Although available as an
+ option through iw or wpa_supplicant its just a matter of time before
+ distributions pick up good GUI options for this. The ideal solution
+ would actually consist of intelligent designs which would do this for
+ the user automatically even when travelling through different countries.
+ Until then we leave this module parameter as a compromise.
+
+ When userspace improves with reasonable, widely available alternatives for
+ this, we will no longer need this module parameter. This entry hopes that
+ by the super-futuristically looking date of "March 2010" we will have
+ such replacements widely available.
+
+Who: Luis R. Rodriguez <lrodriguez@atheros.com>
+
+---------------------------
+
+What: CONFIG_WIRELESS_OLD_REGULATORY - old static regulatory information
+When: March 2010 / desktop catchup
+
Why: The old regulatory infrastructure has been replaced with a new one
which does not require statically defined regulatory domains. We do
not want to keep static regulatory domains in the kernel due to the
the dynamic nature of regulatory law and localization. We kept around
the old static definitions for the regulatory domains of:
+
* US
* JP
* EU
+
and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
- set. We also kept around the ieee80211_regdom module parameter in case
- some applications were relying on it. Changing regulatory domains
- can now be done instead by using nl80211, as is done with iw.
+ set. We will remove this option once the standard Linux desktop catches
+ up with the new userspace APIs we have implemented.
+
Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index fa7be04b0cf0..64c89c215b01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -765,6 +765,14 @@ L: linux-wireless@vger.kernel.org
L: ath9k-devel@lists.ath9k.org
S: Supported
+ATHEROS AR9170 WIRELESS DRIVER
+P: Christian Lamparter
+M: chunkeey@web.de
+L: linux-wireless@vger.kernel.org
+W: http://wireless.kernel.org/en/users/Drivers/ar9170
+S: Maintained
+F: drivers/net/wireless/ar9170/
+
ATI_REMOTE2 DRIVER
P: Ville Syrjala
M: syrjala@sci.fi
@@ -3602,7 +3610,7 @@ S: Maintained
RALINK RT2X00 WIRELESS LAN DRIVER
P: rt2x00 project
L: linux-wireless@vger.kernel.org
-L: rt2400-devel@lists.sourceforge.net
+L: users@rt2x00.serialmonkey.com
W: http://rt2x00.serialmonkey.com/
S: Maintained
T: git kernel.org:/pub/scm/linux/kernel/git/ivd/rt2x00.git
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index e4a54b615894..b45d913a51c3 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -903,8 +903,9 @@ sys_alpha_pipe:
stq $26, 0($sp)
.prologue 0
+ mov $31, $17
lda $16, 8($sp)
- jsr $26, do_pipe
+ jsr $26, do_pipe_flags
ldq $26, 0($sp)
bne $0, 1f
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ae41f097864b..42ee05981e71 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -46,8 +46,6 @@
#include <asm/hwrpb.h>
#include <asm/processor.h>
-extern int do_pipe(int *);
-
/*
* Brk needs to return an error. Still support Linux's brk(0) query idiom,
* which OSF programs just shouldn't be doing. We're still not quite
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f8395e9a5..af9405cd70e5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
data8 sys_ni_syscall
data8 sys_umask /* 60 */
data8 sys_chroot
- data8 sys_ustat
+ data8 compat_sys_ustat
data8 sys_dup2
data8 sys_getppid
data8 sys_getpgrp /* 65 */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e499757309b..5c0f408cfd71 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
return 1;
}
-static struct dentry_operations pfmfs_dentry_operations = {
+static const struct dentry_operations pfmfs_dentry_operations = {
.d_delete = pfmfs_delete_dentry,
};
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 49aac6e17df9..2a472713de8e 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -355,40 +355,6 @@ SYSCALL_DEFINE1(32_personality, unsigned long, personality)
return ret;
}
-/* ustat compatibility */
-struct ustat32 {
- compat_daddr_t f_tfree;
- compat_ino_t f_tinode;
- char f_fname[6];
- char f_fpack[6];
-};
-
-extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
-
-SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
-{
- int err;
- struct ustat tmp;
- struct ustat32 tmp32;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- err = sys_ustat(dev, (struct ustat __user *)&tmp);
- set_fs(old_fs);
-
- if (err)
- goto out;
-
- memset(&tmp32, 0, sizeof(struct ustat32));
- tmp32.f_tfree = tmp.f_tfree;
- tmp32.f_tinode = tmp.f_tinode;
-
- err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
-
-out:
- return err;
-}
-
SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
compat_off_t __user *, offset, s32, count)
{
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 7438e92f8a01..f61d6b0e5731 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -253,7 +253,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_utime /* 6130 */
PTR sys_mknod
PTR sys_32_personality
- PTR sys_32_ustat
+ PTR compat_sys_ustat
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 6135 */
PTR sys_sysfs
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b0fef4ff9827..60997f1f69d4 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -265,7 +265,7 @@ sys_call_table:
PTR sys_olduname
PTR sys_umask /* 4060 */
PTR sys_chroot
- PTR sys_32_ustat
+ PTR compat_sys_ustat
PTR sys_dup2
PTR sys_getppid
PTR sys_getpgrp /* 4065 */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 303d2b647e41..03b9a01bc16c 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -130,7 +130,7 @@
ENTRY_OURS(newuname)
ENTRY_SAME(umask) /* 60 */
ENTRY_SAME(chroot)
- ENTRY_SAME(ustat)
+ ENTRY_COMP(ustat)
ENTRY_SAME(dup2)
ENTRY_SAME(getppid)
ENTRY_SAME(getpgrp) /* 65 */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 72353f6070a4..fe166491e9dc 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -65,7 +65,7 @@ SYSCALL(ni_syscall)
SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
COMPAT_SYS_SPU(umask)
SYSCALL_SPU(chroot)
-SYSCALL(ustat)
+COMPAT_SYS(ustat)
SYSCALL_SPU(dup2)
SYSCALL_SPU(getppid)
SYSCALL_SPU(getpgrp)
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 62c706eb0de6..87cf5a79a351 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -252,7 +252,7 @@ sys32_chroot_wrapper:
sys32_ustat_wrapper:
llgfr %r2,%r2 # dev_t
llgtr %r3,%r3 # struct ustat *
- jg sys_ustat
+ jg compat_sys_ustat
.globl sys32_dup2_wrapper
sys32_dup2_wrapper:
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 6cd1a5b65067..79457f682b5a 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
* If the address space is non-shared (ie. mm->count == 1) we avoid
* cross calls when we want to flush the currently running process's
* tlb state. This is done by clearing all cpu bits except the current
- * processor's in current->active_mm->cpu_vm_mask and performing the
+ * processor's in current->mm->cpu_vm_mask and performing the
* flush locally only. This will force any subsequent cpus which run
* this task to flush the context from the local tlb if the process
* migrates to another cpu (again).
@@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
- if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
else
smp_cross_call_masked(&xcall_flush_tlb_pending,
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f93c42a2b522..a8000b1cda74 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -51,7 +51,7 @@ sys_call_table32:
/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
.word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
- .word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
+ .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr
/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index f95066b6f805..db310aa00183 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -36,10 +36,10 @@
#include <linux/clocksource.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/irq.h>
#include <asm/oplib.h>
#include <asm/timer.h>
+#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/starfire.h>
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index fde510b664d3..434224e2229f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -86,7 +86,7 @@ static int uml_net_rx(struct net_device *dev)
drop_skb->dev = dev;
/* Read a packet into drop_skb and don't do anything with it. */
(*lp->read)(lp->fd, drop_skb, lp);
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return 0;
}
@@ -99,8 +99,8 @@ static int uml_net_rx(struct net_device *dev)
skb_trim(skb, pkt_len);
skb->protocol = (*lp->protocol)(skb);
- lp->stats.rx_bytes += skb->len;
- lp->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
netif_rx(skb);
return pkt_len;
}
@@ -224,8 +224,8 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = (*lp->write)(lp->fd, skb, lp);
if (len == skb->len) {
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
netif_start_queue(dev);
@@ -234,7 +234,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
else if (len == 0) {
netif_start_queue(dev);
- lp->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
}
else {
netif_start_queue(dev);
@@ -248,12 +248,6 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
-{
- struct uml_net_private *lp = netdev_priv(dev);
- return &lp->stats;
-}
-
static void uml_net_set_multicast_list(struct net_device *dev)
{
return;
@@ -377,6 +371,18 @@ static void net_device_release(struct device *dev)
free_netdev(netdev);
}
+static const struct net_device_ops uml_netdev_ops = {
+ .ndo_open = uml_net_open,
+ .ndo_stop = uml_net_close,
+ .ndo_start_xmit = uml_net_start_xmit,
+ .ndo_set_multicast_list = uml_net_set_multicast_list,
+ .ndo_tx_timeout = uml_net_tx_timeout,
+ .ndo_set_mac_address = uml_net_set_mac,
+ .ndo_change_mtu = uml_net_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/*
* Ensures that platform_driver_register is called only once by
* eth_configure. Will be set in an initcall.
@@ -473,14 +479,7 @@ static void eth_configure(int n, void *init, char *mac,
set_ether_mac(dev, device->mac);
dev->mtu = transport->user->mtu;
- dev->open = uml_net_open;
- dev->hard_start_xmit = uml_net_start_xmit;
- dev->stop = uml_net_close;
- dev->get_stats = uml_net_get_stats;
- dev->set_multicast_list = uml_net_set_multicast_list;
- dev->tx_timeout = uml_net_tx_timeout;
- dev->set_mac_address = uml_net_set_mac;
- dev->change_mtu = uml_net_change_mtu;
+ dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
dev->watchdog_timeo = (HZ >> 1);
dev->irq = UM_ETH_IRQ;
diff --git a/arch/um/include/shared/net_kern.h b/arch/um/include/shared/net_kern.h
index d843c7924a7c..5c367f22595b 100644
--- a/arch/um/include/shared/net_kern.h
+++ b/arch/um/include/shared/net_kern.h
@@ -26,7 +26,7 @@ struct uml_net_private {
spinlock_t lock;
struct net_device *dev;
struct timer_list tl;
- struct net_device_stats stats;
+
struct work_struct work;
int fd;
unsigned char mac[ETH_ALEN];
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 097a6b64c24d..db0c803170ab 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -557,7 +557,7 @@ ia32_sys_call_table:
.quad sys32_olduname
.quad sys_umask /* 60 */
.quad sys_chroot
- .quad sys32_ustat
+ .quad compat_sys_ustat
.quad sys_dup2
.quad sys_getppid
.quad sys_getpgrp /* 65 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 6c0d7f6231af..efac92fd1efb 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -638,28 +638,6 @@ long sys32_uname(struct old_utsname __user *name)
return err ? -EFAULT : 0;
}
-long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
-{
- struct ustat u;
- mm_segment_t seg;
- int ret;
-
- seg = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ustat(dev, (struct ustat __user *)&u);
- set_fs(seg);
- if (ret < 0)
- return ret;
-
- if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
- __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
- __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
- __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
- __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
- ret = -EFAULT;
- return ret;
-}
-
asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
compat_uptr_t __user *envp, struct pt_regs *regs)
{
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 50ca486fd88c..1f7e62517284 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -129,13 +129,6 @@ typedef struct compat_siginfo {
} _sifields;
} compat_siginfo_t;
-struct ustat32 {
- __u32 f_tfree;
- compat_ino_t f_tinode;
- char f_fname[6];
- char f_fpack[6];
-};
-
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index ffb08be2a530..72a6dcd1299b 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -70,8 +70,6 @@ struct old_utsname;
asmlinkage long sys32_olduname(struct oldold_utsname __user *);
long sys32_uname(struct old_utsname __user *);
-long sys32_ustat(unsigned, struct ustat32 __user *);
-
asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *,
compat_uptr_t __user *, struct pt_regs *);
asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4373adb2119a..9d9490e22e07 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -26,6 +26,10 @@
#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
@@ -60,7 +64,12 @@
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+
+#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4; /* add in BIOS popup space */
- } else if (IS_G33) {
+ } else if (IS_G33 && !IS_IGD) {
/* G33's GTT size defined in gmch_ctrl */
switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
case G33_PGETBL_SIZE_1M:
@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4;
- } else if (IS_G4X) {
+ } else if (IS_G4X || IS_IGD) {
/* On 4 series hardware, GTT stolen is separate from graphics
* stolen, ignore it in stolen gtt entries counting. However,
* 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -2161,6 +2170,10 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
+ { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+ NULL, &intel_g33_driver },
+ { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+ NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
"Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
ID(PCI_DEVICE_ID_INTEL_82G35_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a5dd7a665aa8..8b8c8c22f0fc 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -63,8 +63,7 @@ static int descriptor_count;
#define BIB_CMC ((1) << 30)
#define BIB_IMC ((1) << 31)
-static u32 *
-generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
{
struct fw_descriptor *desc;
static u32 config_rom[256];
@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
return config_rom;
}
-static void
-update_config_roms(void)
+static void update_config_roms(void)
{
struct fw_card *card;
u32 *config_rom;
@@ -141,8 +139,7 @@ update_config_roms(void)
}
}
-int
-fw_core_add_descriptor(struct fw_descriptor *desc)
+int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
return 0;
}
-void
-fw_core_remove_descriptor(struct fw_descriptor *desc)
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
mutex_lock(&card_mutex);
@@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
+static int set_broadcast_channel(struct device *dev, void *data)
+{
+ fw_device_set_broadcast_channel(fw_device(dev), (long)data);
+ return 0;
+}
+
+static void allocate_broadcast_channel(struct fw_card *card, int generation)
+{
+ int channel, bandwidth = 0;
+
+ fw_iso_resource_manage(card, generation, 1ULL << 31,
+ &channel, &bandwidth, true);
+ if (channel == 31) {
+ card->broadcast_channel_allocated = true;
+ device_for_each_child(card->device, (void *)(long)generation,
+ set_broadcast_channel);
+ }
+}
+
static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
-void
-fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
int scheduled;
@@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
-static void
-fw_card_bm_work(struct work_struct *work)
+static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
struct fw_device *root_device;
- struct fw_node *root_node, *local_node;
+ struct fw_node *root_node;
unsigned long flags;
- int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
+ int root_id, new_root_id, irm_id, local_id;
+ int gap_count, generation, grace, rcode;
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
__be32 lock_data[2];
spin_lock_irqsave(&card->lock, flags);
- local_node = card->local_node;
- root_node = card->root_node;
- if (local_node == NULL) {
+ if (card->local_node == NULL) {
spin_unlock_irqrestore(&card->lock, flags);
goto out_put_card;
}
- fw_node_get(local_node);
- fw_node_get(root_node);
generation = card->generation;
+ root_node = card->root_node;
+ fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;
- root_id = root_node->node_id;
- grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+ root_id = root_node->node_id;
+ irm_id = card->irm_node->node_id;
+ local_id = card->local_node->node_id;
+
+ grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
if (is_next_generation(generation, card->bm_generation) ||
(card->bm_generation != generation && grace)) {
@@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work)
* next generation.
*/
- irm_id = card->irm_node->node_id;
if (!card->irm_node->link_on) {
- new_root_id = local_node->node_id;
+ new_root_id = local_id;
fw_notify("IRM has link off, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
}
lock_data[0] = cpu_to_be32(0x3f);
- lock_data[1] = cpu_to_be32(local_node->node_id);
+ lock_data[1] = cpu_to_be32(local_id);
spin_unlock_irqrestore(&card->lock, flags);
@@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work)
goto out;
if (rcode == RCODE_COMPLETE &&
- lock_data[0] != cpu_to_be32(0x3f))
- /* Somebody else is BM, let them do the work. */
+ lock_data[0] != cpu_to_be32(0x3f)) {
+
+ /* Somebody else is BM. Only act as IRM. */
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+
goto out;
+ }
spin_lock_irqsave(&card->lock, flags);
@@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work)
* do a bus reset and pick the local node as
* root, and thus, IRM.
*/
- new_root_id = local_node->node_id;
+ new_root_id = local_id;
fw_notify("BM lock failed, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
/*
- * OK, we weren't BM in the last generation, and it's
- * less than 100ms since last bus reset. Reschedule
- * this task 100ms from now.
+ * We weren't BM in the last generation, and the last
+ * bus reset is less than 125ms ago. Reschedule this job.
*/
spin_unlock_irqrestore(&card->lock, flags);
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
+ fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
@@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work)
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
*/
- new_root_id = local_node->node_id;
+ new_root_id = local_id;
} else if (!root_device_is_running) {
/*
* If we haven't probed this device yet, bail out now
@@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work)
* successfully read the config rom, but it's not
* cycle master capable.
*/
- new_root_id = local_node->node_id;
+ new_root_id = local_id;
}
pick_me:
@@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work)
card->index, new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
fw_core_initiate_bus_reset(card, 1);
+ /* Will allocate broadcast channel after the reset. */
+ } else {
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
}
+
out:
fw_node_put(root_node);
- fw_node_put(local_node);
out_put_card:
fw_card_put(card);
}
-static void
-flush_timer_callback(unsigned long data)
+static void flush_timer_callback(unsigned long data)
{
struct fw_card *card = (struct fw_card *)data;
fw_flush_transactions(card);
}
-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
- struct device *device)
+void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver,
+ struct device *device)
{
static atomic_t index = ATOMIC_INIT(-1);
@@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
}
EXPORT_SYMBOL(fw_card_initialize);
-int
-fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card,
+ u32 max_receive, u32 link_speed, u64 guid)
{
u32 *config_rom;
size_t length;
- int err;
+ int ret;
card->max_receive = max_receive;
card->link_speed = link_speed;
@@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card,
list_add_tail(&card->link, &card_list);
mutex_unlock(&card_mutex);
- err = card->driver->enable(card, config_rom, length);
- if (err < 0) {
+ ret = card->driver->enable(card, config_rom, length);
+ if (ret < 0) {
mutex_lock(&card_mutex);
list_del(&card->link);
mutex_unlock(&card_mutex);
}
- return err;
+
+ return ret;
}
EXPORT_SYMBOL(fw_card_add);
@@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add);
* dummy driver just fails all IO.
*/
-static int
-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
BUG();
return -1;
}
-static int
-dummy_update_phy_reg(struct fw_card *card, int address,
- int clear_bits, int set_bits)
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+ int clear_bits, int set_bits)
{
return -ENODEV;
}
-static int
-dummy_set_config_rom(struct fw_card *card,
- u32 *config_rom, size_t length)
+static int dummy_set_config_rom(struct fw_card *card,
+ u32 *config_rom, size_t length)
{
/*
* We take the card out of card_list before setting the dummy
@@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card,
return -1;
}
-static void
-dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}
-static void
-dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}
-static int
-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
return -ENOENT;
}
-static int
-dummy_enable_phys_dma(struct fw_card *card,
- int node_id, int generation)
+static int dummy_enable_phys_dma(struct fw_card *card,
+ int node_id, int generation)
{
return -ENODEV;
}
@@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = {
.enable_phys_dma = dummy_enable_phys_dma,
};
-void
-fw_card_release(struct kref *kref)
+void fw_card_release(struct kref *kref)
{
struct fw_card *card = container_of(kref, struct fw_card, kref);
complete(&card->done);
}
-void
-fw_core_remove_card(struct fw_card *card)
+void fw_core_remove_card(struct fw_card *card)
{
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -536,8 +548,7 @@ fw_core_remove_card(struct fw_card *card)
}
EXPORT_SYMBOL(fw_core_remove_card);
-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index ed03234cbea8..7eb6594cc3e5 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -18,87 +18,162 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
+#include <linux/spinlock.h>
#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/idr.h>
-#include <linux/compat.h>
-#include <linux/firewire-cdev.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
#include <asm/system.h>
#include <asm/uaccess.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
#include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
+
+struct client {
+ u32 version;
+ struct fw_device *device;
+
+ spinlock_t lock;
+ bool in_shutdown;
+ struct idr resource_idr;
+ struct list_head event_list;
+ wait_queue_head_t wait;
+ u64 bus_reset_closure;
+
+ struct fw_iso_context *iso_context;
+ u64 iso_closure;
+ struct fw_iso_buffer buffer;
+ unsigned long vm_start;
-struct client;
-struct client_resource {
struct list_head link;
- void (*release)(struct client *client, struct client_resource *r);
- u32 handle;
+ struct kref kref;
};
+static inline void client_get(struct client *client)
+{
+ kref_get(&client->kref);
+}
+
+static void client_release(struct kref *kref)
+{
+ struct client *client = container_of(kref, struct client, kref);
+
+ fw_device_put(client->device);
+ kfree(client);
+}
+
+static void client_put(struct client *client)
+{
+ kref_put(&client->kref, client_release);
+}
+
+struct client_resource;
+typedef void (*client_resource_release_fn_t)(struct client *,
+ struct client_resource *);
+struct client_resource {
+ client_resource_release_fn_t release;
+ int handle;
+};
+
+struct address_handler_resource {
+ struct client_resource resource;
+ struct fw_address_handler handler;
+ __u64 closure;
+ struct client *client;
+};
+
+struct outbound_transaction_resource {
+ struct client_resource resource;
+ struct fw_transaction transaction;
+};
+
+struct inbound_transaction_resource {
+ struct client_resource resource;
+ struct fw_request *request;
+ void *data;
+ size_t length;
+};
+
+struct descriptor_resource {
+ struct client_resource resource;
+ struct fw_descriptor descriptor;
+ u32 data[0];
+};
+
+struct iso_resource {
+ struct client_resource resource;
+ struct client *client;
+ /* Schedule work and access todo only with client->lock held. */
+ struct delayed_work work;
+ enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
+ ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
+ int generation;
+ u64 channels;
+ s32 bandwidth;
+ struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
/*
* dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in the struct.
+ * the first field in a struct XYZ_event.
*/
-
struct event {
struct { void *data; size_t size; } v[2];
struct list_head link;
};
-struct bus_reset {
+struct bus_reset_event {
struct event event;
struct fw_cdev_event_bus_reset reset;
};
-struct response {
+struct outbound_transaction_event {
struct event event;
- struct fw_transaction transaction;
struct client *client;
- struct client_resource resource;
+ struct outbound_transaction_resource r;
struct fw_cdev_event_response response;
};
-struct iso_interrupt {
+struct inbound_transaction_event {
struct event event;
- struct fw_cdev_event_iso_interrupt interrupt;
+ struct fw_cdev_event_request request;
};
-struct client {
- u32 version;
- struct fw_device *device;
- spinlock_t lock;
- u32 resource_handle;
- struct list_head resource_list;
- struct list_head event_list;
- wait_queue_head_t wait;
- u64 bus_reset_closure;
-
- struct fw_iso_context *iso_context;
- u64 iso_closure;
- struct fw_iso_buffer buffer;
- unsigned long vm_start;
+struct iso_interrupt_event {
+ struct event event;
+ struct fw_cdev_event_iso_interrupt interrupt;
+};
- struct list_head link;
+struct iso_resource_event {
+ struct event event;
+ struct fw_cdev_event_iso_resource resource;
};
-static inline void __user *
-u64_to_uptr(__u64 value)
+static inline void __user *u64_to_uptr(__u64 value)
{
return (void __user *)(unsigned long)value;
}
-static inline __u64
-uptr_to_u64(void __user *ptr)
+static inline __u64 uptr_to_u64(void __user *ptr)
{
return (__u64)(unsigned long)ptr;
}
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
{
struct fw_device *device;
struct client *client;
- unsigned long flags;
device = fw_device_get_by_devt(inode->i_rdev);
if (device == NULL)
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
}
client->device = device;
- INIT_LIST_HEAD(&client->event_list);
- INIT_LIST_HEAD(&client->resource_list);
spin_lock_init(&client->lock);
+ idr_init(&client->resource_idr);
+ INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
+ kref_init(&client->kref);
file->private_data = client;
- spin_lock_irqsave(&device->card->lock, flags);
+ mutex_lock(&device->client_list_mutex);
list_add_tail(&client->link, &device->client_list);
- spin_unlock_irqrestore(&device->card->lock, flags);
+ mutex_unlock(&device->client_list_mutex);
return 0;
}
@@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event,
event->v[1].size = size1;
spin_lock_irqsave(&client->lock, flags);
- list_add_tail(&event->link, &client->event_list);
+ if (client->in_shutdown)
+ kfree(event);
+ else
+ list_add_tail(&event->link, &client->event_list);
spin_unlock_irqrestore(&client->lock, flags);
wake_up_interruptible(&client->wait);
}
-static int
-dequeue_event(struct client *client, char __user *buffer, size_t count)
+static int dequeue_event(struct client *client,
+ char __user *buffer, size_t count)
{
- unsigned long flags;
struct event *event;
size_t size, total;
- int i, retval;
+ int i, ret;
- retval = wait_event_interruptible(client->wait,
- !list_empty(&client->event_list) ||
- fw_device_is_shutdown(client->device));
- if (retval < 0)
- return retval;
+ ret = wait_event_interruptible(client->wait,
+ !list_empty(&client->event_list) ||
+ fw_device_is_shutdown(client->device));
+ if (ret < 0)
+ return ret;
if (list_empty(&client->event_list) &&
fw_device_is_shutdown(client->device))
return -ENODEV;
- spin_lock_irqsave(&client->lock, flags);
- event = container_of(client->event_list.next, struct event, link);
+ spin_lock_irq(&client->lock);
+ event = list_first_entry(&client->event_list, struct event, link);
list_del(&event->link);
- spin_unlock_irqrestore(&client->lock, flags);
+ spin_unlock_irq(&client->lock);
total = 0;
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
size = min(event->v[i].size, count - total);
if (copy_to_user(buffer + total, event->v[i].data, size)) {
- retval = -EFAULT;
+ ret = -EFAULT;
goto out;
}
total += size;
}
- retval = total;
+ ret = total;
out:
kfree(event);
- return retval;
+ return ret;
}
-static ssize_t
-fw_device_op_read(struct file *file,
- char __user *buffer, size_t count, loff_t *offset)
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *offset)
{
struct client *client = file->private_data;
return dequeue_event(client, buffer, count);
}
-/* caller must hold card->lock so that node pointers can be dereferenced here */
-static void
-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
- struct client *client)
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+ struct client *client)
{
struct fw_card *card = client->device->card;
+ spin_lock_irq(&card->lock);
+
event->closure = client->bus_reset_closure;
event->type = FW_CDEV_EVENT_BUS_RESET;
event->generation = client->device->generation;
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
event->bm_node_id = 0; /* FIXME: We don't track the BM. */
event->irm_node_id = card->irm_node->node_id;
event->root_node_id = card->root_node->node_id;
+
+ spin_unlock_irq(&card->lock);
}
-static void
-for_each_client(struct fw_device *device,
- void (*callback)(struct client *client))
+static void for_each_client(struct fw_device *device,
+ void (*callback)(struct client *client))
{
- struct fw_card *card = device->card;
struct client *c;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
+ mutex_lock(&device->client_list_mutex);
list_for_each_entry(c, &device->client_list, link)
callback(c);
+ mutex_unlock(&device->client_list_mutex);
+}
+
+static int schedule_reallocations(int id, void *p, void *data)
+{
+ struct client_resource *r = p;
- spin_unlock_irqrestore(&card->lock, flags);
+ if (r->release == release_iso_resource)
+ schedule_iso_resource(container_of(r,
+ struct iso_resource, resource));
+ return 0;
}
-static void
-queue_bus_reset_event(struct client *client)
+static void queue_bus_reset_event(struct client *client)
{
- struct bus_reset *bus_reset;
+ struct bus_reset_event *e;
- bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
- if (bus_reset == NULL) {
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL) {
fw_notify("Out of memory when allocating bus reset event\n");
return;
}
- fill_bus_reset_event(&bus_reset->reset, client);
+ fill_bus_reset_event(&e->reset, client);
+
+ queue_event(client, &e->event,
+ &e->reset, sizeof(e->reset), NULL, 0);
- queue_event(client, &bus_reset->event,
- &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+ spin_lock_irq(&client->lock);
+ idr_for_each(&client->resource_idr, schedule_reallocations, client);
+ spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer)
{
struct fw_cdev_get_info *get_info = buffer;
struct fw_cdev_event_bus_reset bus_reset;
- struct fw_card *card = client->device->card;
unsigned long ret = 0;
client->version = get_info->version;
get_info->version = FW_CDEV_VERSION;
+ get_info->card = client->device->card->index;
down_read(&fw_device_rwsem);
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer)
client->bus_reset_closure = get_info->bus_reset_closure;
if (get_info->bus_reset != 0) {
void __user *uptr = u64_to_uptr(get_info->bus_reset);
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
fill_bus_reset_event(&bus_reset, client);
- spin_unlock_irqrestore(&card->lock, flags);
-
if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
return -EFAULT;
}
- get_info->card = card->index;
-
return 0;
}
-static void
-add_client_resource(struct client *client, struct client_resource *resource)
+static int add_client_resource(struct client *client,
+ struct client_resource *resource, gfp_t gfp_mask)
{
unsigned long flags;
+ int ret;
+
+ retry:
+ if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+ return -ENOMEM;
spin_lock_irqsave(&client->lock, flags);
- list_add_tail(&resource->link, &client->resource_list);
- resource->handle = client->resource_handle++;
+ if (client->in_shutdown)
+ ret = -ECANCELED;
+ else
+ ret = idr_get_new(&client->resource_idr, resource,
+ &resource->handle);
+ if (ret >= 0) {
+ client_get(client);
+ if (resource->release == release_iso_resource)
+ schedule_iso_resource(container_of(resource,
+ struct iso_resource, resource));
+ }
spin_unlock_irqrestore(&client->lock, flags);
+
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return ret < 0 ? ret : 0;
}
-static int
-release_client_resource(struct client *client, u32 handle,
- struct client_resource **resource)
+static int release_client_resource(struct client *client, u32 handle,
+ client_resource_release_fn_t release,
+ struct client_resource **resource)
{
struct client_resource *r;
- unsigned long flags;
- spin_lock_irqsave(&client->lock, flags);
- list_for_each_entry(r, &client->resource_list, link) {
- if (r->handle == handle) {
- list_del(&r->link);
- break;
- }
- }
- spin_unlock_irqrestore(&client->lock, flags);
+ spin_lock_irq(&client->lock);
+ if (client->in_shutdown)
+ r = NULL;
+ else
+ r = idr_find(&client->resource_idr, handle);
+ if (r && r->release == release)
+ idr_remove(&client->resource_idr, handle);
+ spin_unlock_irq(&client->lock);
- if (&r->link == &client->resource_list)
+ if (!(r && r->release == release))
return -EINVAL;
if (resource)
@@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle,
else
r->release(client, r);
+ client_put(client);
+
return 0;
}
-static void
-release_transaction(struct client *client, struct client_resource *resource)
+static void release_transaction(struct client *client,
+ struct client_resource *resource)
{
- struct response *response =
- container_of(resource, struct response, resource);
+ struct outbound_transaction_resource *r = container_of(resource,
+ struct outbound_transaction_resource, resource);
- fw_cancel_transaction(client->device->card, &response->transaction);
+ fw_cancel_transaction(client->device->card, &r->transaction);
}
-static void
-complete_transaction(struct fw_card *card, int rcode,
- void *payload, size_t length, void *data)
+static void complete_transaction(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
{
- struct response *response = data;
- struct client *client = response->client;
+ struct outbound_transaction_event *e = data;
+ struct fw_cdev_event_response *rsp = &e->response;
+ struct client *client = e->client;
unsigned long flags;
- struct fw_cdev_event_response *r = &response->response;
- if (length < r->length)
- r->length = length;
+ if (length < rsp->length)
+ rsp->length = length;
if (rcode == RCODE_COMPLETE)
- memcpy(r->data, payload, r->length);
+ memcpy(rsp->data, payload, rsp->length);
spin_lock_irqsave(&client->lock, flags);
- list_del(&response->resource.link);
+ /*
+ * 1. If called while in shutdown, the idr tree must be left untouched.
+ * The idr handle will be removed and the client reference will be
+ * dropped later.
+ * 2. If the call chain was release_client_resource ->
+ * release_transaction -> complete_transaction (instead of a normal
+ * conclusion of the transaction), i.e. if this resource was already
+ * unregistered from the idr, the client reference will be dropped
+ * by release_client_resource and we must not drop it here.
+ */
+ if (!client->in_shutdown &&
+ idr_find(&client->resource_idr, e->r.resource.handle)) {
+ idr_remove(&client->resource_idr, e->r.resource.handle);
+ /* Drop the idr's reference */
+ client_put(client);
+ }
spin_unlock_irqrestore(&client->lock, flags);
- r->type = FW_CDEV_EVENT_RESPONSE;
- r->rcode = rcode;
+ rsp->type = FW_CDEV_EVENT_RESPONSE;
+ rsp->rcode = rcode;
/*
- * In the case that sizeof(*r) doesn't align with the position of the
+ * In the case that sizeof(*rsp) doesn't align with the position of the
* data, and the read is short, preserve an extra copy of the data
* to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
* for short reads and some apps depended on it, this is both safe
* and prudent for compatibility.
*/
- if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
- queue_event(client, &response->event, r, sizeof(*r),
- r->data, r->length);
+ if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
+ queue_event(client, &e->event, rsp, sizeof(*rsp),
+ rsp->data, rsp->length);
else
- queue_event(client, &response->event, r, sizeof(*r) + r->length,
+ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
NULL, 0);
+
+ /* Drop the transaction callback's reference */
+ client_put(client);
}
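
The comment above distinguishes three ownership cases; stripped to its essentials the pattern is a paired client_get()/client_put(). A stand-alone model of that pairing (an illustrative sketch, not the driver code; the names only mirror the helpers used above):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct client { atomic_int refs; };

	static void client_get(struct client *c) { atomic_fetch_add(&c->refs, 1); }

	static void client_put(struct client *c)
	{
		if (atomic_fetch_sub(&c->refs, 1) == 1)
			free(c);		/* last reference gone */
	}

	static void submit(struct client *c)
	{
		client_get(c);	/* reference held for the idr entry */
		client_get(c);	/* reference held for the pending completion */
		/* ... start the asynchronous transaction ... */
	}

	static void complete(struct client *c, int handle_still_in_idr)
	{
		/* ... deliver the response event ... */
		if (handle_still_in_idr)
			client_put(c);	/* drop the idr's reference */
		client_put(c);		/* drop the completion callback's reference */
	}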
-static int ioctl_send_request(struct client *client, void *buffer)
+static int init_request(struct client *client,
+ struct fw_cdev_send_request *request,
+ int destination_id, int speed)
{
- struct fw_device *device = client->device;
- struct fw_cdev_send_request *request = buffer;
- struct response *response;
+ struct outbound_transaction_event *e;
+ int ret;
- /* What is the biggest size we'll accept, really? */
- if (request->length > 4096)
- return -EINVAL;
+ if (request->tcode != TCODE_STREAM_DATA &&
+ (request->length > 4096 || request->length > 512 << speed))
+ return -EIO;
- response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
- if (response == NULL)
+ e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
+ if (e == NULL)
return -ENOMEM;
- response->client = client;
- response->response.length = request->length;
- response->response.closure = request->closure;
+ e->client = client;
+ e->response.length = request->length;
+ e->response.closure = request->closure;
if (request->data &&
- copy_from_user(response->response.data,
+ copy_from_user(e->response.data,
u64_to_uptr(request->data), request->length)) {
- kfree(response);
- return -EFAULT;
+ ret = -EFAULT;
+ goto failed;
}
- response->resource.release = release_transaction;
- add_client_resource(client, &response->resource);
+ e->r.resource.release = release_transaction;
+ ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
+ if (ret < 0)
+ goto failed;
- fw_send_request(device->card, &response->transaction,
- request->tcode & 0x1f,
- device->node->node_id,
- request->generation,
- device->max_speed,
- request->offset,
- response->response.data, request->length,
- complete_transaction, response);
+ /* Get a reference for the transaction callback */
+ client_get(client);
- if (request->data)
- return sizeof(request) + request->length;
- else
- return sizeof(request);
+ fw_send_request(client->device->card, &e->r.transaction,
+ request->tcode, destination_id, request->generation,
+ speed, request->offset, e->response.data,
+ request->length, complete_transaction, e);
+ return 0;
+
+ failed:
+ kfree(e);
+
+ return ret;
}
-struct address_handler {
- struct fw_address_handler handler;
- __u64 closure;
- struct client *client;
- struct client_resource resource;
-};
+static int ioctl_send_request(struct client *client, void *buffer)
+{
+ struct fw_cdev_send_request *request = buffer;
-struct request {
- struct fw_request *request;
- void *data;
- size_t length;
- struct client_resource resource;
-};
+ switch (request->tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_READ_QUADLET_REQUEST:
+ case TCODE_READ_BLOCK_REQUEST:
+ case TCODE_LOCK_MASK_SWAP:
+ case TCODE_LOCK_COMPARE_SWAP:
+ case TCODE_LOCK_FETCH_ADD:
+ case TCODE_LOCK_LITTLE_ADD:
+ case TCODE_LOCK_BOUNDED_ADD:
+ case TCODE_LOCK_WRAP_ADD:
+ case TCODE_LOCK_VENDOR_DEPENDENT:
+ break;
+ default:
+ return -EINVAL;
+ }
-struct request_event {
- struct event event;
- struct fw_cdev_event_request request;
-};
+ return init_request(client, request, client->device->node_id,
+ client->device->max_speed);
+}
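
For reference, a hedged userspace sketch of driving this ioctl through the character device ABI of <linux/firewire-cdev.h>. The offset (Config ROM base) and the omitted error handling are illustrative only; the FW_CDEV_EVENT_RESPONSE still has to be collected with read():

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>
	#include <linux/firewire-constants.h>

	static int read_rom_header(int fd, __u32 generation)
	{
		struct fw_cdev_send_request rq = {
			.tcode      = TCODE_READ_QUADLET_REQUEST,
			.length     = 4,
			.offset     = 0xfffff0000400ULL,  /* Config ROM base */
			.closure    = 0,                  /* echoed back in the event */
			.data       = 0,                  /* no payload for a read */
			.generation = generation,         /* from FW_CDEV_IOC_GET_INFO */
		};

		return ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
	}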
-static void
-release_request(struct client *client, struct client_resource *resource)
+static void release_request(struct client *client,
+ struct client_resource *resource)
{
- struct request *request =
- container_of(resource, struct request, resource);
+ struct inbound_transaction_resource *r = container_of(resource,
+ struct inbound_transaction_resource, resource);
- fw_send_response(client->device->card, request->request,
+ fw_send_response(client->device->card, r->request,
RCODE_CONFLICT_ERROR);
- kfree(request);
+ kfree(r);
}
-static void
-handle_request(struct fw_card *card, struct fw_request *r,
- int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+static void handle_request(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, int speed,
+ unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
{
- struct address_handler *handler = callback_data;
- struct request *request;
- struct request_event *e;
- struct client *client = handler->client;
+ struct address_handler_resource *handler = callback_data;
+ struct inbound_transaction_resource *r;
+ struct inbound_transaction_event *e;
+ int ret;
- request = kmalloc(sizeof(*request), GFP_ATOMIC);
+ r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (request == NULL || e == NULL) {
- kfree(request);
- kfree(e);
- fw_send_response(card, r, RCODE_CONFLICT_ERROR);
- return;
- }
+ if (r == NULL || e == NULL)
+ goto failed;
- request->request = r;
- request->data = payload;
- request->length = length;
+ r->request = request;
+ r->data = payload;
+ r->length = length;
- request->resource.release = release_request;
- add_client_resource(client, &request->resource);
+ r->resource.release = release_request;
+ ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
+ if (ret < 0)
+ goto failed;
e->request.type = FW_CDEV_EVENT_REQUEST;
e->request.tcode = tcode;
e->request.offset = offset;
e->request.length = length;
- e->request.handle = request->resource.handle;
+ e->request.handle = r->resource.handle;
e->request.closure = handler->closure;
- queue_event(client, &e->event,
+ queue_event(handler->client, &e->event,
&e->request, sizeof(e->request), payload, length);
+ return;
+
+ failed:
+ kfree(r);
+ kfree(e);
+ fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}
-static void
-release_address_handler(struct client *client,
- struct client_resource *resource)
+static void release_address_handler(struct client *client,
+ struct client_resource *resource)
{
- struct address_handler *handler =
- container_of(resource, struct address_handler, resource);
+ struct address_handler_resource *r =
+ container_of(resource, struct address_handler_resource, resource);
- fw_core_remove_address_handler(&handler->handler);
- kfree(handler);
+ fw_core_remove_address_handler(&r->handler);
+ kfree(r);
}
static int ioctl_allocate(struct client *client, void *buffer)
{
struct fw_cdev_allocate *request = buffer;
- struct address_handler *handler;
+ struct address_handler_resource *r;
struct fw_address_region region;
+ int ret;
- handler = kmalloc(sizeof(*handler), GFP_KERNEL);
- if (handler == NULL)
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ if (r == NULL)
return -ENOMEM;
region.start = request->offset;
region.end = request->offset + request->length;
- handler->handler.length = request->length;
- handler->handler.address_callback = handle_request;
- handler->handler.callback_data = handler;
- handler->closure = request->closure;
- handler->client = client;
-
- if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
- kfree(handler);
- return -EBUSY;
+ r->handler.length = request->length;
+ r->handler.address_callback = handle_request;
+ r->handler.callback_data = r;
+ r->closure = request->closure;
+ r->client = client;
+
+ ret = fw_core_add_address_handler(&r->handler, &region);
+ if (ret < 0) {
+ kfree(r);
+ return ret;
}
- handler->resource.release = release_address_handler;
- add_client_resource(client, &handler->resource);
- request->handle = handler->resource.handle;
+ r->resource.release = release_address_handler;
+ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+ if (ret < 0) {
+ release_address_handler(client, &r->resource);
+ return ret;
+ }
+ request->handle = r->resource.handle;
return 0;
}
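
A similarly hedged userspace sketch of claiming an address range through FW_CDEV_IOC_ALLOCATE; the offset is a hypothetical example (it must lie in a range the core permits), and incoming requests to the region then arrive as FW_CDEV_EVENT_REQUEST events:

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int claim_region(int fd, __u32 *handle)
	{
		struct fw_cdev_allocate a = {
			.closure = 0,
			.offset  = 0x000100000000ULL,	/* hypothetical example offset */
			.length  = 4,
		};
		int err = ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a);

		if (err == 0)
			*handle = a.handle;	/* later passed to FW_CDEV_IOC_DEALLOCATE */
		return err;
	}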
@@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer)
{
struct fw_cdev_deallocate *request = buffer;
- return release_client_resource(client, request->handle, NULL);
+ return release_client_resource(client, request->handle,
+ release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, void *buffer)
{
struct fw_cdev_send_response *request = buffer;
struct client_resource *resource;
- struct request *r;
+ struct inbound_transaction_resource *r;
- if (release_client_resource(client, request->handle, &resource) < 0)
+ if (release_client_resource(client, request->handle,
+ release_request, &resource) < 0)
return -EINVAL;
- r = container_of(resource, struct request, resource);
+
+ r = container_of(resource, struct inbound_transaction_resource,
+ resource);
if (request->length < r->length)
r->length = request->length;
if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
@@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
return fw_core_initiate_bus_reset(client->device->card, short_reset);
}
-struct descriptor {
- struct fw_descriptor d;
- struct client_resource resource;
- u32 data[0];
-};
-
static void release_descriptor(struct client *client,
struct client_resource *resource)
{
- struct descriptor *descriptor =
- container_of(resource, struct descriptor, resource);
+ struct descriptor_resource *r =
+ container_of(resource, struct descriptor_resource, resource);
- fw_core_remove_descriptor(&descriptor->d);
- kfree(descriptor);
+ fw_core_remove_descriptor(&r->descriptor);
+ kfree(r);
}
static int ioctl_add_descriptor(struct client *client, void *buffer)
{
struct fw_cdev_add_descriptor *request = buffer;
- struct descriptor *descriptor;
- int retval;
+ struct fw_card *card = client->device->card;
+ struct descriptor_resource *r;
+ int ret;
+
+ /* Access policy: Allow this ioctl only on local nodes' device files. */
+ spin_lock_irq(&card->lock);
+ ret = client->device->node_id != card->local_node->node_id;
+ spin_unlock_irq(&card->lock);
+ if (ret)
+ return -ENOSYS;
if (request->length > 256)
return -EINVAL;
- descriptor =
- kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
- if (descriptor == NULL)
+ r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+ if (r == NULL)
return -ENOMEM;
- if (copy_from_user(descriptor->data,
+ if (copy_from_user(r->data,
u64_to_uptr(request->data), request->length * 4)) {
- kfree(descriptor);
- return -EFAULT;
+ ret = -EFAULT;
+ goto failed;
}
- descriptor->d.length = request->length;
- descriptor->d.immediate = request->immediate;
- descriptor->d.key = request->key;
- descriptor->d.data = descriptor->data;
+ r->descriptor.length = request->length;
+ r->descriptor.immediate = request->immediate;
+ r->descriptor.key = request->key;
+ r->descriptor.data = r->data;
- retval = fw_core_add_descriptor(&descriptor->d);
- if (retval < 0) {
- kfree(descriptor);
- return retval;
- }
+ ret = fw_core_add_descriptor(&r->descriptor);
+ if (ret < 0)
+ goto failed;
- descriptor->resource.release = release_descriptor;
- add_client_resource(client, &descriptor->resource);
- request->handle = descriptor->resource.handle;
+ r->resource.release = release_descriptor;
+ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+ if (ret < 0) {
+ fw_core_remove_descriptor(&r->descriptor);
+ goto failed;
+ }
+ request->handle = r->resource.handle;
return 0;
+ failed:
+ kfree(r);
+
+ return ret;
}
static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
struct fw_cdev_remove_descriptor *request = buffer;
- return release_client_resource(client, request->handle, NULL);
+ return release_client_resource(client, request->handle,
+ release_descriptor, NULL);
}
-static void
-iso_callback(struct fw_iso_context *context, u32 cycle,
- size_t header_length, void *header, void *data)
+static void iso_callback(struct fw_iso_context *context, u32 cycle,
+ size_t header_length, void *header, void *data)
{
struct client *client = data;
- struct iso_interrupt *irq;
+ struct iso_interrupt_event *e;
- irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
- if (irq == NULL)
+ e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+ if (e == NULL)
return;
- irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
- irq->interrupt.closure = client->iso_closure;
- irq->interrupt.cycle = cycle;
- irq->interrupt.header_length = header_length;
- memcpy(irq->interrupt.header, header, header_length);
- queue_event(client, &irq->event, &irq->interrupt,
- sizeof(irq->interrupt) + header_length, NULL, 0);
+ e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
+ e->interrupt.closure = client->iso_closure;
+ e->interrupt.cycle = cycle;
+ e->interrupt.header_length = header_length;
+ memcpy(e->interrupt.header, header, header_length);
+ queue_event(client, &e->event, &e->interrupt,
+ sizeof(e->interrupt) + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
return 0;
}
+static void iso_resource_work(struct work_struct *work)
+{
+ struct iso_resource_event *e;
+ struct iso_resource *r =
+ container_of(work, struct iso_resource, work.work);
+ struct client *client = r->client;
+ int generation, channel, bandwidth, todo;
+ bool skip, free, success;
+
+ spin_lock_irq(&client->lock);
+ generation = client->device->generation;
+ todo = r->todo;
+ /* Allow 1000ms grace period for other reallocations. */
+ if (todo == ISO_RES_ALLOC &&
+ time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+ if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
+ client_get(client);
+ skip = true;
+ } else {
+ /* We could be called twice within the same generation. */
+ skip = todo == ISO_RES_REALLOC &&
+ r->generation == generation;
+ }
+ free = todo == ISO_RES_DEALLOC ||
+ todo == ISO_RES_ALLOC_ONCE ||
+ todo == ISO_RES_DEALLOC_ONCE;
+ r->generation = generation;
+ spin_unlock_irq(&client->lock);
+
+ if (skip)
+ goto out;
+
+ bandwidth = r->bandwidth;
+
+ fw_iso_resource_manage(client->device->card, generation,
+ r->channels, &channel, &bandwidth,
+ todo == ISO_RES_ALLOC ||
+ todo == ISO_RES_REALLOC ||
+ todo == ISO_RES_ALLOC_ONCE);
+ /*
+ * Is this generation outdated already? As long as this resource sticks
+ * in the idr, it will be scheduled again for a newer generation or at
+ * shutdown.
+ */
+ if (channel == -EAGAIN &&
+ (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
+ goto out;
+
+ success = channel >= 0 || bandwidth > 0;
+
+ spin_lock_irq(&client->lock);
+ /*
+ * Transit from allocation to reallocation, except if the client
+ * requested deallocation in the meantime.
+ */
+ if (r->todo == ISO_RES_ALLOC)
+ r->todo = ISO_RES_REALLOC;
+ /*
+ * Allocation or reallocation failure? Pull this resource out of the
+ * idr and prepare for deletion, unless the client is shutting down.
+ */
+ if (r->todo == ISO_RES_REALLOC && !success &&
+ !client->in_shutdown &&
+ idr_find(&client->resource_idr, r->resource.handle)) {
+ idr_remove(&client->resource_idr, r->resource.handle);
+ client_put(client);
+ free = true;
+ }
+ spin_unlock_irq(&client->lock);
+
+ if (todo == ISO_RES_ALLOC && channel >= 0)
+ r->channels = 1ULL << channel;
+
+ if (todo == ISO_RES_REALLOC && success)
+ goto out;
+
+ if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
+ e = r->e_alloc;
+ r->e_alloc = NULL;
+ } else {
+ e = r->e_dealloc;
+ r->e_dealloc = NULL;
+ }
+ e->resource.handle = r->resource.handle;
+ e->resource.channel = channel;
+ e->resource.bandwidth = bandwidth;
+
+ queue_event(client, &e->event,
+ &e->resource, sizeof(e->resource), NULL, 0);
+
+ if (free) {
+ cancel_delayed_work(&r->work);
+ kfree(r->e_alloc);
+ kfree(r->e_dealloc);
+ kfree(r);
+ }
+ out:
+ client_put(client);
+}
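
The work function above drives a small per-resource state machine; the todo values it consults are defined earlier in fw-cdev.c. As a reading aid, an illustrative summary of the states (ordering and exact values are assumptions, not copied from the source):

	enum {
		ISO_RES_ALLOC,		/* first allocation attempt after creation */
		ISO_RES_REALLOC,	/* re-allocate after each bus reset */
		ISO_RES_DEALLOC,	/* client asked to free the resource */
		ISO_RES_ALLOC_ONCE,	/* one-shot allocation, not kept in the idr */
		ISO_RES_DEALLOC_ONCE,	/* one-shot deallocation */
	};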
+
+static void schedule_iso_resource(struct iso_resource *r)
+{
+ client_get(r->client);
+ if (!schedule_delayed_work(&r->work, 0))
+ client_put(r->client);
+}
+
+static void release_iso_resource(struct client *client,
+ struct client_resource *resource)
+{
+ struct iso_resource *r =
+ container_of(resource, struct iso_resource, resource);
+
+ spin_lock_irq(&client->lock);
+ r->todo = ISO_RES_DEALLOC;
+ schedule_iso_resource(r);
+ spin_unlock_irq(&client->lock);
+}
+
+static int init_iso_resource(struct client *client,
+ struct fw_cdev_allocate_iso_resource *request, int todo)
+{
+ struct iso_resource_event *e1, *e2;
+ struct iso_resource *r;
+ int ret;
+
+ if ((request->channels == 0 && request->bandwidth == 0) ||
+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+ request->bandwidth < 0)
+ return -EINVAL;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
+ e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
+ if (r == NULL || e1 == NULL || e2 == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_DELAYED_WORK(&r->work, iso_resource_work);
+ r->client = client;
+ r->todo = todo;
+ r->generation = -1;
+ r->channels = request->channels;
+ r->bandwidth = request->bandwidth;
+ r->e_alloc = e1;
+ r->e_dealloc = e2;
+
+ e1->resource.closure = request->closure;
+ e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
+ e2->resource.closure = request->closure;
+ e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
+
+ if (todo == ISO_RES_ALLOC) {
+ r->resource.release = release_iso_resource;
+ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+ if (ret < 0)
+ goto fail;
+ } else {
+ r->resource.release = NULL;
+ r->resource.handle = -1;
+ schedule_iso_resource(r);
+ }
+ request->handle = r->resource.handle;
+
+ return 0;
+ fail:
+ kfree(r);
+ kfree(e1);
+ kfree(e2);
+
+ return ret;
+}
+
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+{
+ struct fw_cdev_allocate_iso_resource *request = buffer;
+
+ return init_iso_resource(client, request, ISO_RES_ALLOC);
+}
+
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+{
+ struct fw_cdev_deallocate *request = buffer;
+
+ return release_client_resource(client, request->handle,
+ release_iso_resource, NULL);
+}
+
+static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+{
+ struct fw_cdev_allocate_iso_resource *request = buffer;
+
+ return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+}
+
+static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+{
+ struct fw_cdev_allocate_iso_resource *request = buffer;
+
+ return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+}
+
+/*
+ * Returns a speed code: Maximum speed to or from this device,
+ * limited by the device's link speed, the local node's link speed,
+ * and all PHY port speeds between the two links.
+ */
+static int ioctl_get_speed(struct client *client, void *buffer)
+{
+ return client->device->max_speed;
+}
+
+static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+{
+ struct fw_cdev_send_request *request = buffer;
+
+ switch (request->tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Security policy: Only allow accesses to Units Space. */
+ if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+ return -EACCES;
+
+ return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+}
+
+static int ioctl_send_stream_packet(struct client *client, void *buffer)
+{
+ struct fw_cdev_send_stream_packet *p = buffer;
+ struct fw_cdev_send_request request;
+ int dest;
+
+ if (p->speed > client->device->card->link_speed ||
+ p->length > 1024 << p->speed)
+ return -EIO;
+
+ if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+ return -EINVAL;
+
+ dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+ request.tcode = TCODE_STREAM_DATA;
+ request.length = p->length;
+ request.closure = p->closure;
+ request.data = p->data;
+ request.generation = p->generation;
+
+ return init_request(client, &request, dest, p->speed);
+}
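
fw_stream_packet_destination_id() is provided by fw-transaction.h; the bounds checks above (tag <= 3, channel <= 63, sy <= 15) imply a 2/6/4-bit packing into the 16-bit destination field. A sketch of the presumed encoding, stated as an assumption rather than a copy of the helper:

	static inline int stream_packet_destination_id(int tag, int channel, int sy)
	{
		return tag << 14 | channel << 8 | sy;	/* assumed bit layout */
	}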
+
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
ioctl_get_info,
ioctl_send_request,
@@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
ioctl_start_iso,
ioctl_stop_iso,
ioctl_get_cycle_timer,
+ ioctl_allocate_iso_resource,
+ ioctl_deallocate_iso_resource,
+ ioctl_allocate_iso_resource_once,
+ ioctl_deallocate_iso_resource_once,
+ ioctl_get_speed,
+ ioctl_send_broadcast_request,
+ ioctl_send_stream_packet,
};
-static int
-dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
+static int dispatch_ioctl(struct client *client,
+ unsigned int cmd, void __user *arg)
{
char buffer[256];
- int retval;
+ int ret;
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
@@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
return -EFAULT;
}
- retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
- if (retval < 0)
- return retval;
+ ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+ if (ret < 0)
+ return ret;
if (_IOC_DIR(cmd) & _IOC_READ) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
@@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
return -EFAULT;
}
- return retval;
+ return ret;
}
-static long
-fw_device_op_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static long fw_device_op_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct client *client = file->private_data;
@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file,
}
#ifdef CONFIG_COMPAT
-static long
-fw_device_op_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static long fw_device_op_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct client *client = file->private_data;
@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
struct client *client = file->private_data;
enum dma_data_direction direction;
unsigned long size;
- int page_count, retval;
+ int page_count, ret;
if (fw_device_is_shutdown(client->device))
return -ENODEV;
@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
else
direction = DMA_FROM_DEVICE;
- retval = fw_iso_buffer_init(&client->buffer, client->device->card,
- page_count, direction);
- if (retval < 0)
- return retval;
+ ret = fw_iso_buffer_init(&client->buffer, client->device->card,
+ page_count, direction);
+ if (ret < 0)
+ return ret;
- retval = fw_iso_buffer_map(&client->buffer, vma);
- if (retval < 0)
+ ret = fw_iso_buffer_map(&client->buffer, vma);
+ if (ret < 0)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
- return retval;
+ return ret;
+}
+
+static int shutdown_resource(int id, void *p, void *data)
+{
+ struct client_resource *r = p;
+ struct client *client = data;
+
+ r->release(client, r);
+ client_put(client);
+
+ return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
struct client *client = file->private_data;
struct event *e, *next_e;
- struct client_resource *r, *next_r;
- unsigned long flags;
- if (client->buffer.pages)
- fw_iso_buffer_destroy(&client->buffer, client->device->card);
+ mutex_lock(&client->device->client_list_mutex);
+ list_del(&client->link);
+ mutex_unlock(&client->device->client_list_mutex);
if (client->iso_context)
fw_iso_context_destroy(client->iso_context);
- list_for_each_entry_safe(r, next_r, &client->resource_list, link)
- r->release(client, r);
+ if (client->buffer.pages)
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
- /*
- * FIXME: We should wait for the async tasklets to stop
- * running before freeing the memory.
- */
+ /* Freeze client->resource_idr and client->event_list */
+ spin_lock_irq(&client->lock);
+ client->in_shutdown = true;
+ spin_unlock_irq(&client->lock);
+
+ idr_for_each(&client->resource_idr, shutdown_resource, client);
+ idr_remove_all(&client->resource_idr);
+ idr_destroy(&client->resource_idr);
list_for_each_entry_safe(e, next_e, &client->event_list, link)
kfree(e);
- spin_lock_irqsave(&client->device->card->lock, flags);
- list_del(&client->link);
- spin_unlock_irqrestore(&client->device->card->lock, flags);
-
- fw_device_put(client->device);
- kfree(client);
+ client_put(client);
return 0;
}
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index bf53acb45652..a47e2129d83d 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -18,22 +18,26 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/module.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kthread.h>
-#include <linux/device.h>
+#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
-#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
#include <asm/system.h>
-#include <linux/ctype.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
#include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
{
@@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
vendor, model, specifier_id, version);
}
-static int
-fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct fw_unit *unit = fw_unit(dev);
char modalias[64];
@@ -152,27 +155,6 @@ struct bus_type fw_bus_type = {
};
EXPORT_SYMBOL(fw_bus_type);
-static void fw_device_release(struct device *dev)
-{
- struct fw_device *device = fw_device(dev);
- struct fw_card *card = device->card;
- unsigned long flags;
-
- /*
- * Take the card lock so we don't set this to NULL while a
- * FW_NODE_UPDATED callback is being handled or while the
- * bus manager work looks at this node.
- */
- spin_lock_irqsave(&card->lock, flags);
- device->node->data = NULL;
- spin_unlock_irqrestore(&card->lock, flags);
-
- fw_node_put(device->node);
- kfree(device->config_rom);
- kfree(device);
- fw_card_put(card);
-}
-
int fw_device_enable_phys_dma(struct fw_device *device)
{
int generation = device->generation;
@@ -191,8 +173,8 @@ struct config_rom_attribute {
u32 key;
};
-static ssize_t
-show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
+static ssize_t show_immediate(struct device *dev,
+ struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
@@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
#define IMMEDIATE_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }
-static ssize_t
-show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
+static ssize_t show_text_leaf(struct device *dev,
+ struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
@@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = {
TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};
-static void
-init_fw_attribute_group(struct device *dev,
- struct device_attribute *attrs,
- struct fw_attribute_group *group)
+static void init_fw_attribute_group(struct device *dev,
+ struct device_attribute *attrs,
+ struct fw_attribute_group *group)
{
struct device_attribute *attr;
int i, j;
@@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev,
dev->groups = group->groups;
}
-static ssize_t
-modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_unit *unit = fw_unit(dev);
int length;
@@ -332,9 +312,8 @@ modalias_show(struct device *dev,
return length + 1;
}
-static ssize_t
-rom_index_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rom_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev->parent);
struct fw_unit *unit = fw_unit(dev);
@@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = {
__ATTR_NULL,
};
-static ssize_t
-config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t config_rom_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
size_t length;
@@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
return length;
}
-static ssize_t
-guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t guid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
int ret;
@@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = {
__ATTR_NULL,
};
-static int
-read_rom(struct fw_device *device, int generation, int index, u32 *data)
+static int read_rom(struct fw_device *device,
+ int generation, int index, u32 *data)
{
int rcode;
@@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
kfree(old_rom);
ret = 0;
- device->cmc = rom[2] & 1 << 30;
+ device->cmc = rom[2] >> 30 & 1;
out:
kfree(rom);
@@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work)
fw_device_put(device);
}
+static void fw_device_release(struct device *dev)
+{
+ struct fw_device *device = fw_device(dev);
+ struct fw_card *card = device->card;
+ unsigned long flags;
+
+ /*
+ * Take the card lock so we don't set this to NULL while a
+ * FW_NODE_UPDATED callback is being handled or while the
+ * bus manager work looks at this node.
+ */
+ spin_lock_irqsave(&card->lock, flags);
+ device->node->data = NULL;
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ fw_node_put(device->node);
+ kfree(device->config_rom);
+ kfree(device);
+ fw_card_put(card);
+}
+
static struct device_type fw_device_type = {
- .release = fw_device_release,
+ .release = fw_device_release,
};
-static void fw_device_update(struct work_struct *work);
+static int update_unit(struct device *dev, void *data)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct fw_driver *driver = (struct fw_driver *)dev->driver;
+
+ if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
+ down(&dev->sem);
+ driver->update(unit);
+ up(&dev->sem);
+ }
+
+ return 0;
+}
+
+static void fw_device_update(struct work_struct *work)
+{
+ struct fw_device *device =
+ container_of(work, struct fw_device, work.work);
+
+ fw_device_cdev_update(device);
+ device_for_each_child(&device->device, NULL, update_unit);
+}
/*
* If a device was pending for deletion because its node went away but its
@@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data)
return match;
}
+enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
+
+void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
+{
+ struct fw_card *card = device->card;
+ __be32 data;
+ int rcode;
+
+ if (!card->broadcast_channel_allocated)
+ return;
+
+ if (device->bc_implemented == BC_UNKNOWN) {
+ rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
+ device->node_id, generation, device->max_speed,
+ CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+ &data, 4);
+ switch (rcode) {
+ case RCODE_COMPLETE:
+ if (data & cpu_to_be32(1 << 31)) {
+ device->bc_implemented = BC_IMPLEMENTED;
+ break;
+ }
+ /* else fall through to case address error */
+ case RCODE_ADDRESS_ERROR:
+ device->bc_implemented = BC_UNIMPLEMENTED;
+ }
+ }
+
+ if (device->bc_implemented == BC_IMPLEMENTED) {
+ data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
+ BROADCAST_CHANNEL_VALID);
+ fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ device->node_id, generation, device->max_speed,
+ CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+ &data, 4);
+ }
+}
+
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct device *revived_dev;
- int minor, err;
+ int minor, ret;
/*
* All failure paths here set node->data to NULL, so that we
@@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work)
fw_device_get(device);
down_write(&fw_device_rwsem);
- err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+ ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
idr_get_new(&fw_device_idr, device, &minor) :
-ENOMEM;
up_write(&fw_device_rwsem);
- if (err < 0)
+ if (ret < 0)
goto error;
device->device.bus = &fw_bus_type;
@@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work)
device->config_rom[3], device->config_rom[4],
1 << device->max_speed);
device->config_rom_retries = 0;
+
+ fw_device_set_broadcast_channel(device, device->generation);
}
/*
@@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work)
put_device(&device->device); /* our reference */
}
-static int update_unit(struct device *dev, void *data)
-{
- struct fw_unit *unit = fw_unit(dev);
- struct fw_driver *driver = (struct fw_driver *)dev->driver;
-
- if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
- down(&dev->sem);
- driver->update(unit);
- up(&dev->sem);
- }
-
- return 0;
-}
-
-static void fw_device_update(struct work_struct *work)
-{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
-
- fw_device_cdev_update(device);
- device_for_each_child(&device->device, NULL, update_unit);
-}
-
enum {
REREAD_BIB_ERROR,
REREAD_BIB_GONE,
@@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation)
if (i == 0 && q == 0)
return REREAD_BIB_GONE;
- if (i > device->config_rom_length || q != device->config_rom[i])
+ if (q != device->config_rom[i])
return REREAD_BIB_CHANGED;
}
@@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
device->node = fw_node_get(node);
device->node_id = node->node_id;
device->generation = card->generation;
+ mutex_init(&device->client_list_mutex);
INIT_LIST_HEAD(&device->client_list);
/*
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 8ef6ec2ca21c..97588937c018 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -19,10 +19,17 @@
#ifndef __fw_device_h
#define __fw_device_h
+#include <linux/device.h>
#include <linux/fs.h>
-#include <linux/cdev.h>
#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/rwsem.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <asm/atomic.h>
enum fw_device_state {
@@ -38,6 +45,9 @@ struct fw_attribute_group {
struct attribute *attrs[11];
};
+struct fw_node;
+struct fw_card;
+
/*
* Note, fw_device.generation always has to be read before fw_device.node_id.
* Use SMP memory barriers to ensure this. Otherwise requests will be sent
@@ -61,13 +71,18 @@ struct fw_device {
int node_id;
int generation;
unsigned max_speed;
- bool cmc;
struct fw_card *card;
struct device device;
+
+ struct mutex client_list_mutex;
struct list_head client_list;
+
u32 *config_rom;
size_t config_rom_length;
int config_rom_retries;
+ unsigned cmc:1;
+ unsigned bc_implemented:2;
+
struct delayed_work work;
struct fw_attribute_group attribute_group;
};
@@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device)
struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_enable_phys_dma(struct fw_device *device);
+void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
@@ -176,8 +192,7 @@ struct fw_driver {
const struct fw_device_id *id_table;
};
-static inline struct fw_driver *
-fw_driver(struct device_driver *drv)
+static inline struct fw_driver *fw_driver(struct device_driver *drv)
{
return container_of(drv, struct fw_driver, driver);
}
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index e14c03dc0065..2baf1007253e 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -1,5 +1,7 @@
/*
- * Isochronous IO functionality
+ * Isochronous I/O functionality:
+ * - Isochronous DMA context management
+ * - Isochronous bus resource management (channels, bandwidth), client side
*
* Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
*
@@ -18,21 +20,25 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-constants.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
-#include "fw-transaction.h"
#include "fw-topology.h"
-#include "fw-device.h"
+#include "fw-transaction.h"
-int
-fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction)
+/*
+ * Isochronous DMA context management
+ */
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction)
{
- int i, j, retval = -ENOMEM;
+ int i, j;
dma_addr_t address;
buffer->page_count = page_count;
@@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
kfree(buffer->pages);
out:
buffer->pages = NULL;
- return retval;
+
+ return -ENOMEM;
}
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
unsigned long uaddr;
- int i, retval;
+ int i, err;
uaddr = vma->vm_start;
for (i = 0; i < buffer->page_count; i++) {
- retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
- if (retval)
- return retval;
+ err = vm_insert_page(vma, uaddr, buffer->pages[i]);
+ if (err)
+ return err;
+
uaddr += PAGE_SIZE;
}
@@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
buffer->pages = NULL;
}
-struct fw_iso_context *
-fw_iso_context_create(struct fw_card *card, int type,
- int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data)
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+ int type, int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data)
{
struct fw_iso_context *ctx;
- ctx = card->driver->allocate_iso_context(card, type, header_size);
+ ctx = card->driver->allocate_iso_context(card,
+ type, channel, header_size);
if (IS_ERR(ctx))
return ctx;
@@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
card->driver->free_iso_context(ctx);
}
-int
-fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
+int fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
-int
-fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct fw_card *card = ctx->card;
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
-int
-fw_iso_context_stop(struct fw_iso_context *ctx)
+int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
+
+/*
+ * Isochronous bus resource management (channels, bandwidth), client side
+ */
+
+static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
+ int bandwidth, bool allocate)
+{
+ __be32 data[2];
+ int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+
+ /*
+ * On a 1394a IRM with low contention, try < 1 is enough.
+ * On a 1394-1995 IRM, we need at least try < 2.
+ * Let's just do try < 5.
+ */
+ for (try = 0; try < 5; try++) {
+ new = allocate ? old - bandwidth : old + bandwidth;
+ if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
+ break;
+
+ data[0] = cpu_to_be32(old);
+ data[1] = cpu_to_be32(new);
+ switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+ irm_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
+ data, sizeof(data))) {
+ case RCODE_GENERATION:
+ /* A generation change frees all bandwidth. */
+ return allocate ? -EAGAIN : bandwidth;
+
+ case RCODE_COMPLETE:
+ if (be32_to_cpup(data) == old)
+ return bandwidth;
+
+ old = be32_to_cpup(data);
+ /* Fall through. */
+ }
+ }
+
+ return -EIO;
+}
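
The loop above is an ordinary compare-and-swap retry against the IRM's BANDWIDTH_AVAILABLE register. A stand-alone model of the same idea (illustrative only; the real register is updated with a lock transaction over the bus, not a local pointer):

	static int csw(int *reg, int expected, int new_val)	/* what the IRM does */
	{
		int old = *reg;

		if (old == expected)
			*reg = new_val;
		return old;
	}

	static int alloc_bandwidth(int *reg, int bandwidth)
	{
		int old = 4915;		/* BANDWIDTH_AVAILABLE_INITIAL */
		int try, actual;

		for (try = 0; try < 5; try++) {
			if (old < bandwidth)
				return -1;		/* not enough left */
			actual = csw(reg, old, old - bandwidth);
			if (actual == old)
				return bandwidth;	/* our swap won */
			old = actual;			/* someone raced us; retry */
		}
		return -1;
	}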
+
+static int manage_channel(struct fw_card *card, int irm_id, int generation,
+ u32 channels_mask, u64 offset, bool allocate)
+{
+ __be32 data[2], c, all, old;
+ int i, retry = 5;
+
+ old = all = allocate ? cpu_to_be32(~0) : 0;
+
+ for (i = 0; i < 32; i++) {
+ if (!(channels_mask & 1 << i))
+ continue;
+
+ c = cpu_to_be32(1 << (31 - i));
+ if ((old & c) != (all & c))
+ continue;
+
+ data[0] = old;
+ data[1] = old ^ c;
+ switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+ irm_id, generation, SCODE_100,
+ offset, data, sizeof(data))) {
+ case RCODE_GENERATION:
+ /* A generation change frees all channels. */
+ return allocate ? -EAGAIN : i;
+
+ case RCODE_COMPLETE:
+ if (data[0] == old)
+ return i;
+
+ old = data[0];
+
+ /* Is the IRM 1394a-2000 compliant? */
+ if ((data[0] & c) == (data[1] & c))
+ continue;
+
+ /* 1394-1995 IRM, fall through to retry. */
+ default:
+ if (retry--)
+ i--;
+ }
+ }
+
+ return -EIO;
+}
+
+static void deallocate_channel(struct fw_card *card, int irm_id,
+ int generation, int channel)
+{
+ u32 mask;
+ u64 offset;
+
+ mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
+ offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
+
+ manage_channel(card, irm_id, generation, mask, offset, false);
+}
+
+/**
+ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ *
+ * In parameters: card, generation, channels_mask, bandwidth, allocate
+ * Out parameters: channel, bandwidth
+ * This function blocks (sleeps) during communication with the IRM.
+ *
+ * Allocates or deallocates at most one channel out of channels_mask.
+ * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
+ * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
+ * channel 0 and LSB for channel 63.)
+ * Allocates or deallocates as many bandwidth allocation units as specified.
+ *
+ * Returns channel < 0 if no channel was allocated or deallocated.
+ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
+ *
+ * If generation is stale, deallocations succeed but allocations fail with
+ * channel = -EAGAIN.
+ *
+ * If channel allocation fails, no bandwidth will be allocated either.
+ * If bandwidth allocation fails, no channel will be allocated either.
+ * But deallocations of channel and bandwidth are tried independently
+ * of each other's success.
+ */
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+ u64 channels_mask, int *channel, int *bandwidth,
+ bool allocate)
+{
+ u32 channels_hi = channels_mask; /* channels 31...0 */
+ u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
+ int irm_id, ret, c = -EINVAL;
+
+ spin_lock_irq(&card->lock);
+ irm_id = card->irm_node->node_id;
+ spin_unlock_irq(&card->lock);
+
+ if (channels_hi)
+ c = manage_channel(card, irm_id, generation, channels_hi,
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+ if (channels_lo && c < 0) {
+ c = manage_channel(card, irm_id, generation, channels_lo,
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+ if (c >= 0)
+ c += 32;
+ }
+ *channel = c;
+
+ if (allocate && channels_mask != 0 && c < 0)
+ *bandwidth = 0;
+
+ if (*bandwidth == 0)
+ return;
+
+ ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+ if (ret < 0)
+ *bandwidth = 0;
+
+ if (allocate && ret < 0 && c >= 0) {
+ deallocate_channel(card, irm_id, generation, c);
+ *channel = ret;
+ }
+}
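
A minimal usage sketch of the function documented above, assuming process context and a held card reference; the channel mask and bandwidth figure are arbitrary examples:

	static void example_allocate(struct fw_card *card, int generation)
	{
		int channel, bandwidth = 2000;	/* bandwidth allocation units */

		/* try any one of channels 0..31 plus the requested bandwidth */
		fw_iso_resource_manage(card, generation, 0x00000000ffffffffULL,
				       &channel, &bandwidth, true);
		if (channel < 0 || bandwidth == 0) {
			/* allocation failed; any partial result was rolled back */
		}
	}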
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 6d19828a93a5..1180d0be0bb4 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -205,6 +205,7 @@ struct fw_ohci {
u32 it_context_mask;
struct iso_context *it_context_list;
+ u64 ir_context_channels;
u32 ir_context_mask;
struct iso_context *ir_context_list;
};
@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
-static int
-ohci_update_phy_reg(struct fw_card *card, int addr,
- int clear_bits, int set_bits)
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
{
struct fw_ohci *ohci = fw_ohci(card);
u32 val, old;
@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data)
}
}
-static int
-ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx,
+ struct fw_ohci *ohci, u32 regs)
{
struct ar_buffer ab;
@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx)
flush_writes(ctx->ohci);
}
-static struct descriptor *
-find_branch_descriptor(struct descriptor *d, int z)
+static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
int b, key;
@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data)
* Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held.
*/
-static int
-context_add_buffer(struct context *ctx)
+static int context_add_buffer(struct context *ctx)
{
struct descriptor_buffer *desc;
dma_addr_t uninitialized_var(bus_addr);
@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
return 0;
}
-static int
-context_init(struct context *ctx, struct fw_ohci *ohci,
- u32 regs, descriptor_callback_t callback)
+static int context_init(struct context *ctx, struct fw_ohci *ohci,
+ u32 regs, descriptor_callback_t callback)
{
ctx->ohci = ohci;
ctx->regs = regs;
@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
return 0;
}
-static void
-context_release(struct context *ctx)
+static void context_release(struct context *ctx)
{
struct fw_card *card = &ctx->ohci->card;
struct descriptor_buffer *desc, *tmp;
@@ -827,8 +823,8 @@ context_release(struct context *ctx)
}
/* Must be called with ohci->lock held */
-static struct descriptor *
-context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+static struct descriptor *context_get_descriptors(struct context *ctx,
+ int z, dma_addr_t *d_bus)
{
struct descriptor *d = NULL;
struct descriptor_buffer *desc = ctx->buffer_tail;
@@ -912,8 +908,8 @@ struct driver_data {
 * Must always be called with the ohci->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
-static int
-at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
+static int at_context_queue_packet(struct context *ctx,
+ struct fw_packet *packet)
{
struct fw_ohci *ohci = ctx->ohci;
dma_addr_t d_bus, uninitialized_var(payload_bus);
@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
*/
header = (__le32 *) &d[1];
- if (packet->header_length > 8) {
+ switch (packet->header_length) {
+ case 16:
+ case 12:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
header[3] = (__force __le32) packet->header[3];
d[0].req_count = cpu_to_le16(packet->header_length);
- } else {
+ break;
+
+ case 8:
header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
(packet->speed << 16));
header[1] = cpu_to_le32(packet->header[0]);
header[2] = cpu_to_le32(packet->header[1]);
d[0].req_count = cpu_to_le16(12);
+ break;
+
+ case 4:
+ header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+ (packet->speed << 16));
+ header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+ d[0].req_count = cpu_to_le16(8);
+ break;
+
+ default:
+ /* BUG(); */
+ packet->ack = RCODE_SEND_ERROR;
+ return -1;
}
driver_data = (struct driver_data *) &d[3];
@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context,
#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
-static void
-handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_rom(struct fw_ohci *ohci,
+ struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
int tcode, length, i;
@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
fw_core_handle_response(&ohci->card, &response);
}
-static void
-handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_lock(struct fw_ohci *ohci,
+ struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
int tcode, length, ext_tcode, sel;
@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
fw_core_handle_response(&ohci->card, &response);
}
-static void
-handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
u64 offset;
u32 csr;
@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
}
}
-static void
-at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
- int retval;
+ int ret;
spin_lock_irqsave(&ctx->ohci->lock, flags);
@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
return;
}
- retval = at_context_queue_packet(ctx, packet);
+ ret = at_context_queue_packet(ctx, packet);
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
- if (retval < 0)
+ if (ret < 0)
packet->callback(packet, &ctx->ohci->card, packet->ack);
}
@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
return 0;
}
-static int
-ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
+static int ohci_set_config_rom(struct fw_card *card,
+ u32 *config_rom, size_t length)
{
struct fw_ohci *ohci;
unsigned long flags;
- int retval = -EBUSY;
+ int ret = -EBUSY;
__be32 *next_config_rom;
dma_addr_t uninitialized_var(next_config_rom_bus);
@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
reg_write(ohci, OHCI1394_ConfigROMmap,
ohci->next_config_rom_bus);
- retval = 0;
+ ret = 0;
}
spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
* controller could need to access it before the bus reset
* takes effect.
*/
- if (retval == 0)
+ if (ret == 0)
fw_core_initiate_bus_reset(&ohci->card, 1);
else
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);
- return retval;
+ return ret;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
struct fw_ohci *ohci = fw_ohci(card);
struct context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
- int retval = -ENOENT;
+ int ret = -ENOENT;
tasklet_disable(&ctx->tasklet);
@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
packet->callback(packet, &ohci->card, packet->ack);
- retval = 0;
-
+ ret = 0;
out:
tasklet_enable(&ctx->tasklet);
- return retval;
+ return ret;
}
-static int
-ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
+static int ohci_enable_phys_dma(struct fw_card *card,
+ int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
return 0;
#else
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
- int n, retval = 0;
+ int n, ret = 0;
/*
* FIXME: Make sure this bitmask is cleared when we clear the busReset
@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
spin_lock_irqsave(&ohci->lock, flags);
if (ohci->generation != generation) {
- retval = -ESTALE;
+ ret = -ESTALE;
goto out;
}
@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
flush_writes(ohci);
out:
spin_unlock_irqrestore(&ohci->lock, flags);
- return retval;
+
+ return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
-static u64
-ohci_get_bus_time(struct fw_card *card)
+static u64 ohci_get_bus_time(struct fw_card *card)
{
struct fw_ohci *ohci = fw_ohci(card);
u32 cycle_time;
@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card)
return bus_time;
}
+static void copy_iso_headers(struct iso_context *ctx, void *p)
+{
+ int i = ctx->header_length;
+
+ if (i + ctx->base.header_size > PAGE_SIZE)
+ return;
+
+ /*
+ * The iso header is byteswapped to little endian by
+ * the controller, but the remaining header quadlets
+ * are big endian. We want to present all the headers
+ * as big endian, so we have to swap the first quadlet.
+ */
+ if (ctx->base.header_size > 0)
+ *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+ if (ctx->base.header_size > 4)
+ *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
+ if (ctx->base.header_size > 8)
+ memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
+ ctx->header_length += ctx->base.header_size;
+}
+
static int handle_ir_dualbuffer_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
__le32 *ir_header;
size_t header_length;
void *p, *end;
- int i;
if (db->first_res_count != 0 && db->second_res_count != 0) {
if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context,
header_length = le16_to_cpu(db->first_req_count) -
le16_to_cpu(db->first_res_count);
- i = ctx->header_length;
p = db + 1;
end = p + header_length;
- while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
- /*
- * The iso header is byteswapped to little endian by
- * the controller, but the remaining header quadlets
- * are big endian. We want to present all the headers
- * as big endian, so we have to swap the first
- * quadlet.
- */
- *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
- memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
- i += ctx->base.header_size;
+ while (p < end) {
+ copy_iso_headers(ctx, p);
ctx->excess_bytes +=
(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
- p += ctx->base.header_size + 4;
+ p += max(ctx->base.header_size, (size_t)8);
}
- ctx->header_length = i;
ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
le16_to_cpu(db->second_res_count);
@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *pd;
__le32 *ir_header;
void *p;
- int i;
for (pd = d; pd <= last; pd++) {
if (pd->transfer_status)
@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
- i = ctx->header_length;
- p = last + 1;
-
- if (ctx->base.header_size > 0 &&
- i + ctx->base.header_size <= PAGE_SIZE) {
- /*
- * The iso header is byteswapped to little endian by
- * the controller, but the remaining header quadlets
- * are big endian. We want to present all the headers
- * as big endian, so we have to swap the first quadlet.
- */
- *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
- memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
- ctx->header_length += ctx->base.header_size;
- }
+ p = last + 1;
+ copy_iso_headers(ctx, p);
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
ir_header = (__le32 *) p;
@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context,
return 1;
}
-static struct fw_iso_context *
-ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
+static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
+ int type, int channel, size_t header_size)
{
struct fw_ohci *ohci = fw_ohci(card);
struct iso_context *ctx, *list;
descriptor_callback_t callback;
+ u64 *channels, dont_care = ~0ULL;
u32 *mask, regs;
unsigned long flags;
- int index, retval = -ENOMEM;
+ int index, ret = -ENOMEM;
if (type == FW_ISO_CONTEXT_TRANSMIT) {
+ channels = &dont_care;
mask = &ohci->it_context_mask;
list = ohci->it_context_list;
callback = handle_it_packet;
} else {
+ channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
if (ohci->use_dualbuffer)
@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
}
spin_lock_irqsave(&ohci->lock, flags);
- index = ffs(*mask) - 1;
- if (index >= 0)
+ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ *channels &= ~(1ULL << channel);
*mask &= ~(1 << index);
+ }
spin_unlock_irqrestore(&ohci->lock, flags);
if (index < 0)
@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
if (ctx->header == NULL)
goto out;
- retval = context_init(&ctx->context, ohci, regs, callback);
- if (retval < 0)
+ ret = context_init(&ctx->context, ohci, regs, callback);
+ if (ret < 0)
goto out_with_header;
return &ctx->base;
@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
*mask |= 1 << index;
spin_unlock_irqrestore(&ohci->lock, flags);
- return ERR_PTR(retval);
+ return ERR_PTR(ret);
}
static int ohci_start_iso(struct fw_iso_context *base,
@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
} else {
index = ctx - ohci->ir_context_list;
ohci->ir_context_mask |= 1 << index;
+ ohci->ir_context_channels |= 1ULL << base->channel;
}
spin_unlock_irqrestore(&ohci->lock, flags);
}
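The hunks above pair the existing ir_context_mask with the new 64-bit ir_context_channels mask, so at most one IR context can be bound to a given isochronous channel: allocation claims both the context slot and the channel, and ohci_free_iso_context() returns them. A minimal sketch of that bookkeeping, with the locking and register setup omitted (helper names are illustrative):

	/* Claim a free IR context slot, but only if the channel is still free. */
	static int claim_ir_context(u64 *channels, u32 *mask, int channel)
	{
		int index;

		index = (*channels & (1ULL << channel)) ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			*channels &= ~(1ULL << channel);  /* channel now in use      */
			*mask &= ~(1 << index);           /* context slot now in use */
		}

		return index;	/* -1: no slot free or channel already taken */
	}

	static void release_ir_context(u64 *channels, u32 *mask, int channel, int index)
	{
		*mask |= 1 << index;
		*channels |= 1ULL << channel;
	}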
-static int
-ohci_queue_iso_transmit(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int ohci_queue_iso_transmit(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *last, *pd;
@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0;
}
-static int
-ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct db_descriptor *db = NULL;
@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
z = 2;
/*
- * The OHCI controller puts the status word in the header
- * buffer too, so we need 4 extra bytes per packet.
+ * The OHCI controller puts the isochronous header and trailer in the
+ * buffer, so we need at least 8 bytes.
*/
packet_count = p->header_length / ctx->base.header_size;
- header_size = packet_count * (ctx->base.header_size + 4);
+ header_size = packet_count * max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
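For a context created with a small header_size this is where the accounting changes: the controller always stores the 4-byte isochronous packet header plus the 4-byte trailer, so the DMA program has to reserve at least 8 header bytes per packet, and more only when the caller asked for extra header quadlets. A small sketch of the rule used in both receive paths (helper name illustrative):

	static size_t ir_header_bytes_per_packet(size_t header_size)
	{
		/* e.g. header_size 4 -> 8 bytes reserved, header_size 12 -> 12 */
		return header_size < 8 ? 8 : header_size;
	}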
@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
db = (struct db_descriptor *) d;
db->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS);
- db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+ db->first_size =
+ cpu_to_le16(max(ctx->base.header_size, (size_t)8));
if (p->skip && rest == p->payload_length) {
db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
db->first_req_count = db->first_size;
@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
return 0;
}
-static int
-ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d = NULL, *pd = NULL;
@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
int page, offset, packet_count, header_size, payload_per_buffer;
/*
- * The OHCI controller puts the status word in the
- * buffer too, so we need 4 extra bytes per packet.
+ * The OHCI controller puts the isochronous header and trailer in the
+ * buffer, so we need at least 8 bytes.
*/
packet_count = p->header_length / ctx->base.header_size;
- header_size = ctx->base.header_size + 4;
+ header_size = max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
return 0;
}
-static int
-ohci_queue_iso(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int ohci_queue_iso(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
- int retval;
+ int ret;
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
- retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
+ ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
else if (ctx->context.ohci->use_dualbuffer)
- retval = ohci_queue_iso_receive_dualbuffer(base, packet,
- buffer, payload);
+ ret = ohci_queue_iso_receive_dualbuffer(base, packet,
+ buffer, payload);
else
- retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
- buffer,
- payload);
+ ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+ buffer, payload);
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
- return retval;
+ return ret;
}
static const struct fw_card_driver ohci_driver = {
@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
-static int __devinit
-pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ohci->it_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+ ohci->ir_context_channels = ~0ULL;
ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
reg_read(ohci, OHCI1394_GUIDLo);
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
- if (err < 0)
+ if (err)
goto fail_self_id;
fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
dev_name(&dev->dev), version >> 16, version & 0xff);
+
return 0;
fail_self_id:
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index c71c4419d9e8..2bcf51557c72 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -392,20 +392,18 @@ static const struct {
}
};
-static void
-free_orb(struct kref *kref)
+static void free_orb(struct kref *kref)
{
struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
kfree(orb);
}
-static void
-sbp2_status_write(struct fw_card *card, struct fw_request *request,
- int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, int speed,
+ unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
struct sbp2_orb *orb;
@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
fw_send_response(card, request, RCODE_COMPLETE);
}
-static void
-complete_transaction(struct fw_card *card, int rcode,
- void *payload, size_t length, void *data)
+static void complete_transaction(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
{
struct sbp2_orb *orb = data;
unsigned long flags;
@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode,
kref_put(&orb->kref, free_orb);
}
-static void
-sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
- int node_id, int generation, u64 offset)
+static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
+ int node_id, int generation, u64 offset)
{
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
unsigned long flags;
@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
return retval;
}
-static void
-complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+static void complete_management_orb(struct sbp2_orb *base_orb,
+ struct sbp2_status *status)
{
struct sbp2_management_orb *orb =
container_of(base_orb, struct sbp2_management_orb, base);
@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
complete(&orb->done);
}
-static int
-sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
- int generation, int function, int lun_or_login_id,
- void *response)
+static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+ int generation, int function,
+ int lun_or_login_id, void *response)
{
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct sbp2_management_orb *orb;
@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
&d, sizeof(d));
}
-static void
-complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
- void *payload, size_t length, void *data)
+static void complete_agent_reset_write_no_wait(struct fw_card *card,
+ int rcode, void *payload, size_t length, void *data)
{
kfree(data);
}
@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
sizeof(orb->page_table), DMA_TO_DEVICE);
}
-static unsigned int
-sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
int sam_status;
@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
}
}
-static void
-complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+static void complete_command_orb(struct sbp2_orb *base_orb,
+ struct sbp2_status *status)
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
orb->done(orb->cmd);
}
-static int
-sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
- struct sbp2_logical_unit *lu)
+static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
+ struct fw_device *device, struct sbp2_logical_unit *lu)
{
struct scatterlist *sg = scsi_sglist(orb->cmd);
int i, n;
@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
* This is the concatenation of target port identifier and logical unit
* identifier as per SAM-2...SAM-4 annex A.
*/
-static ssize_t
-sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct sbp2_logical_unit *lu;
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 8dd6703b55cd..d0deecc4de93 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card,
struct fw_node * node,
struct fw_node * parent);
-static void
-for_each_fw_node(struct fw_card *card, struct fw_node *root,
- fw_node_callback_t callback)
+static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
+ fw_node_callback_t callback)
{
struct list_head list;
struct fw_node *node, *next, *child, *parent;
@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
fw_node_put(node);
}
-static void
-report_lost_node(struct fw_card *card,
- struct fw_node *node, struct fw_node *parent)
+static void report_lost_node(struct fw_card *card,
+ struct fw_node *node, struct fw_node *parent)
{
fw_node_event(card, node, FW_NODE_DESTROYED);
fw_node_put(node);
@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
card->bm_retries = 0;
}
-static void
-report_found_node(struct fw_card *card,
- struct fw_node *node, struct fw_node *parent)
+static void report_found_node(struct fw_card *card,
+ struct fw_node *node, struct fw_node *parent)
{
int b_path = (node->phy_speed == SCODE_BETA);
@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
* found, lost or updated. Update the nodes in the card topology tree
* as we go.
*/
-static void
-update_tree(struct fw_card *card, struct fw_node *root)
+static void update_tree(struct fw_card *card, struct fw_node *root)
{
struct list_head list0, list1;
struct fw_node *node0, *node1, *next1;
@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
}
}
-static void
-update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
+static void update_topology_map(struct fw_card *card,
+ u32 *self_ids, int self_id_count)
{
int node_count;
@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
fw_compute_block_crc(card->topology_map);
}
-void
-fw_core_handle_bus_reset(struct fw_card *card,
- int node_id, int generation,
- int self_id_count, u32 * self_ids)
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
+ int self_id_count, u32 *self_ids)
{
struct fw_node *local_node;
unsigned long flags;
@@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card,
spin_lock_irqsave(&card->lock, flags);
+ card->broadcast_channel_allocated = false;
card->node_id = node_id;
/*
* Update node_id before generation to prevent anybody from using
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index addb9f8ea776..3c497bb4fae4 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -19,6 +19,11 @@
#ifndef __fw_topology_h
#define __fw_topology_h
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+
enum {
FW_NODE_CREATED,
FW_NODE_UPDATED,
@@ -51,26 +56,22 @@ struct fw_node {
struct fw_node *ports[0];
};
-static inline struct fw_node *
-fw_node_get(struct fw_node *node)
+static inline struct fw_node *fw_node_get(struct fw_node *node)
{
atomic_inc(&node->ref_count);
return node;
}
-static inline void
-fw_node_put(struct fw_node *node)
+static inline void fw_node_put(struct fw_node *node)
{
if (atomic_dec_and_test(&node->ref_count))
kfree(node);
}
-void
-fw_destroy_nodes(struct fw_card *card);
-
-int
-fw_compute_block_crc(u32 *block);
+struct fw_card;
+void fw_destroy_nodes(struct fw_card *card);
+int fw_compute_block_crc(u32 *block);
#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 699ac041f39a..283dac6d327d 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -64,10 +64,8 @@
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
-static int
-close_transaction(struct fw_transaction *transaction,
- struct fw_card *card, int rcode,
- u32 *payload, size_t length)
+static int close_transaction(struct fw_transaction *transaction,
+ struct fw_card *card, int rcode)
{
struct fw_transaction *t;
unsigned long flags;
@@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
- t->callback(card, rcode, payload, length, t->callback_data);
+ t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction,
* Only valid for transactions that are potentially pending (ie have
* been sent).
*/
-int
-fw_cancel_transaction(struct fw_card *card,
- struct fw_transaction *transaction)
+int fw_cancel_transaction(struct fw_card *card,
+ struct fw_transaction *transaction)
{
/*
* Cancel the packet transmission if it's still queued. That
@@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card,
* if the transaction is still pending and remove it in that case.
*/
- return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
+ return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);
-static void
-transmit_complete_callback(struct fw_packet *packet,
- struct fw_card *card, int status)
+static void transmit_complete_callback(struct fw_packet *packet,
+ struct fw_card *card, int status)
{
struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet);
switch (status) {
case ACK_COMPLETE:
- close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
+ close_transaction(t, card, RCODE_COMPLETE);
break;
case ACK_PENDING:
t->timestamp = packet->timestamp;
@@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet,
case ACK_BUSY_X:
case ACK_BUSY_A:
case ACK_BUSY_B:
- close_transaction(t, card, RCODE_BUSY, NULL, 0);
+ close_transaction(t, card, RCODE_BUSY);
break;
case ACK_DATA_ERROR:
- close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
+ close_transaction(t, card, RCODE_DATA_ERROR);
break;
case ACK_TYPE_ERROR:
- close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
+ close_transaction(t, card, RCODE_TYPE_ERROR);
break;
default:
/*
* In this case the ack is really a juju specific
* rcode, so just forward that to the callback.
*/
- close_transaction(t, card, status, NULL, 0);
+ close_transaction(t, card, status);
break;
}
}
-static void
-fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
int destination_id, int source_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length)
{
int ext_tcode;
+ if (tcode == TCODE_STREAM_DATA) {
+ packet->header[0] =
+ HEADER_DATA_LENGTH(length) |
+ destination_id |
+ HEADER_TCODE(TCODE_STREAM_DATA);
+ packet->header_length = 4;
+ packet->payload = payload;
+ packet->payload_length = length;
+
+ goto common;
+ }
+
if (tcode > 0x10) {
ext_tcode = tcode & ~0x10;
tcode = TCODE_LOCK_REQUEST;
@@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->payload_length = 0;
break;
}
-
+ common:
packet->speed = speed;
packet->generation = generation;
packet->ack = 0;
@@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
* @param callback function to be called when the transaction is completed
* @param callback_data pointer to arbitrary data, which will be
* passed to the callback
+ *
+ * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
+ * needs to synthesize @destination_id with fw_stream_packet_destination_id().
*/
-void
-fw_send_request(struct fw_card *card, struct fw_transaction *t,
- int tcode, int destination_id, int generation, int speed,
- unsigned long long offset,
- void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data)
+void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data)
{
unsigned long flags;
int tlabel;
@@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode,
* Returns the RCODE.
*/
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
- int generation, int speed, unsigned long long offset,
- void *data, size_t length)
+ int generation, int speed, unsigned long long offset,
+ void *payload, size_t length)
{
struct transaction_callback_data d;
struct fw_transaction t;
init_completion(&d.done);
- d.payload = data;
+ d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed,
- offset, data, length, transaction_callback, &d);
+ offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
return d.rcode;
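fw_run_transaction() remains the blocking convenience wrapper around fw_send_request(); renaming data to payload only brings the two signatures in line. A rough usage sketch for a quadlet write, returning 0 or an errno (function name and error mapping are illustrative; the tcode constant is the one defined in fw-transaction.h):

	static int write_quadlet(struct fw_card *card, int node_id, int generation,
				 int speed, unsigned long long offset, u32 value)
	{
		__be32 data = cpu_to_be32(value);

		/* Blocks until the split transaction completes; returns an RCODE. */
		if (fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, node_id,
				       generation, speed, offset,
				       &data, sizeof(data)) != RCODE_COMPLETE)
			return -EIO;

		return 0;
	}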
@@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card)
}
}
-static struct fw_address_handler *
-lookup_overlapping_address_handler(struct list_head *list,
- unsigned long long offset, size_t length)
+static struct fw_address_handler *lookup_overlapping_address_handler(
+ struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
@@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list,
return NULL;
}
-static struct fw_address_handler *
-lookup_enclosing_address_handler(struct list_head *list,
- unsigned long long offset, size_t length)
+static struct fw_address_handler *lookup_enclosing_address_handler(
+ struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
@@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region =
#endif /* 0 */
/**
- * Allocate a range of addresses in the node space of the OHCI
- * controller. When a request is received that falls within the
- * specified address range, the specified callback is invoked. The
- * parameters passed to the callback give the details of the
- * particular request.
+ * fw_core_add_address_handler - register for incoming requests
+ * @handler: callback
+ * @region: region in the IEEE 1212 node space address range
+ *
+ * region->start, ->end, and handler->length have to be quadlet-aligned.
+ *
+ * When a request is received that falls within the specified address range,
+ * the specified callback is invoked. The parameters passed to the callback
+ * give the details of the particular request.
*
* Return value: 0 on success, non-zero otherwise.
* The start offset of the handler's address region is determined by
* fw_core_add_address_handler() and is returned in handler->offset.
- * The offset is quadlet-aligned.
*/
-int
-fw_core_add_address_handler(struct fw_address_handler *handler,
- const struct fw_address_region *region)
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+ const struct fw_address_region *region)
{
struct fw_address_handler *other;
unsigned long flags;
int ret = -EBUSY;
+ if (region->start & 0xffff000000000003ULL ||
+ region->end & 0xffff000000000003ULL ||
+ region->start >= region->end ||
+ handler->length & 3 ||
+ handler->length == 0)
+ return -EINVAL;
+
spin_lock_irqsave(&address_handler_lock, flags);
- handler->offset = roundup(region->start, 4);
+ handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
other =
lookup_overlapping_address_handler(&address_handler_list,
handler->offset,
handler->length);
if (other != NULL) {
- handler->offset =
- roundup(other->offset + other->length, 4);
+ handler->offset += other->length;
} else {
list_add_tail(&handler->link, &address_handler_list);
ret = 0;
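With the new sanity checks, registration succeeds only for a quadlet-aligned, non-empty handler inside a quadlet-aligned region, and the core then picks the first free offset within that region. A minimal sketch of a unit driver registering such a handler (region, length and names are made up for illustration; the callback prototype and the address_callback/offset members follow fw-transaction.h):

	static void handle_unit_request(struct fw_card *card, struct fw_request *request,
					int tcode, int destination, int source,
					int generation, int speed,
					unsigned long long offset,
					void *payload, size_t length, void *callback_data)
	{
		/* Inspect tcode/offset/payload here, then complete the request. */
		fw_send_response(card, request, RCODE_COMPLETE);
	}

	static struct fw_address_handler unit_handler = {
		.length           = 0x100,		/* quadlet-aligned, non-zero */
		.address_callback = handle_unit_request,
	};

	static int register_unit_handler(void)
	{
		static const struct fw_address_region unit_region = {
			.start = 0xfffff0000900ULL,	/* quadlet-aligned */
			.end   = 0xfffff0000a00ULL,
		};
		int ret;

		ret = fw_core_add_address_handler(&unit_handler, &unit_region);
		if (ret == 0)
			fw_notify("unit handler at offset %016llx\n",
				  (unsigned long long)unit_handler.offset);

		return ret;
	}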
@@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
EXPORT_SYMBOL(fw_core_add_address_handler);
/**
- * Deallocate a range of addresses allocated with fw_allocate. This
- * will call the associated callback one last time with a the special
- * tcode TCODE_DEALLOCATE, to let the client destroy the registered
- * callback data. For convenience, the callback parameters offset and
- * length are set to the start and the length respectively for the
- * deallocated region, payload is set to NULL.
+ * fw_core_remove_address_handler - unregister an address handler
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
@@ -518,9 +527,8 @@ struct fw_request {
u32 data[0];
};
-static void
-free_response_callback(struct fw_packet *packet,
- struct fw_card *card, int status)
+static void free_response_callback(struct fw_packet *packet,
+ struct fw_card *card, int status)
{
struct fw_request *request;
@@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet,
kfree(request);
}
-void
-fw_fill_response(struct fw_packet *response, u32 *request_header,
- int rcode, void *payload, size_t length)
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+ int rcode, void *payload, size_t length)
{
int tcode, tlabel, extended_tcode, source, destination;
@@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
}
EXPORT_SYMBOL(fw_fill_response);
-static struct fw_request *
-allocate_request(struct fw_packet *p)
+static struct fw_request *allocate_request(struct fw_packet *p)
{
struct fw_request *request;
u32 *data, length;
@@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p)
return request;
}
-void
-fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
+void fw_send_response(struct fw_card *card,
+ struct fw_request *request, int rcode)
{
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
@@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
}
EXPORT_SYMBOL(fw_send_response);
-void
-fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
struct fw_address_handler *handler;
struct fw_request *request;
@@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
}
EXPORT_SYMBOL(fw_core_handle_request);
-void
-fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t;
unsigned long flags;
@@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region =
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
-static void
-handle_topology_map(struct fw_card *card, struct fw_request *request,
- int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+static void handle_topology_map(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source, int generation,
+ int speed, unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
{
int i, start, end;
__be32 *map;
@@ -832,12 +834,10 @@ static const struct fw_address_region registers_region =
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
-static void
-handle_registers(struct fw_card *card, struct fw_request *request,
- int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+static void handle_registers(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source, int generation,
+ int speed, unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
unsigned long long bus_time;
@@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = {
static int __init fw_core_init(void)
{
- int retval;
+ int ret;
- retval = bus_register(&fw_bus_type);
- if (retval < 0)
- return retval;
+ ret = bus_register(&fw_bus_type);
+ if (ret < 0)
+ return ret;
fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
if (fw_cdev_major < 0) {
@@ -951,19 +951,10 @@ static int __init fw_core_init(void)
return fw_cdev_major;
}
- retval = fw_core_add_address_handler(&topology_map,
- &topology_map_region);
- BUG_ON(retval < 0);
-
- retval = fw_core_add_address_handler(&registers,
- &registers_region);
- BUG_ON(retval < 0);
-
- /* Add the vendor textual descriptor. */
- retval = fw_core_add_descriptor(&vendor_id_descriptor);
- BUG_ON(retval < 0);
- retval = fw_core_add_descriptor(&model_id_descriptor);
- BUG_ON(retval < 0);
+ fw_core_add_address_handler(&topology_map, &topology_map_region);
+ fw_core_add_address_handler(&registers, &registers_region);
+ fw_core_add_descriptor(&vendor_id_descriptor);
+ fw_core_add_descriptor(&model_id_descriptor);
return 0;
}
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 1d78e9cc5940..dfa799068f89 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -82,14 +82,14 @@
#define CSR_SPEED_MAP 0x2000
#define CSR_SPEED_MAP_END 0x3000
+#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID (1 << 30)
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-static inline void
-fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
+static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
{
u32 *dst = _dst;
__be32 *src = _src;
@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
dst[i] = be32_to_cpu(src[i]);
}
-static inline void
-fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
+static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{
fw_memcpy_from_be32(_dst, _src, size);
}
@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
struct fw_card *card, int status);
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
- void *data,
- size_t length,
+ void *data, size_t length,
void *callback_data);
/*
@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card,
void *data, size_t length,
void *callback_data);
-typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
- int node_id, int generation,
- u32 *self_ids,
- int self_id_count,
- void *callback_data);
-
struct fw_packet {
int speed;
int generation;
@@ -187,12 +179,6 @@ struct fw_transaction {
void *callback_data;
};
-static inline struct fw_packet *
-fw_packet(struct list_head *l)
-{
- return list_entry(l, struct fw_packet, link);
-}
-
struct fw_address_handler {
u64 offset;
size_t length;
@@ -201,7 +187,6 @@ struct fw_address_handler {
struct list_head link;
};
-
struct fw_address_region {
u64 start;
u64 end;
@@ -255,6 +240,7 @@ struct fw_card {
int bm_retries;
int bm_generation;
+ bool broadcast_channel_allocated;
u32 broadcast_channel;
u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};
@@ -315,10 +301,8 @@ struct fw_iso_packet {
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
- u32 cycle,
- size_t header_length,
- void *header,
- void *data);
+ u32 cycle, size_t header_length,
+ void *header, void *data);
/*
* An iso buffer is just a set of pages mapped for DMA in the
@@ -344,36 +328,25 @@ struct fw_iso_context {
void *callback_data;
};
-int
-fw_iso_buffer_init(struct fw_iso_buffer *buffer,
- struct fw_card *card,
- int page_count,
- enum dma_data_direction direction);
-int
-fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void
-fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *
-fw_iso_context_create(struct fw_card *card, int type,
- int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
-
-void
-fw_iso_context_destroy(struct fw_iso_context *ctx);
-
-int
-fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload);
-
-int
-fw_iso_context_start(struct fw_iso_context *ctx,
- int cycle, int sync, int tags);
-
-int
-fw_iso_context_stop(struct fw_iso_context *ctx);
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction);
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+ int type, int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+ u64 channels_mask, int *channel, int *bandwidth, bool allocate);
struct fw_card_driver {
/*
@@ -415,7 +388,7 @@ struct fw_card_driver {
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
- int type, size_t header_size);
+ int type, int channel, size_t header_size);
void (*free_iso_context)(struct fw_iso_context *ctx);
int (*start_iso)(struct fw_iso_context *ctx,
@@ -429,54 +402,45 @@ struct fw_card_driver {
int (*stop_iso)(struct fw_iso_context *ctx);
};
-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
-void
-fw_send_request(struct fw_card *card, struct fw_transaction *t,
+void fw_send_request(struct fw_card *card, struct fw_transaction *t,
int tcode, int destination_id, int generation, int speed,
- unsigned long long offset, void *data, size_t length,
+ unsigned long long offset, void *payload, size_t length,
fw_transaction_callback_t callback, void *callback_data);
-
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
- int generation, int speed, unsigned long long offset,
- void *data, size_t length);
-
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction);
-
void fw_flush_transactions(struct fw_card *card);
-
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+ int generation, int speed, unsigned long long offset,
+ void *payload, size_t length);
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+ return tag << 14 | channel << 8 | sy;
+}
+
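With this helper, asynchronous stream packets go through the normal fw_send_request() path: the caller passes TCODE_STREAM_DATA and encodes tag, channel and sy into destination_id, as the kerneldoc added to fw_send_request() notes. A rough sketch (tag/sy values and names are illustrative; offset is unused for stream packets, so 0 is passed):

	static void stream_packet_sent(struct fw_card *card, int rcode,
				       void *data, size_t length, void *callback_data)
	{
		/* rcode is RCODE_COMPLETE on success; data and length are unused here. */
	}

	static void send_stream_packet(struct fw_card *card, struct fw_transaction *t,
				       int generation, int speed, int channel,
				       void *payload, size_t length)
	{
		int destination_id = fw_stream_packet_destination_id(3, channel, 0);

		fw_send_request(card, t, TCODE_STREAM_DATA, destination_id,
				generation, speed, 0, payload, length,
				stream_packet_sent, NULL);
	}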
/*
* Called by the topology code to inform the device code of node
* activity; found, lost, or updated nodes.
*/
-void
-fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* API used by card level drivers */
-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
- struct device *device);
-int
-fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid);
-
-void
-fw_core_remove_card(struct fw_card *card);
-
-void
-fw_core_handle_bus_reset(struct fw_card *card,
- int node_id, int generation,
- int self_id_count, u32 *self_ids);
-void
-fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
-
-void
-fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver, struct device *device);
+int fw_card_add(struct fw_card *card,
+ u32 max_receive, u32 link_speed, u64 guid);
+void fw_core_remove_card(struct fw_card *card);
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
+ int generation, int self_id_count, u32 *self_ids);
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+
+extern int fw_irm_set_broadcast_channel_register(struct device *dev,
+ void *data);
#endif /* __fw_transaction_h */
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 30022c4a5c12..4ec5061fa584 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
- drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
+ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
+ drm_info.o drm_debugfs.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
new file mode 100644
index 000000000000..c77c6c6d9d2c
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -0,0 +1,235 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+static struct drm_info_list drm_debugfs_list[] = {
+ {"name", drm_name_info, 0},
+ {"vm", drm_vm_info, 0},
+ {"clients", drm_clients_info, 0},
+ {"queues", drm_queues_info, 0},
+ {"bufs", drm_bufs_info, 0},
+ {"gem_names", drm_gem_name_info, DRIVER_GEM},
+ {"gem_objects", drm_gem_object_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+ {"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct drm_info_node *node = inode->i_private;
+
+ return single_open(file, node->info_ent->show, node);
+}
+
+
+static const struct file_operations drm_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * drm_debugfs_lists in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+ struct drm_info_node *tmp;
+ char name[64];
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ u32 features = files[i].driver_features;
+
+ if (features != 0 &&
+ (dev->driver->driver_features & features) != features)
+ continue;
+
+ tmp = drm_alloc(sizeof(struct drm_info_node),
+ _DRM_DRIVER);
+ ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+ root, tmp, &drm_debugfs_fops);
+ if (!ent) {
+ DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
+ name, files[i].name);
+ drm_free(tmp, sizeof(struct drm_info_node),
+ _DRM_DRIVER);
+ ret = -1;
+ goto fail;
+ }
+
+ tmp->minor = minor;
+ tmp->dent = ent;
+ tmp->info_ent = &files[i];
+ list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+ }
+ return 0;
+
+fail:
+ drm_debugfs_remove_files(files, count, minor);
+ return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
+
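A driver opts in through the debugfs_init and debugfs_cleanup hooks consumed by drm_debugfs_init() and drm_debugfs_cleanup() below, supplying its own drm_info_list array just like drm_debugfs_list above. A minimal sketch of the driver side (all foo_* names are hypothetical):

	static int foo_debugfs_counters(struct seq_file *m, void *data)
	{
		struct drm_info_node *node = (struct drm_info_node *) m->private;
		struct drm_device *dev = node->minor->dev;

		seq_printf(m, "driver: %s\n", dev->driver->pci_driver.name);
		return 0;
	}

	static struct drm_info_list foo_debugfs_list[] = {
		{"counters", foo_debugfs_counters, 0},
	};

	static int foo_debugfs_init(struct drm_minor *minor)
	{
		return drm_debugfs_create_files(foo_debugfs_list,
						ARRAY_SIZE(foo_debugfs_list),
						minor->debugfs_root, minor);
	}

	static void foo_debugfs_cleanup(struct drm_minor *minor)
	{
		drm_debugfs_remove_files(foo_debugfs_list,
					 ARRAY_SIZE(foo_debugfs_list), minor);
	}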
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI debugfs dir entry.
+ *
+ * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
+ * "/debugfs/dri/%minor%/", and each entry in debugfs_list as
+ * "/debugfs/dri/%minor%/%name%".
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+ struct dentry *root)
+{
+ struct drm_device *dev = minor->dev;
+ char name[64];
+ int ret;
+
+ INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+ sprintf(name, "%d", minor_id);
+ minor->debugfs_root = debugfs_create_dir(name, root);
+ if (!minor->debugfs_root) {
+ DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
+ return -1;
+ }
+
+ ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ if (ret) {
+ debugfs_remove(minor->debugfs_root);
+ minor->debugfs_root = NULL;
+ DRM_ERROR("Failed to create core drm debugfs files\n");
+ return ret;
+ }
+
+ if (dev->driver->debugfs_init) {
+ ret = dev->driver->debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("DRM: Driver failed to initialize "
+ "/debugfs/dri.\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The minor of which we should remove the files
+ * \return always zero.
+ *
+ * Remove the given debugfs entries that were previously created by
+ * drm_debugfs_create_files().
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+ struct drm_minor *minor)
+{
+ struct list_head *pos, *q;
+ struct drm_info_node *tmp;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+ tmp = list_entry(pos, struct drm_info_node, list);
+ if (tmp->info_ent == &files[i]) {
+ debugfs_remove(tmp->dent);
+ list_del(pos);
+ drm_free(tmp, sizeof(struct drm_info_node),
+ _DRM_DRIVER);
+ }
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
+/**
+ * Cleanup the debugfs filesystem resources.
+ *
+ * \param minor device minor number.
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+
+ if (!minor->debugfs_root)
+ return 0;
+
+ if (dev->driver->debugfs_cleanup)
+ dev->driver->debugfs_cleanup(minor);
+
+ drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+
+ debugfs_remove(minor->debugfs_root);
+ minor->debugfs_root = NULL;
+
+ return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 14c7a23dc157..ed32edb17166 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,9 +46,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include "drmP.h"
#include "drm_core.h"
+
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
@@ -382,6 +384,13 @@ static int __init drm_core_init(void)
goto err_p3;
}
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ if (!drm_debugfs_root) {
+ DRM_ERROR("Cannot create /debugfs/dri\n");
+ ret = -1;
+ goto err_p3;
+ }
+
drm_mem_init();
DRM_INFO("Initialized %s %d.%d.%d %s\n",
@@ -400,6 +409,7 @@ err_p1:
static void __exit drm_core_exit(void)
{
remove_proc_entry("dri", NULL);
+ debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
new file mode 100644
index 000000000000..1b699768ccfb
--- /dev/null
+++ b/drivers/gpu/drm/drm_info.c
@@ -0,0 +1,328 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+ struct drm_master *master = minor->master;
+
+ if (!master)
+ return 0;
+
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ dev->driver->pci_driver.name,
+ pci_name(dev->pdev), master->unique);
+ } else {
+ seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+ pci_name(dev->pdev));
+ }
+
+ return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_map *map;
+ struct drm_map_list *r_list;
+
+ /* Hardcoded from _DRM_FRAME_BUFFER,
+ _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+ _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+ const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+ const char *type;
+ int i;
+
+ mutex_lock(&dev->struct_mutex);
+ seq_printf(m, "slot offset size type flags address mtrr\n\n");
+ i = 0;
+ list_for_each_entry(r_list, &dev->maplist, head) {
+ map = r_list->map;
+ if (!map)
+ continue;
+ if (map->type < 0 || map->type > 5)
+ type = "??";
+ else
+ type = types[map->type];
+
+ seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+ i,
+ map->offset,
+ map->size, type, map->flags,
+ (unsigned long) r_list->user_token);
+ if (map->mtrr < 0)
+ seq_printf(m, "none\n");
+ else
+ seq_printf(m, "%4d\n", map->mtrr);
+ i++;
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ */
+int drm_queues_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int i;
+ struct drm_queue *q;
+
+ mutex_lock(&dev->struct_mutex);
+ seq_printf(m, " ctx/flags use fin"
+ " blk/rw/rwf wait flushed queued"
+ " locks\n\n");
+ for (i = 0; i < dev->queue_count; i++) {
+ q = dev->queuelist[i];
+ atomic_inc(&q->use_count);
+ seq_printf(m, "%5d/0x%03x %5d %5d"
+ " %5d/%c%c/%c%c%c %5Zd\n",
+ i,
+ q->flags,
+ atomic_read(&q->use_count),
+ atomic_read(&q->finalization),
+ atomic_read(&q->block_count),
+ atomic_read(&q->block_read) ? 'r' : '-',
+ atomic_read(&q->block_write) ? 'w' : '-',
+ waitqueue_active(&q->read_queue) ? 'r' : '-',
+ waitqueue_active(&q->write_queue) ? 'w' : '-',
+ waitqueue_active(&q->flush_queue) ? 'f' : '-',
+ DRM_BUFCOUNT(&q->waitlist));
+ atomic_dec(&q->use_count);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_device_dma *dma;
+ int i, seg_pages;
+
+ mutex_lock(&dev->struct_mutex);
+ dma = dev->dma;
+ if (!dma) {
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ seq_printf(m, " o size count free segs pages kB\n\n");
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].buf_count) {
+ seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+ seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+ atomic_read(&dma->bufs[i].freelist.count),
+ dma->bufs[i].seg_count,
+ seg_pages,
+ seg_pages * PAGE_SIZE / 1024);
+ }
+ }
+ seq_printf(m, "\n");
+ for (i = 0; i < dma->buf_count; i++) {
+ if (i && !(i % 32))
+ seq_printf(m, "\n");
+ seq_printf(m, " %d", dma->buflist[i]->list);
+ }
+ seq_printf(m, "\n");
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int crtc;
+
+ mutex_lock(&dev->struct_mutex);
+ for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+ seq_printf(m, "CRTC %d enable: %d\n",
+ crtc, atomic_read(&dev->vblank_refcount[crtc]));
+ seq_printf(m, "CRTC %d counter: %d\n",
+ crtc, drm_vblank_count(dev, crtc));
+ seq_printf(m, "CRTC %d last wait: %d\n",
+ crtc, dev->last_vblank_wait[crtc]);
+ seq_printf(m, "CRTC %d in modeset: %d\n",
+ crtc, dev->vblank_inmodeset[crtc]);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_file *priv;
+
+ mutex_lock(&dev->struct_mutex);
+ seq_printf(m, "a dev pid uid magic ioctls\n\n");
+ list_for_each_entry(priv, &dev->filelist, lhead) {
+ seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+ priv->authenticated ? 'y' : 'n',
+ priv->minor->index,
+ priv->pid,
+ priv->uid, priv->magic, priv->ioctl_count);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+
+int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+ struct seq_file *m = data;
+
+ seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
+
+ seq_printf(m, "%6d %8zd %7d %8d\n",
+ obj->name, obj->size,
+ atomic_read(&obj->handlecount.refcount),
+ atomic_read(&obj->refcount.refcount));
+ return 0;
+}
+
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ seq_printf(m, " name size handles refcount\n");
+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+ return 0;
+}
+
+int drm_gem_object_info(struct seq_file *m, void* data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
+ seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
+ seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
+ seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
+ seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+ seq_printf(m, "%d gtt total\n", dev->gtt_total);
+ return 0;
+}
+
+#if DRM_DEBUG_CODE
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_vma_entry *pt;
+ struct vm_area_struct *vma;
+#if defined(__i386__)
+ unsigned int pgprot;
+#endif
+
+ mutex_lock(&dev->struct_mutex);
+ seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+ atomic_read(&dev->vma_count),
+ high_memory, (u64)virt_to_phys(high_memory));
+
+ list_for_each_entry(pt, &dev->vmalist, head) {
+ vma = pt->vma;
+ if (!vma)
+ continue;
+ seq_printf(m,
+ "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+ pt->pid, vma->vm_start, vma->vm_end,
+ vma->vm_flags & VM_READ ? 'r' : '-',
+ vma->vm_flags & VM_WRITE ? 'w' : '-',
+ vma->vm_flags & VM_EXEC ? 'x' : '-',
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
+ vma->vm_pgoff);
+
+#if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
+ seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+ pgprot & _PAGE_PRESENT ? 'p' : '-',
+ pgprot & _PAGE_RW ? 'w' : 'r',
+ pgprot & _PAGE_USER ? 'u' : 's',
+ pgprot & _PAGE_PWT ? 't' : 'b',
+ pgprot & _PAGE_PCD ? 'u' : 'c',
+ pgprot & _PAGE_ACCESSED ? 'a' : '-',
+ pgprot & _PAGE_DIRTY ? 'd' : '-',
+ pgprot & _PAGE_PSE ? 'm' : 'k',
+ pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+ seq_printf(m, "\n");
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+#endif
+
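[Editorial illustration, not part of the patch] The seq_file based handlers above (drm_bufs_info(), drm_vblank_info(), drm_clients_info(), and friends) all follow the same shape: take struct_mutex, emit each record with seq_printf(), drop the lock, and return 0, leaving buffering and partial reads to the seq_file core. A minimal standalone sketch of that shape, with hypothetical demo_* names:

#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/list.h>

/* Hypothetical per-device state, for illustration only. */
struct demo_client {
	struct list_head head;
	int pid;
};

struct demo_device {
	struct mutex lock;
	struct list_head clients;
};

/*
 * seq_file "show" callback in the style of the drm_*_info() helpers above:
 * grab the device lock, print one line per record with seq_printf(), drop
 * the lock and return 0.  seq_file takes care of buffering and partial
 * reads, so no offset/eof bookkeeping is needed here.
 */
static int demo_clients_show(struct seq_file *m, void *unused)
{
	struct demo_device *dev = m->private;
	struct demo_client *c;

	mutex_lock(&dev->lock);
	seq_printf(m, "  pid\n");
	list_for_each_entry(c, &dev->clients, head)
		seq_printf(m, "%5d\n", c->pid);
	mutex_unlock(&dev->lock);
	return 0;
}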
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index b756f043a5f4..9b3c5af61e98 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -37,697 +37,196 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/seq_file.h>
#include "drmP.h"
-static int drm_name_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int drm_vm_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int drm_clients_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int drm_queues_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int drm_bufs_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int drm_vblank_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-#if DRM_DEBUG_CODE
-static int drm_vma_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-#endif
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
/**
* Proc file list.
*/
-static struct drm_proc_list {
- const char *name; /**< file name */
- int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
- u32 driver_features; /**< Required driver features for this entry */
-} drm_proc_list[] = {
+static struct drm_info_list drm_proc_list[] = {
{"name", drm_name_info, 0},
- {"mem", drm_mem_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
- {"vblank", drm_vblank_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
{"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
- {"vma", drm_vma_info},
+ {"vma", drm_vma_info, 0},
#endif
};
-
#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+ struct drm_info_node* node = PDE(inode)->data;
+
+ return single_open(file, node->info_ent->show, node);
+}
+
+static const struct file_operations drm_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
/**
- * Initialize the DRI proc filesystem for a device.
+ * Initialize a given set of proc files for a device
*
- * \param dev DRM device.
- * \param minor device minor number.
+ * \param files The array of files to create
+ * \param count The number of files given
* \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
*
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
+ * Create a given set of proc files represented by an array of
+ * drm_info_list entries in the given root directory.
*/
-int drm_proc_init(struct drm_minor *minor, int minor_id,
- struct proc_dir_entry *root)
+int drm_proc_create_files(struct drm_info_list *files, int count,
+ struct proc_dir_entry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct proc_dir_entry *ent;
- int i, j, ret;
+ struct drm_info_node *tmp;
char name[64];
+ int i, ret;
- sprintf(name, "%d", minor_id);
- minor->dev_root = proc_mkdir(name, root);
- if (!minor->dev_root) {
- DRM_ERROR("Cannot create /proc/dri/%s\n", name);
- return -1;
- }
-
- for (i = 0; i < DRM_PROC_ENTRIES; i++) {
- u32 features = drm_proc_list[i].driver_features;
+ for (i = 0; i < count; i++) {
+ u32 features = files[i].driver_features;
if (features != 0 &&
(dev->driver->driver_features & features) != features)
continue;
- ent = create_proc_entry(drm_proc_list[i].name,
- S_IFREG | S_IRUGO, minor->dev_root);
+ tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
+ ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, drm_proc_list[i].name);
+ name, files[i].name);
+ drm_free(tmp, sizeof(struct drm_info_node),
+ _DRM_DRIVER);
ret = -1;
goto fail;
}
- ent->read_proc = drm_proc_list[i].f;
- ent->data = minor;
- }
- if (dev->driver->proc_init) {
- ret = dev->driver->proc_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/proc/dri.\n");
- goto fail;
- }
+ ent->proc_fops = &drm_proc_fops;
+ ent->data = tmp;
+ tmp->minor = minor;
+ tmp->info_ent = &files[i];
+ list_add(&(tmp->list), &(minor->proc_nodes.list));
}
-
return 0;
- fail:
- for (j = 0; j < i; j++)
- remove_proc_entry(drm_proc_list[i].name,
- minor->dev_root);
- remove_proc_entry(name, root);
- minor->dev_root = NULL;
+fail:
+ for (i = 0; i < count; i++)
+ remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
return ret;
}
/**
- * Cleanup the proc filesystem resources.
+ * Initialize the DRI proc filesystem for a device
*
- * \param minor device minor number.
+ * \param dev DRM device
+ * \param minor device minor number
* \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
+ * \param dev_root resulting DRI device proc dir entry.
+ * \return root entry pointer on success, or NULL on failure.
*
- * Remove all proc entries created by proc_init().
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
*/
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+ struct proc_dir_entry *root)
{
struct drm_device *dev = minor->dev;
- int i;
char name[64];
+ int ret;
- if (!root || !minor->dev_root)
- return 0;
-
- if (dev->driver->proc_cleanup)
- dev->driver->proc_cleanup(minor);
-
- for (i = 0; i < DRM_PROC_ENTRIES; i++)
- remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
- sprintf(name, "%d", minor->index);
- remove_proc_entry(name, root);
-
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints the device name together with the bus id if available.
- */
-static int drm_name_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_master *master = minor->master;
- struct drm_device *dev = minor->dev;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
+ INIT_LIST_HEAD(&minor->proc_nodes.list);
+ sprintf(name, "%d", minor_id);
+ minor->proc_root = proc_mkdir(name, root);
+ if (!minor->proc_root) {
+ DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+ return -1;
}
- if (!master)
- return 0;
-
- *start = &buf[offset];
- *eof = 0;
-
- if (master->unique) {
- DRM_PROC_PRINT("%s %s %s\n",
- dev->driver->pci_driver.name,
- pci_name(dev->pdev), master->unique);
- } else {
- DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
- pci_name(dev->pdev));
+ ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+ minor->proc_root, minor);
+ if (ret) {
+ remove_proc_entry(name, root);
+ minor->proc_root = NULL;
+ DRM_ERROR("Failed to create core drm proc files\n");
+ return ret;
}
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-static int drm__vm_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int len = 0;
- struct drm_map *map;
- struct drm_map_list *r_list;
-
- /* Hardcoded from _DRM_FRAME_BUFFER,
- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
- const char *type;
- int i;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
-
- DRM_PROC_PRINT("slot offset size type flags "
- "address mtrr\n\n");
- i = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->type < 0 || map->type > 5)
- type = "??";
- else
- type = types[map->type];
- DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
- if (map->mtrr < 0) {
- DRM_PROC_PRINT("none\n");
- } else {
- DRM_PROC_PRINT("%4d\n", map->mtrr);
+ if (dev->driver->proc_init) {
+ ret = dev->driver->proc_init(minor);
+ if (ret) {
+ DRM_ERROR("DRM: Driver failed to initialize "
+ "/proc/dri.\n");
+ return ret;
}
- i++;
- }
-
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-/**
- * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vm_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm__vm_info(buf, start, offset, request, eof, data);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
- * Called when "/proc/dri/.../queues" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__queues_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int len = 0;
- int i;
- struct drm_queue *q;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
}
-
- *start = &buf[offset];
- *eof = 0;
-
- DRM_PROC_PRINT(" ctx/flags use fin"
- " blk/rw/rwf wait flushed queued"
- " locks\n\n");
- for (i = 0; i < dev->queue_count; i++) {
- q = dev->queuelist[i];
- atomic_inc(&q->use_count);
- DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
- "%5d/0x%03x %5d %5d"
- " %5d/%c%c/%c%c%c %5Zd\n",
- i,
- q->flags,
- atomic_read(&q->use_count),
- atomic_read(&q->finalization),
- atomic_read(&q->block_count),
- atomic_read(&q->block_read) ? 'r' : '-',
- atomic_read(&q->block_write) ? 'w' : '-',
- waitqueue_active(&q->read_queue) ? 'r' : '-',
- waitqueue_active(&q->
- write_queue) ? 'w' : '-',
- waitqueue_active(&q->
- flush_queue) ? 'f' : '-',
- DRM_BUFCOUNT(&q->waitlist));
- atomic_dec(&q->use_count);
- }
-
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-/**
- * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_queues_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm__queues_info(buf, start, offset, request, eof, data);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
-/**
- * Called when "/proc/dri/.../bufs" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
+int drm_proc_remove_files(struct drm_info_list *files, int count,
+ struct drm_minor *minor)
{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int len = 0;
- struct drm_device_dma *dma = dev->dma;
+ struct list_head *pos, *q;
+ struct drm_info_node *tmp;
int i;
- if (!dma || offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
-
- DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].buf_count)
- DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
- i,
- dma->bufs[i].buf_size,
- dma->bufs[i].buf_count,
- atomic_read(&dma->bufs[i]
- .freelist.count),
- dma->bufs[i].seg_count,
- dma->bufs[i].seg_count
- * (1 << dma->bufs[i].page_order),
- (dma->bufs[i].seg_count
- * (1 << dma->bufs[i].page_order))
- * PAGE_SIZE / 1024);
- }
- DRM_PROC_PRINT("\n");
- for (i = 0; i < dma->buf_count; i++) {
- if (i && !(i % 32))
- DRM_PROC_PRINT("\n");
- DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
+ for (i = 0; i < count; i++) {
+ list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+ tmp = list_entry(pos, struct drm_info_node, list);
+ if (tmp->info_ent == &files[i]) {
+ remove_proc_entry(files[i].name,
+ minor->proc_root);
+ list_del(pos);
+ drm_free(tmp, sizeof(struct drm_info_node),
+ _DRM_DRIVER);
+ }
+ }
}
- DRM_PROC_PRINT("\n");
-
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-/**
- * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm__bufs_info(buf, start, offset, request, eof, data);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
/**
- * Called when "/proc/dri/.../vblank" is read.
+ * Cleanup the proc filesystem resources.
*
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int len = 0;
- int crtc;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
-
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- DRM_PROC_PRINT("CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank_refcount[crtc]));
- DRM_PROC_PRINT("CRTC %d counter: %d\n",
- crtc, drm_vblank_count(dev, crtc));
- DRM_PROC_PRINT("CRTC %d last wait: %d\n",
- crtc, dev->last_vblank_wait[crtc]);
- DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
- crtc, dev->vblank_inmodeset[crtc]);
- }
-
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-/**
- * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm__vblank_info(buf, start, offset, request, eof, data);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root DRI device proc dir entry.
+ * \return always zero.
*
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
+ * Remove all proc entries created by proc_init().
*/
-static int drm__clients_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
- struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
- int len = 0;
- struct drm_file *priv;
+ char name[64];
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
+ if (!root || !minor->proc_root)
return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
-
- DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
- list_for_each_entry(priv, &dev->filelist, lhead) {
- DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
- priv->authenticated ? 'y' : 'n',
- priv->minor->index,
- priv->pid,
- priv->uid, priv->magic, priv->ioctl_count);
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-/**
- * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_clients_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm__clients_info(buf, start, offset, request, eof, data);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-struct drm_gem_name_info_data {
- int len;
- char *buf;
- int eof;
-};
+ if (dev->driver->proc_cleanup)
+ dev->driver->proc_cleanup(minor);
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
- struct drm_gem_name_info_data *nid = data;
+ drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
- DRM_INFO("name %d size %zd\n", obj->name, obj->size);
- if (nid->eof)
- return 0;
+ sprintf(name, "%d", minor->index);
+ remove_proc_entry(name, root);
- nid->len += sprintf(&nid->buf[nid->len],
- "%6d %8zd %7d %8d\n",
- obj->name, obj->size,
- atomic_read(&obj->handlecount.refcount),
- atomic_read(&obj->refcount.refcount));
- if (nid->len > DRM_PROC_LIMIT) {
- nid->eof = 1;
- return 0;
- }
return 0;
}
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- struct drm_gem_name_info_data nid;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- nid.len = sprintf(buf, " name size handles refcount\n");
- nid.buf = buf;
- nid.eof = 0;
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
-
- *start = &buf[offset];
- *eof = 0;
- if (nid.len > request + offset)
- return request;
- *eof = 1;
- return nid.len - offset;
-}
-
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
- DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
- DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
- DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
- DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
- DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-#if DRM_DEBUG_CODE
-
-static int drm__vma_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int len = 0;
- struct drm_vma_entry *pt;
- struct vm_area_struct *vma;
-#if defined(__i386__)
- unsigned int pgprot;
-#endif
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
-
- DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
- atomic_read(&dev->vma_count),
- high_memory, (u64)virt_to_phys(high_memory));
- list_for_each_entry(pt, &dev->vmalist, head) {
- if (!(vma = pt->vma))
- continue;
- DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
- pt->pid,
- vma->vm_start,
- vma->vm_end,
- vma->vm_flags & VM_READ ? 'r' : '-',
- vma->vm_flags & VM_WRITE ? 'w' : '-',
- vma->vm_flags & VM_EXEC ? 'x' : '-',
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
- vma->vm_pgoff);
-
-#if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
- DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
- pgprot & _PAGE_PRESENT ? 'p' : '-',
- pgprot & _PAGE_RW ? 'w' : 'r',
- pgprot & _PAGE_USER ? 'u' : 's',
- pgprot & _PAGE_PWT ? 't' : 'b',
- pgprot & _PAGE_PCD ? 'u' : 'c',
- pgprot & _PAGE_ACCESSED ? 'a' : '-',
- pgprot & _PAGE_DIRTY ? 'd' : '-',
- pgprot & _PAGE_PSE ? 'm' : 'k',
- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
- DRM_PROC_PRINT("\n");
- }
-
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int drm_vma_info(char *buf, char **start, off_t offset, int request,
- int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm__vma_info(buf, start, offset, request, eof, data);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-#endif
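[Editorial illustration, not part of the patch] The conversion above removes the old read_proc handlers with their offset/request/eof bookkeeping; each proc entry now points at a small file_operations table whose open routine calls single_open() with the matching show callback. A generic sketch of that wiring in the create_proc_entry() style this patch uses, with hypothetical names:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/stat.h>

static int demo_show(struct seq_file *m, void *unused)
{
	seq_printf(m, "hello from procfs\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is whatever was stored in ent->data below. */
	return single_open(file, demo_show, PDE(inode)->data);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_proc_init(void)
{
	struct proc_dir_entry *ent;

	ent = create_proc_entry("demo", S_IFREG | S_IRUGO, NULL);
	if (!ent)
		return -ENOMEM;
	ent->proc_fops = &demo_proc_fops;
	ent->data = NULL;	/* private data handed to demo_proc_open() */
	return 0;
}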
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7c8b15b22bf2..48f33be8fd0f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -50,6 +50,7 @@ struct idr drm_minors_idr;
struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
static int drm_minor_get_id(struct drm_device *dev, int type)
{
@@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
goto err_mem;
}
} else
- new_minor->dev_root = NULL;
+ new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+ ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
+ goto err_g2;
+ }
+#endif
ret = drm_sysfs_device_add(new_minor);
if (ret) {
@@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p)
if (minor->type == DRM_MINOR_LEGACY)
drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_cleanup(minor);
+#endif
+
drm_sysfs_device_remove(minor);
idr_remove(&drm_minors_idr, minor->index);
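[Editorial illustration, not part of the patch] drm_get_minor() and drm_put_minor() now call drm_debugfs_init()/drm_debugfs_cleanup() under CONFIG_DEBUG_FS. As a rough sketch of the per-minor directory handling that kind of hook performs; the demo_* names and layout are assumptions, not the patch's actual drm_debugfs.c implementation:

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical per-minor debugfs bookkeeping, for illustration only. */
struct demo_minor {
	int index;
	struct dentry *debugfs_root;
};

static struct dentry *demo_debugfs_top;	/* e.g. <debugfs>/demo */

static int demo_debugfs_init(struct demo_minor *minor)
{
	char name[16];

	snprintf(name, sizeof(name), "%d", minor->index);
	minor->debugfs_root = debugfs_create_dir(name, demo_debugfs_top);
	if (!minor->debugfs_root)
		return -ENOMEM;
	/* individual info files would be created under this directory */
	return 0;
}

static void demo_debugfs_cleanup(struct demo_minor *minor)
{
	/* removes the directory together with any files created inside it */
	debugfs_remove_recursive(minor->debugfs_root);
	minor->debugfs_root = NULL;
}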
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 793cba39d832..51c5a050aa73 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
- i915_gem_proc.o \
+ i915_gem_debugfs.o \
i915_gem_tiling.o \
intel_display.o \
intel_crt.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6d21b9e48b89..a818b377e1f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->space >= n)
return 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
if (ring->head != last_head)
i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
return ret;
}
-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
for (i = 0; i < dwords;) {
int cmd, sz;
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
- return -EINVAL;
+ cmd = buffer[i];
if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
OUT_RING(cmd);
while (++i, --sz) {
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
- sizeof(cmd))) {
- return -EINVAL;
- }
- OUT_RING(cmd);
+ OUT_RING(buffer[i]);
}
}
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
int
i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect __user *boxes,
+ struct drm_clip_rect *boxes,
int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect box;
+ struct drm_clip_rect box = boxes[i];
RING_LOCALS;
- if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
- return -EFAULT;
- }
-
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
- drm_i915_cmdbuffer_t * cmd)
+ drm_i915_cmdbuffer_t *cmd,
+ struct drm_clip_rect *cliprects,
+ void *cmdbuf)
{
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- ret = i915_emit_box(dev, cmd->cliprects, i,
+ ret = i915_emit_box(dev, cliprects, i,
cmd->DR1, cmd->DR4);
if (ret)
return ret;
}
- ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+ ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
if (ret)
return ret;
}
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
- drm_i915_batchbuffer_t * batch)
+ drm_i915_batchbuffer_t * batch,
+ struct drm_clip_rect *cliprects)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect __user *boxes = batch->cliprects;
int nbox = batch->num_cliprects;
int i = 0, count;
RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, boxes, i,
+ int ret = i915_emit_box(dev, cliprects, i,
batch->DR1, batch->DR4);
if (ret)
return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
+ struct drm_clip_rect *cliprects = NULL;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
- batch->num_cliprects *
- sizeof(struct drm_clip_rect)))
- return -EFAULT;
+ if (batch->num_cliprects < 0)
+ return -EINVAL;
+
+ if (batch->num_cliprects) {
+ cliprects = drm_calloc(batch->num_cliprects,
+ sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+ if (cliprects == NULL)
+ return -ENOMEM;
+
+ ret = copy_from_user(cliprects, batch->cliprects,
+ batch->num_cliprects *
+ sizeof(struct drm_clip_rect));
+ if (ret != 0)
+ goto fail_free;
+ }
mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_batchbuffer(dev, batch);
+ ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
mutex_unlock(&dev->struct_mutex);
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+ drm_free(cliprects,
+ batch->num_cliprects * sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+
return ret;
}
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
+ struct drm_clip_rect *cliprects = NULL;
+ void *batch_data;
int ret;
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (cmdbuf->num_cliprects &&
- DRM_VERIFYAREA_READ(cmdbuf->cliprects,
- cmdbuf->num_cliprects *
- sizeof(struct drm_clip_rect))) {
- DRM_ERROR("Fault accessing cliprects\n");
- return -EFAULT;
+ if (cmdbuf->num_cliprects < 0)
+ return -EINVAL;
+
+ batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
+ if (batch_data == NULL)
+ return -ENOMEM;
+
+ ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+ if (ret != 0)
+ goto fail_batch_free;
+
+ if (cmdbuf->num_cliprects) {
+ cliprects = drm_calloc(cmdbuf->num_cliprects,
+ sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+ if (cliprects == NULL)
+ goto fail_batch_free;
+
+ ret = copy_from_user(cliprects, cmdbuf->cliprects,
+ cmdbuf->num_cliprects *
+ sizeof(struct drm_clip_rect));
+ if (ret != 0)
+ goto fail_clip_free;
}
mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+ ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
- return ret;
+ goto fail_batch_free;
}
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
+
+fail_batch_free:
+ drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
+fail_clip_free:
+ drm_free(cliprects,
+ cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+
+ return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
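[Editorial illustration, not part of the patch] Both ioctls above stop validating user pointers with DRM_VERIFYAREA_READ and reading them under the lock; instead the cliprects (and, for the cmdbuffer path, the command buffer itself) are copied into kernel memory with copy_from_user() before struct_mutex is taken, so no page faults can occur while the lock is held. A stripped-down sketch of that staging pattern; the demo_* names and the rectangle type are placeholders, not the driver's types:

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/errno.h>

/* Placeholder rectangle type standing in for struct drm_clip_rect. */
struct demo_rect {
	int x1, y1, x2, y2;
};

/*
 * Copy the user-supplied rectangles into kernel memory *before* taking
 * the lock, so the copy is free to fault and sleep; only then enter the
 * critical section and work on the kernel-side copy.
 */
static int demo_submit(struct mutex *lock,
		       const struct demo_rect __user *user_rects, int count)
{
	struct demo_rect *rects = NULL;
	int ret = 0;

	if (count < 0)
		return -EINVAL;

	if (count) {
		rects = kcalloc(count, sizeof(*rects), GFP_KERNEL);
		if (!rects)
			return -ENOMEM;
		if (copy_from_user(rects, user_rects,
				   count * sizeof(*rects))) {
			ret = -EFAULT;
			goto out_free;
		}
	}

	mutex_lock(lock);
	/* ... dispatch using the kernel copy in rects[] ... */
	mutex_unlock(lock);

out_free:
	kfree(rects);
	return ret;
}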
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b293ef0bae71..dcb91f5df6e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,8 +150,10 @@ static struct drm_driver driver = {
.get_reg_ofs = drm_core_get_reg_ofs,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
- .proc_init = i915_gem_proc_init,
- .proc_cleanup = i915_gem_proc_cleanup,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = i915_gem_debugfs_init,
+ .debugfs_cleanup = i915_gem_debugfs_cleanup,
+#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6cc9861e0a1..c1685d0c704f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
/** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem;
- struct page **page_list;
+ struct page **pages;
+ int pages_refcount;
/**
* Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect __user *boxes,
+ struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
/* i915_irq.c */
@@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-int i915_gem_proc_init(struct drm_minor *minor);
-void i915_gem_proc_cleanup(struct drm_minor *minor);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
void i915_dump_lru(struct drm_device *dev, const char *where);
+/* i915_debugfs.c */
+int i915_gem_debugfs_init(struct drm_minor *minor);
+void i915_gem_debugfs_cleanup(struct drm_minor *minor);
+
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
@@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E22 || \
IS_GM45(dev))
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
- (dev)->pci_device == 0x29D2)
+ (dev)->pci_device == 0x29D2 || \
+ (IS_IGD(dev)))
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+ IS_IGD(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37427e4016cb..b52cba0f16d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_get_pages(struct drm_gem_object *obj);
+static void i915_gem_object_put_pages(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static inline int
+fast_shmem_read(struct page **pages,
+ loff_t page_base, int page_offset,
+ char __user *data,
+ int length)
+{
+ char __iomem *vaddr;
+ int ret;
+
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+ if (vaddr == NULL)
+ return -ENOMEM;
+ ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ return ret;
+}
+
+static inline int
+slow_shmem_copy(struct page *dst_page,
+ int dst_offset,
+ struct page *src_page,
+ int src_offset,
+ int length)
+{
+ char *dst_vaddr, *src_vaddr;
+
+ dst_vaddr = kmap_atomic(dst_page, KM_USER0);
+ if (dst_vaddr == NULL)
+ return -ENOMEM;
+
+ src_vaddr = kmap_atomic(src_page, KM_USER1);
+ if (src_vaddr == NULL) {
+ kunmap_atomic(dst_vaddr, KM_USER0);
+ return -ENOMEM;
+ }
+
+ memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+
+ kunmap_atomic(src_vaddr, KM_USER1);
+ kunmap_atomic(dst_vaddr, KM_USER0);
+
+ return 0;
+}
+
+/**
+ * This is the fast shmem pread path, which attempts to copy_to_user directly
+ * from the backing pages of the object to the user's address space. On a
+ * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
+ */
+static int
+i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ ssize_t remain;
+ loff_t offset, page_base;
+ char __user *user_data;
+ int page_offset, page_length;
+ int ret;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
+
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+ args->size);
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
+ offset = args->offset;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * page_base = page offset within aperture
+ * page_offset = offset within page
+ * page_length = bytes to copy for this page
+ */
+ page_base = (offset & ~(PAGE_SIZE-1));
+ page_offset = offset & (PAGE_SIZE-1);
+ page_length = remain;
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+ ret = fast_shmem_read(obj_priv->pages,
+ page_base, page_offset,
+ user_data, page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/**
+ * This is the fallback shmem pread path, which allocates temporary storage
+ * in kernel space to copy_to_user into outside of the struct_mutex, so we
+ * can copy out of the object's backing pages while holding the struct mutex
+ * and not take page faults.
+ */
+static int
+i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct mm_struct *mm = current->mm;
+ struct page **user_pages;
+ ssize_t remain;
+ loff_t offset, pinned_pages, i;
+ loff_t first_data_page, last_data_page, num_pages;
+ int shmem_page_index, shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int page_length;
+ int ret;
+ uint64_t data_ptr = args->data_ptr;
+
+ remain = args->size;
+
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, yet we want to hold it while
+ * dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+ user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (user_pages == NULL)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto fail_put_user_pages;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
+
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+ args->size);
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
+ offset = args->offset;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * shmem_page_index = page number within shmem file
+ * shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ shmem_page_index = offset / PAGE_SIZE;
+ shmem_page_offset = offset & ~PAGE_MASK;
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~PAGE_MASK;
+
+ page_length = remain;
+ if ((shmem_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ ret = slow_shmem_copy(user_pages[data_page_index],
+ data_page_offset,
+ obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ data_ptr += page_length;
+ offset += page_length;
+ }
+
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+ for (i = 0; i < pinned_pages; i++) {
+ SetPageDirty(user_pages[i]);
+ page_cache_release(user_pages[i]);
+ }
+ kfree(user_pages);
+
+ return ret;
+}
+
/**
* Reads data from the object referenced by handle.
*
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pread *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- ssize_t read;
- loff_t offset;
int ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- offset = args->offset;
-
- read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
- args->size, &offset);
- if (read != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (read < 0)
- return read;
- else
- return -EINVAL;
- }
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ if (ret != 0)
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
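[Editorial illustration, not part of the patch] The slow pread path above relies on get_user_pages() to pin the destination buffer up front, so the copy can later run while struct_mutex is held without faulting. A condensed, self-contained sketch of that pin/use/release sequence using the 2.6.29-era get_user_pages() signature; the demo_* names are illustrative only:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/errno.h>

/*
 * Pin the user pages backing [ptr, ptr + size), run 'use' on them, then
 * drop the page references again.  While the pages are pinned they can be
 * kmapped and copied even though other locks are held, which is the trick
 * the slow pread/pwrite paths rely on.
 */
static int demo_with_pinned_pages(unsigned long ptr, size_t size,
				  int (*use)(struct page **pages, int n))
{
	unsigned long first_page = ptr / PAGE_SIZE;
	unsigned long last_page = (ptr + size - 1) / PAGE_SIZE;
	int num_pages = last_page - first_page + 1;
	struct page **pages;
	int pinned, i, ret;

	if (size == 0)
		return 0;

	pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	/* 2.6.29-era signature: (tsk, mm, start, len, write, force, ...) */
	pinned = get_user_pages(current, current->mm, ptr, num_pages,
				0, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (pinned < num_pages) {
		ret = -EFAULT;
		goto out_release;
	}

	ret = use(pages, num_pages);

out_release:
	for (i = 0; i < pinned; i++)
		page_cache_release(pages[i]);
	kfree(pages);
	return ret;
}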
/* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
*/
static inline int
-slow_user_write(struct io_mapping *mapping,
- loff_t page_base, int page_offset,
- char __user *user_data,
- int length)
+slow_kernel_write(struct io_mapping *mapping,
+ loff_t gtt_base, int gtt_offset,
+ struct page *user_page, int user_offset,
+ int length)
{
- char __iomem *vaddr;
+ char *src_vaddr, *dst_vaddr;
unsigned long unwritten;
- vaddr = io_mapping_map_wc(mapping, page_base);
- if (vaddr == NULL)
- return -EFAULT;
- unwritten = __copy_from_user(vaddr + page_offset,
- user_data, length);
- io_mapping_unmap(vaddr);
+ dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
+ src_vaddr = kmap_atomic(user_page, KM_USER1);
+ unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
+ src_vaddr + user_offset,
+ length);
+ kunmap_atomic(src_vaddr, KM_USER1);
+ io_mapping_unmap_atomic(dst_vaddr);
if (unwritten)
return -EFAULT;
return 0;
}
+static inline int
+fast_shmem_write(struct page **pages,
+ loff_t page_base, int page_offset,
+ char __user *data,
+ int length)
+{
+ char __iomem *vaddr;
+
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+ if (vaddr == NULL)
+ return -ENOMEM;
+ __copy_from_user_inatomic(vaddr + page_offset, data, length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ return 0;
+}
+
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
obj_priv = obj->driver_private;
offset = obj_priv->gtt_offset + args->offset;
- obj_priv->dirty = 1;
while (remain > 0) {
/* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
page_offset, user_data, page_length);
/* If we get a fault while copying data, then (presumably) our
- * source page isn't available. In this case, use the
- * non-atomic function
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
*/
- if (ret) {
- ret = slow_user_write (dev_priv->mm.gtt_mapping,
- page_base, page_offset,
- user_data, page_length);
- if (ret)
- goto fail;
- }
+ if (ret)
+ goto fail;
remain -= page_length;
user_data += page_length;
@@ -315,39 +527,284 @@ fail:
return ret;
}
+/**
+ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
+ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
+ */
static int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ ssize_t remain;
+ loff_t gtt_page_base, offset;
+ loff_t first_data_page, last_data_page, num_pages;
+ loff_t pinned_pages, i;
+ struct page **user_pages;
+ struct mm_struct *mm = current->mm;
+ int gtt_page_offset, data_page_offset, data_page_index, page_length;
int ret;
- loff_t offset;
- ssize_t written;
+ uint64_t data_ptr = args->data_ptr;
+
+ remain = args->size;
+
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, and all of the pwrite implementations
+ * want to hold it while dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+ user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (user_pages == NULL)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto out_unpin_pages;
+ }
mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(obj, 0);
+ if (ret)
+ goto out_unlock;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ goto out_unpin_object;
+
+ obj_priv = obj->driver_private;
+ offset = obj_priv->gtt_offset + args->offset;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * gtt_page_base = page offset within aperture
+ * gtt_page_offset = offset within page in aperture
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ gtt_page_base = offset & PAGE_MASK;
+ gtt_page_offset = offset & ~PAGE_MASK;
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~PAGE_MASK;
+
+ page_length = remain;
+ if ((gtt_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - gtt_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
+ gtt_page_base, gtt_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
+
+ /* If we get a fault while copying data, then (presumably) our
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
+ */
+ if (ret)
+ goto out_unpin_object;
+
+ remain -= page_length;
+ offset += page_length;
+ data_ptr += page_length;
+ }
+
+out_unpin_object:
+ i915_gem_object_unpin(obj);
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out_unpin_pages:
+ for (i = 0; i < pinned_pages; i++)
+ page_cache_release(user_pages[i]);
+ kfree(user_pages);
+
+ return ret;
+}
+
+/**
+ * This is the fast shmem pwrite path, which attempts to directly
+ * copy_from_user into the kmapped pages backing the object.
+ */
+static int
+i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ ssize_t remain;
+ loff_t offset, page_base;
+ char __user *user_data;
+ int page_offset, page_length;
+ int ret;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
+ offset = args->offset;
+ obj_priv->dirty = 1;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * page_base = page offset within aperture
+ * page_offset = offset within page
+ * page_length = bytes to copy for this page
+ */
+ page_base = (offset & ~(PAGE_SIZE-1));
+ page_offset = offset & (PAGE_SIZE-1);
+ page_length = remain;
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+ ret = fast_shmem_write(obj_priv->pages,
+ page_base, page_offset,
+ user_data, page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
}
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/**
+ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This avoids taking mmap_sem for faulting on the user's address while the
+ * struct_mutex is held.
+ */
+static int
+i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct mm_struct *mm = current->mm;
+ struct page **user_pages;
+ ssize_t remain;
+ loff_t offset, pinned_pages, i;
+ loff_t first_data_page, last_data_page, num_pages;
+ int shmem_page_index, shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int page_length;
+ int ret;
+ uint64_t data_ptr = args->data_ptr;
+
+ remain = args->size;
+
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, and all of the pwrite implementations
+ * want to hold it while dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+ user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (user_pages == NULL)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto fail_put_user_pages;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
offset = args->offset;
+ obj_priv->dirty = 1;
- written = vfs_write(obj->filp,
- (char __user *)(uintptr_t) args->data_ptr,
- args->size, &offset);
- if (written != args->size) {
- mutex_unlock(&dev->struct_mutex);
- if (written < 0)
- return written;
- else
- return -EINVAL;
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * shmem_page_index = page number within shmem file
+ * shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ shmem_page_index = offset / PAGE_SIZE;
+ shmem_page_offset = offset & ~PAGE_MASK;
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~PAGE_MASK;
+
+ page_length = remain;
+ if ((shmem_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ data_ptr += page_length;
+ offset += page_length;
}
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+ for (i = 0; i < pinned_pages; i++)
+ page_cache_release(user_pages[i]);
+ kfree(user_pages);
- return 0;
+ return ret;
}
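[Editorial illustration, not part of the patch] The fast shmem pwrite path above copies with kmap_atomic() plus __copy_from_user_inatomic() and reports a fault instead of sleeping, which is what lets the caller fall back to the pinned-pages slow path. A small illustrative helper in the same style (not taken from the patch); it copies user data into an array of kernel pages one page at a time and returns -EFAULT on the first fault:

#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/*
 * Illustrative fast-path copy: write 'size' bytes of user data into an
 * array of kernel pages using only atomic kmaps and atomic user copies.
 * kmap_atomic() disables page faults, so a non-resident source page makes
 * __copy_from_user_inatomic() fail instead of sleeping; we return -EFAULT
 * and let the caller fall back to a slow path that pins the pages first.
 */
static int demo_copy_from_user_atomic(struct page **pages, loff_t offset,
				      const char __user *data, size_t size)
{
	while (size > 0) {
		unsigned int page_offset = offset & ~PAGE_MASK;
		unsigned int len = min_t(size_t, size,
					 PAGE_SIZE - page_offset);
		unsigned long unwritten;
		char *vaddr;

		vaddr = kmap_atomic(pages[offset >> PAGE_SHIFT], KM_USER0);
		unwritten = __copy_from_user_inatomic(vaddr + page_offset,
						      data, len);
		kunmap_atomic(vaddr, KM_USER0);

		if (unwritten)
			return -EFAULT;

		offset += len;
		data += len;
		size -= len;
	}
	return 0;
}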
/**
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (obj_priv->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0)
- ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
- else
- ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+ dev->gtt_total != 0) {
+ ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+ if (ret == -EFAULT) {
+ ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+ file_priv);
+ }
+ } else {
+ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+ if (ret == -EFAULT) {
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+ file_priv);
+ }
+ }
#if WATCH_PWRITE
if (ret)
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
}
static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
+i915_gem_object_put_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count = obj->size / PAGE_SIZE;
int i;
- if (obj_priv->page_list == NULL)
- return;
+ BUG_ON(obj_priv->pages_refcount == 0);
+ if (--obj_priv->pages_refcount != 0)
+ return;
for (i = 0; i < page_count; i++)
- if (obj_priv->page_list[i] != NULL) {
+ if (obj_priv->pages[i] != NULL) {
if (obj_priv->dirty)
- set_page_dirty(obj_priv->page_list[i]);
- mark_page_accessed(obj_priv->page_list[i]);
- page_cache_release(obj_priv->page_list[i]);
+ set_page_dirty(obj_priv->pages[i]);
+ mark_page_accessed(obj_priv->pages[i]);
+ page_cache_release(obj_priv->pages[i]);
}
obj_priv->dirty = 0;
- drm_free(obj_priv->page_list,
+ drm_free(obj_priv->pages,
page_count * sizeof(struct page *),
DRM_MEM_DRIVER);
- obj_priv->page_list = NULL;
+ obj_priv->pages = NULL;
}
static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
- i915_gem_object_free_page_list(obj);
+ i915_gem_object_put_pages(obj);
if (obj_priv->gtt_space) {
atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
}
static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
struct page *page;
int ret;
- if (obj_priv->page_list)
+ if (obj_priv->pages_refcount++ != 0)
return 0;
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*/
page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->page_list != NULL);
- obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
- DRM_MEM_DRIVER);
- if (obj_priv->page_list == NULL) {
+ BUG_ON(obj_priv->pages != NULL);
+ obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ if (obj_priv->pages == NULL) {
DRM_ERROR("Faled to allocate page list\n");
+ obj_priv->pages_refcount--;
return -ENOMEM;
}
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
if (IS_ERR(page)) {
ret = PTR_ERR(page);
DRM_ERROR("read_mapping_page failed: %d\n", ret);
- i915_gem_object_free_page_list(obj);
+ i915_gem_object_put_pages(obj);
return ret;
}
- obj_priv->page_list[i] = page;
+ obj_priv->pages[i] = page;
}
return 0;
}
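Together with i915_gem_object_put_pages above, this turns the old allocate/free pair into a refcounted pin: only the first get fills the page array, later gets just bump pages_refcount, and only the final put releases everything. A stand-alone sketch of that pattern (illustrative types, with malloc standing in for read_mapping_page):

/* Refcounted get/put sketch; types and names are illustrative only. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int pages_refcount;
	void **pages;
	int page_count;
};

static int get_pages(struct obj *o)
{
	int i;

	if (o->pages_refcount++ != 0)	/* already populated */
		return 0;
	o->pages = calloc(o->page_count, sizeof(*o->pages));
	if (o->pages == NULL) {
		o->pages_refcount--;
		return -1;
	}
	for (i = 0; i < o->page_count; i++)
		o->pages[i] = malloc(4096);	/* stand-in for read_mapping_page() */
	return 0;
}

static void put_pages(struct obj *o)
{
	int i;

	assert(o->pages_refcount > 0);
	if (--o->pages_refcount != 0)	/* someone else still holds the pages */
		return;
	for (i = 0; i < o->page_count; i++)
		free(o->pages[i]);
	free(o->pages);
	o->pages = NULL;
}

int main(void)
{
	struct obj o = { .page_count = 4 };

	get_pages(&o);	/* allocates the array */
	get_pages(&o);	/* only bumps the refcount */
	put_pages(&o);	/* pages stay pinned */
	put_pages(&o);	/* last reference, really freed */
	printf("pages=%p refcount=%d\n", (void *)o.pages, o.pages_refcount);
	return 0;
}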
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %d at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
- ret = i915_gem_object_get_page_list(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
* into the GTT.
*/
obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->page_list,
+ obj_priv->pages,
page_count,
obj_priv->gtt_offset,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
- i915_gem_object_free_page_list(obj);
+ i915_gem_object_put_pages(obj);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj_priv->page_list == NULL)
+ if (obj_priv->pages == NULL)
return;
- drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+ drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
@@ -1913,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
- struct drm_device *dev = obj->dev;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
@@ -1932,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
@@ -2144,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (!obj_priv->page_cpu_valid)
@@ -2158,9 +2623,8 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
if (obj_priv->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->page_list + i, 1);
+ drm_clflush_pages(obj_priv->pages + i, 1);
}
- drm_agp_chipset_flush(dev);
}
/* Free the page_cpu_valid mappings which are now stale, whether
@@ -2224,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (obj_priv->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->page_list + i, 1);
+ drm_clflush_pages(obj_priv->pages + i, 1);
obj_priv->page_cpu_valid[i] = 1;
}
@@ -2245,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry)
+ struct drm_i915_gem_exec_object *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_relocation_entry reloc;
- struct drm_i915_gem_relocation_entry __user *relocs;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
@@ -2262,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
entry->offset = obj_priv->gtt_offset;
- relocs = (struct drm_i915_gem_relocation_entry __user *)
- (uintptr_t) entry->relocs_ptr;
/* Apply the relocations, using the GTT aperture to avoid cache
* flushing requirements.
*/
for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_obj_priv;
uint32_t reloc_val, reloc_offset;
uint32_t __iomem *reloc_entry;
- ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
-
target_obj = drm_gem_object_lookup(obj->dev, file_priv,
- reloc.target_handle);
+ reloc->target_handle);
if (target_obj == NULL) {
i915_gem_object_unpin(obj);
return -EBADF;
@@ -2292,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
if (target_obj_priv->gtt_space == NULL) {
DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
+ reloc->target_handle);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.offset > obj->size - 4) {
+ if (reloc->offset > obj->size - 4) {
DRM_ERROR("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->size);
+ obj, reloc->target_handle,
+ (int) reloc->offset, (int) obj->size);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.offset & 3) {
+ if (reloc->offset & 3) {
DRM_ERROR("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
+ obj, reloc->target_handle,
+ (int) reloc->offset);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
- reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
+ if (reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
target_obj->pending_write_domain);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
@@ -2351,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
"presumed %08x delta %08x\n",
__func__,
obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
(int) target_obj_priv->gtt_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
+ (int) reloc->presumed_offset,
+ reloc->delta);
#endif
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+ if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
drm_gem_object_unreference(target_obj);
continue;
}
@@ -2381,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
/* Map the page containing the relocation we're going to
* perform.
*/
- reloc_offset = obj_priv->gtt_offset + reloc.offset;
+ reloc_offset = obj_priv->gtt_offset + reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
(reloc_offset &
~(PAGE_SIZE - 1)));
reloc_entry = (uint32_t __iomem *)(reloc_page +
(reloc_offset & (PAGE_SIZE - 1)));
- reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+ reloc_val = target_obj_priv->gtt_offset + reloc->delta;
#if WATCH_BUF
DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
- obj, (unsigned int) reloc.offset,
+ obj, (unsigned int) reloc->offset,
readl(reloc_entry), reloc_val);
#endif
writel(reloc_val, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
- /* Write the updated presumed offset for this entry back out
- * to the user.
+ /* The updated presumed offset for this entry will be
+ * copied back out to the user.
*/
- reloc.presumed_offset = target_obj_priv->gtt_offset;
- ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return ret;
- }
+ reloc->presumed_offset = target_obj_priv->gtt_offset;
drm_gem_object_unreference(target_obj);
}
@@ -2423,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
struct drm_i915_gem_execbuffer *exec,
+ struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
- (uintptr_t) exec->cliprects_ptr;
int nbox = exec->num_cliprects;
int i = 0, count;
uint32_t exec_start, exec_len;
@@ -2448,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, boxes, i,
+ int ret = i915_emit_box(dev, cliprects, i,
exec->DR1, exec->DR4);
if (ret)
return ret;
@@ -2504,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
return ret;
}
+static int
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+ uint32_t buffer_count,
+ struct drm_i915_gem_relocation_entry **relocs)
+{
+ uint32_t reloc_count = 0, reloc_index = 0, i;
+ int ret = 0;
+
+ *relocs = NULL;
+ for (i = 0; i < buffer_count; i++) {
+ if (reloc_count + exec_list[i].relocation_count < reloc_count)
+ return -EINVAL;
+ reloc_count += exec_list[i].relocation_count;
+ }
+
+ *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+ if (*relocs == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < buffer_count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+ ret = copy_from_user(&(*relocs)[reloc_index],
+ user_relocs,
+ exec_list[i].relocation_count *
+ sizeof(**relocs));
+ if (ret != 0) {
+ drm_free(*relocs, reloc_count * sizeof(**relocs),
+ DRM_MEM_DRIVER);
+ *relocs = NULL;
+ return ret;
+ }
+
+ reloc_index += exec_list[i].relocation_count;
+ }
+
+ return ret;
+}
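The check at the top of the first loop above guards the user-controlled sum of relocation counts against unsigned wrap-around before it is used as an allocation size. A small stand-alone sketch of the same check, with hostile input values made up for illustration:

/* Overflow-safe summation of untrusted counts, as used above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t counts[] = { 10, 0xfffffff0u, 20 };	/* hostile input */
	uint32_t total = 0;
	size_t i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		if (total + counts[i] < total) {	/* wrapped around */
			puts("overflow detected, would return -EINVAL");
			return 1;
		}
		total += counts[i];
	}
	printf("total relocations: %u\n", total);
	return 0;
}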
+
+static int
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+ uint32_t buffer_count,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ uint32_t reloc_count = 0, i;
+ int ret = 0;
+
+ for (i = 0; i < buffer_count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+ if (ret == 0) {
+ ret = copy_to_user(user_relocs,
+ &relocs[reloc_count],
+ exec_list[i].relocation_count *
+ sizeof(*relocs));
+ }
+
+ reloc_count += exec_list[i].relocation_count;
+ }
+
+ drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+
+ return ret;
+}
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2515,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
- int ret, i, pinned = 0;
+ struct drm_clip_rect *cliprects = NULL;
+ struct drm_i915_gem_relocation_entry *relocs;
+ int ret, ret2, i, pinned = 0;
uint64_t exec_offset;
- uint32_t seqno, flush_domains;
+ uint32_t seqno, flush_domains, reloc_index;
int pin_tries;
#if WATCH_EXEC
@@ -2551,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
+ if (args->num_cliprects != 0) {
+ cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
+ DRM_MEM_DRIVER);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ ret = copy_from_user(cliprects,
+ (struct drm_clip_rect __user *)
+ (uintptr_t) args->cliprects_ptr,
+ sizeof(*cliprects) * args->num_cliprects);
+ if (ret != 0) {
+ DRM_ERROR("copy %d cliprects failed: %d\n",
+ args->num_cliprects, ret);
+ goto pre_mutex_err;
+ }
+ }
+
+ ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+ &relocs);
+ if (ret != 0)
+ goto pre_mutex_err;
+
mutex_lock(&dev->struct_mutex);
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2593,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* Pin and relocate */
for (pin_tries = 0; ; pin_tries++) {
ret = 0;
+ reloc_index = 0;
+
for (i = 0; i < args->buffer_count; i++) {
object_list[i]->pending_read_domains = 0;
object_list[i]->pending_write_domain = 0;
ret = i915_gem_object_pin_and_relocate(object_list[i],
file_priv,
- &exec_list[i]);
+ &exec_list[i],
+ &relocs[reloc_index]);
if (ret)
break;
pinned = i + 1;
+ reloc_index += exec_list[i].relocation_count;
}
/* success */
if (ret == 0)
@@ -2687,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
#endif
/* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+ ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
@@ -2751,11 +3297,27 @@ err:
args->buffer_count, ret);
}
+ /* Copy the updated relocations out regardless of current error
+ * state. Failure to update the relocs would mean that the next
+ * time userland calls execbuf, it would do so with presumed offset
+ * state that didn't match the actual object state.
+ */
+ ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+ relocs);
+ if (ret2 != 0) {
+ DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+
+ if (ret == 0)
+ ret = ret2;
+ }
+
pre_mutex_err:
drm_free(object_list, sizeof(*object_list) * args->buffer_count,
DRM_MEM_DRIVER);
drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
DRM_MEM_DRIVER);
+ drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
+ DRM_MEM_DRIVER);
return ret;
}
@@ -3192,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev)
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
- dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+ dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3222,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
obj = dev_priv->hws_obj;
obj_priv = obj->driver_private;
- kunmap(obj_priv->page_list[0]);
+ kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
dev_priv->hws_obj = NULL;
@@ -3525,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
if (!obj_priv->phys_obj)
return;
- ret = i915_gem_object_get_page_list(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
goto out;
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+ char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(dst, KM_USER0);
}
- drm_clflush_pages(obj_priv->page_list, page_count);
+ drm_clflush_pages(obj_priv->pages, page_count);
drm_agp_chipset_flush(dev);
out:
obj_priv->phys_obj->cur_obj = NULL;
@@ -3581,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj_priv->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_page_list(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
DRM_ERROR("failed to get page list\n");
goto out;
@@ -3590,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+ char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
new file mode 100644
index 000000000000..455ec970b385
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define DRM_I915_RING_DEBUG 1
+
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define ACTIVE_LIST 1
+#define FLUSHING_LIST 2
+#define INACTIVE_LIST 3
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+{
+ if (obj_priv->user_pin_count > 0)
+ return "P";
+ else if (obj_priv->pin_count > 0)
+ return "p";
+ else
+ return " ";
+}
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+{
+ switch (obj_priv->tiling_mode) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
+}
+
+static int i915_gem_object_list_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ uintptr_t list = (uintptr_t) node->info_ent->data;
+ struct list_head *head;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+
+ switch (list) {
+ case ACTIVE_LIST:
+ seq_printf(m, "Active:\n");
+ head = &dev_priv->mm.active_list;
+ break;
+ case INACTIVE_LIST:
+ seq_printf(m, "Inctive:\n");
+ head = &dev_priv->mm.inactive_list;
+ break;
+ case FLUSHING_LIST:
+ seq_printf(m, "Flushing:\n");
+ head = &dev_priv->mm.flushing_list;
+ break;
+ default:
+ DRM_INFO("Ooops, unexpected list\n");
+ return 0;
+ }
+
+ list_for_each_entry(obj_priv, head, list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ seq_printf(m, " %p: %s %08x %08x %d",
+ obj,
+ get_pin_flag(obj_priv),
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+
+ if (obj->name)
+ seq_printf(m, " (name: %d)", obj->name);
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
+ seq_printf(m, "\n");
+ }
+ return 0;
+}
+
+static int i915_gem_request_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *gem_request;
+
+ seq_printf(m, "Request:\n");
+ list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ return 0;
+}
+
+static int i915_gem_seqno_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->hw_status_page != NULL) {
+ seq_printf(m, "Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ } else {
+ seq_printf(m, "Current sequence: hws uninitialized\n");
+ }
+ seq_printf(m, "Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ return 0;
+}
+
+
+static int i915_interrupt_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ seq_printf(m, "Interrupt enable: %08x\n",
+ I915_READ(IER));
+ seq_printf(m, "Interrupt identity: %08x\n",
+ I915_READ(IIR));
+ seq_printf(m, "Interrupt mask: %08x\n",
+ I915_READ(IMR));
+ seq_printf(m, "Pipe A stat: %08x\n",
+ I915_READ(PIPEASTAT));
+ seq_printf(m, "Pipe B stat: %08x\n",
+ I915_READ(PIPEBSTAT));
+ seq_printf(m, "Interrupts received: %d\n",
+ atomic_read(&dev_priv->irq_received));
+ if (dev_priv->hw_status_page != NULL) {
+ seq_printf(m, "Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ } else {
+ seq_printf(m, "Current sequence: hws uninitialized\n");
+ }
+ seq_printf(m, "Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ seq_printf(m, "IRQ sequence: %d\n",
+ dev_priv->mm.irq_gem_seqno);
+ return 0;
+}
+
+static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+ seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+ if (obj == NULL) {
+ seq_printf(m, "Fenced object[%2d] = unused\n", i);
+ } else {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = obj->driver_private;
+ seq_printf(m, "Fenced object[%2d] = %p: %s "
+ "%08x %08zx %08x %s %08x %08x %d",
+ i, obj, get_pin_flag(obj_priv),
+ obj_priv->gtt_offset,
+ obj->size, obj_priv->stride,
+ get_tiling_flag(obj_priv),
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ if (obj->name)
+ seq_printf(m, " (name: %d)", obj->name);
+ seq_printf(m, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static int i915_hws_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+ volatile u32 *hws;
+
+ hws = (volatile u32 *)dev_priv->hw_status_page;
+ if (hws == NULL)
+ return 0;
+
+ for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+ seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i * 4,
+ hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+ }
+ return 0;
+}
+
+static struct drm_info_list i915_gem_debugfs_list[] = {
+ {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+ {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+ {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_request", i915_gem_request_info, 0},
+ {"i915_gem_seqno", i915_gem_seqno_info, 0},
+ {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
+ {"i915_gem_interrupt", i915_interrupt_info, 0},
+ {"i915_gem_hws", i915_hws_info, 0},
+};
+#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
+
+int i915_gem_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(i915_gem_debugfs_list,
+ I915_GEM_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+}
+
+void i915_gem_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(i915_gem_debugfs_list,
+ I915_GEM_DEBUGFS_ENTRIES, minor);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
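Each entry in i915_gem_debugfs_list above ends up as a debugfs file whose read method is a seq_file show callback, registered through the new drm_debugfs_create_files() helper. For reference, the same pattern with the plain debugfs API looks roughly like the following stand-alone module sketch (hypothetical names, not the DRM helper itself):

/* Minimal debugfs + seq_file sketch of what each table entry becomes. */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *unused)
{
	seq_printf(m, "Current sequence: %d\n", 42);	/* placeholder data */
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("i915_demo", NULL);
	debugfs_create_file("demo", 0444, demo_dir, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");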
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
deleted file mode 100644
index 4d1b9de0cd8b..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Keith Packard <keithp@keithp.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-static int i915_gem_active_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Active:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n",
- obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Flushing:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Inactive:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_request_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *gem_request;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Request:\n");
- list_for_each_entry(gem_request, &dev_priv->mm.request_list,
- list)
- {
- DRM_PROC_PRINT(" %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- if (dev_priv->hw_status_page != NULL) {
- DRM_PROC_PRINT("Current sequence: %d\n",
- i915_get_gem_seqno(dev));
- } else {
- DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
- }
- DRM_PROC_PRINT("Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-
-static int i915_interrupt_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Interrupt enable: %08x\n",
- I915_READ(IER));
- DRM_PROC_PRINT("Interrupt identity: %08x\n",
- I915_READ(IIR));
- DRM_PROC_PRINT("Interrupt mask: %08x\n",
- I915_READ(IMR));
- DRM_PROC_PRINT("Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- DRM_PROC_PRINT("Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
- DRM_PROC_PRINT("Interrupts received: %d\n",
- atomic_read(&dev_priv->irq_received));
- if (dev_priv->hw_status_page != NULL) {
- DRM_PROC_PRINT("Current sequence: %d\n",
- i915_get_gem_seqno(dev));
- } else {
- DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
- }
- DRM_PROC_PRINT("Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- DRM_PROC_PRINT("IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_hws_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int len = 0, i;
- volatile u32 *hws;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- hws = (volatile u32 *)dev_priv->hw_status_page;
- if (hws == NULL) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
- DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i * 4,
- hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static struct drm_proc_list {
- /** file name */
- const char *name;
- /** proc callback*/
- int (*f) (char *, char **, off_t, int, int *, void *);
-} i915_gem_proc_list[] = {
- {"i915_gem_active", i915_gem_active_info},
- {"i915_gem_flushing", i915_gem_flushing_info},
- {"i915_gem_inactive", i915_gem_inactive_info},
- {"i915_gem_request", i915_gem_request_info},
- {"i915_gem_seqno", i915_gem_seqno_info},
- {"i915_gem_interrupt", i915_interrupt_info},
- {"i915_gem_hws", i915_hws_info},
-};
-
-#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
-
-int i915_gem_proc_init(struct drm_minor *minor)
-{
- struct proc_dir_entry *ent;
- int i, j;
-
- for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
- ent = create_proc_entry(i915_gem_proc_list[i].name,
- S_IFREG | S_IRUGO, minor->dev_root);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/.../%s\n",
- i915_gem_proc_list[i].name);
- for (j = 0; j < i; j++)
- remove_proc_entry(i915_gem_proc_list[i].name,
- minor->dev_root);
- return -1;
- }
- ent->read_proc = i915_gem_proc_list[i].f;
- ent->data = minor;
- }
- return 0;
-}
-
-void i915_gem_proc_cleanup(struct drm_minor *minor)
-{
- int i;
-
- if (!minor->dev_root)
- return;
-
- for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
- remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7fb4191ef934..4cce1aef438e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
- IS_GM45(dev)) {
+ } else if (IS_MOBILE(dev)) {
uint32_t dcc;
- /* On 915-945 and GM965, channel interleave by the CPU is
- * determined by DCC. The CPU will alternate based on bit 6
- * in interleaved mode, and the GPU will then also alternate
- * on bit 6, 9, and 10 for X, but the CPU may also optionally
- * alternate based on bit 17 (XOR not disabled and XOR
- * bit == 17).
+ /* On mobile 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
- if (IS_I915G(dev) || IS_I915GM(dev) ||
- dcc & DCC_CHANNEL_XOR_DISABLE) {
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
- (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
- /* GM965/GM45 does either bit 11 or bit 17
- * swizzling.
- */
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
- /* Bit 17 or perhaps other swizzling */
+ /* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
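For context on what these swizzle modes mean to software: with dual-channel interleaving, my understanding is that bit 6 of a byte address is effectively XORed with higher address bits (9 and 10 here, plus optionally 11 or 17 on the CPU side), so anything touching tiled memory through the CPU has to apply the same adjustment. A hypothetical helper for the 9/10 case (illustrative only, not driver code):

/* Illustrative bit-6 swizzle for the 9/10 mode; not from the driver. */
#include <stdint.h>
#include <stdio.h>

static uint32_t swizzle_bit6_9_10(uint32_t addr)
{
	uint32_t bit = ((addr >> 9) ^ (addr >> 10)) & 1;

	return addr ^ (bit << 6);	/* flip bit 6 when bits 9 and 10 differ */
}

int main(void)
{
	uint32_t addr = 0x00000200;	/* bit 9 set, bit 10 clear */

	printf("0x%08x -> 0x%08x\n", addr, swizzle_bit6_9_10(addr));
	return 0;
}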
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 90600d899413..377cc588f5e9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -435,6 +436,7 @@
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
/* i830, required in DVO non-gang */
#define PLL_P2_DIVIDE_BY_4 (1 << 23)
#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -501,10 +503,12 @@
#define FPB0 0x06048
#define FPB1 0x0604c
#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_IGD_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
#define FP_M1_DIV_MASK 0x00003f00
#define FP_M1_DIV_SHIFT 8
#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_IGD_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST 0x606c
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -629,6 +633,22 @@
#define TV_HOTPLUG_INT_EN (1 << 18)
#define CRT_HOTPLUG_INT_EN (1 << 9)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
+
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -856,7 +876,7 @@
*/
# define TV_ENC_C0_FIX (1 << 10)
/** Bits that must be preserved by software */
-# define TV_CTL_SAVE ((3 << 8) | (3 << 6))
+# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
# define TV_FUSE_STATE_MASK (3 << 4)
/** Read-only state that reports all features enabled */
# define TV_FUSE_STATE_ENABLED (0 << 4)
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715ace3a0..de621aad85b5 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
u8 panel_type;
u8 rsvd1;
/* LVDS capabilities, stored in a dword */
- u8 rsvd2:1;
- u8 lvds_edid:1;
- u8 pixel_dither:1;
- u8 pfit_ratio_auto:1;
- u8 pfit_gfx_mode_enhanced:1;
- u8 pfit_text_mode_enhanced:1;
u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
u8 rsvd4;
} __attribute__((packed));
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dcaed3466e83..2b6d44381c31 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
static int intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+
+ int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (mode->clock > 400000 || mode->clock < 25000)
- return MODE_CLOCK_RANGE;
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+ if (!IS_I9XX(dev))
+ max_clock = 350000;
+ else
+ max_clock = 400000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
return MODE_OK;
}
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
- if (intel_crtc->pipe == 0)
+ if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- else
+ I915_WRITE(BCLRPAT_A, 0);
+ } else {
adpa |= ADPA_PIPE_B_SELECT;
+ I915_WRITE(BCLRPAT_B, 0);
+ }
I915_WRITE(ADPA, adpa);
}
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp;
-
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+ u32 hotplug_en;
+ int i, tries = 0;
+ /*
+ * On 4 series desktop, the CRT detect sequence needs to be done twice
+ * to get a reliable result.
+ */
- do {
- if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
- break;
- msleep(1);
- } while (time_after(timeout, jiffies));
+ if (IS_G4X(dev) && !IS_GM45(dev))
+ tries = 2;
+ else
+ tries = 1;
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~(CRT_HOTPLUG_MASK);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ if (IS_GM45(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ for (i = 0; i < tries ; i++) {
+ unsigned long timeout;
+ /* turn on the FORCE_DETECT */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
+ /* wait for FORCE_DETECT to go off */
+ do {
+ if (!(I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
+ }
if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
CRT_HOTPLUG_MONITOR_COLOR)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a2834276cb38..d9c50ff94d76 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -56,11 +56,13 @@ typedef struct {
} intel_p2_t;
#define INTEL_P2_NUM 2
-
-typedef struct {
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
-} intel_limit_t;
+ bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+ int, int, intel_clock_t *);
+};
#define I8XX_DOT_MIN 25000
#define I8XX_DOT_MAX 350000
@@ -90,18 +92,32 @@ typedef struct {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
+#define IGD_VCO_MIN 1700000
+#define IGD_VCO_MAX 3500000
#define I9XX_N_MIN 1
#define I9XX_N_MAX 6
+/* IGD's Ncounter is a ring counter */
+#define IGD_N_MIN 3
+#define IGD_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
+#define IGD_M_MIN 2
+#define IGD_M_MAX 256
#define I9XX_M1_MIN 10
#define I9XX_M1_MAX 22
#define I9XX_M2_MIN 5
#define I9XX_M2_MAX 9
+/* IGD M1 is reserved, and must be 0 */
+#define IGD_M1_MIN 0
+#define IGD_M1_MAX 0
+#define IGD_M2_MIN 0
+#define IGD_M2_MAX 254
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
#define I9XX_P_LVDS_MAX 98
+#define IGD_P_LVDS_MIN 7
+#define IGD_P_LVDS_MAX 112
#define I9XX_P1_MIN 1
#define I9XX_P1_MAX 8
#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -115,6 +131,97 @@ typedef struct {
#define INTEL_LIMIT_I8XX_LVDS 1
#define INTEL_LIMIT_I9XX_SDVO_DAC 2
#define INTEL_LIMIT_I9XX_LVDS 3
+#define INTEL_LIMIT_G4X_SDVO 4
+#define INTEL_LIMIT_G4X_HDMI_DAC 5
+#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
+#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
+#define INTEL_LIMIT_IGD_SDVO_DAC 8
+#define INTEL_LIMIT_IGD_LVDS 9
+
+/* Parameters for SDVO on the G4x platform */
+#define G4X_DOT_SDVO_MIN 25000
+#define G4X_DOT_SDVO_MAX 270000
+#define G4X_VCO_MIN 1750000
+#define G4X_VCO_MAX 3500000
+#define G4X_N_SDVO_MIN 1
+#define G4X_N_SDVO_MAX 4
+#define G4X_M_SDVO_MIN 104
+#define G4X_M_SDVO_MAX 138
+#define G4X_M1_SDVO_MIN 17
+#define G4X_M1_SDVO_MAX 23
+#define G4X_M2_SDVO_MIN 5
+#define G4X_M2_SDVO_MAX 11
+#define G4X_P_SDVO_MIN 10
+#define G4X_P_SDVO_MAX 30
+#define G4X_P1_SDVO_MIN 1
+#define G4X_P1_SDVO_MAX 3
+#define G4X_P2_SDVO_SLOW 10
+#define G4X_P2_SDVO_FAST 10
+#define G4X_P2_SDVO_LIMIT 270000
+
+/* Parameters for HDMI_DAC on the G4x platform */
+#define G4X_DOT_HDMI_DAC_MIN 22000
+#define G4X_DOT_HDMI_DAC_MAX 400000
+#define G4X_N_HDMI_DAC_MIN 1
+#define G4X_N_HDMI_DAC_MAX 4
+#define G4X_M_HDMI_DAC_MIN 104
+#define G4X_M_HDMI_DAC_MAX 138
+#define G4X_M1_HDMI_DAC_MIN 16
+#define G4X_M1_HDMI_DAC_MAX 23
+#define G4X_M2_HDMI_DAC_MIN 5
+#define G4X_M2_HDMI_DAC_MAX 11
+#define G4X_P_HDMI_DAC_MIN 5
+#define G4X_P_HDMI_DAC_MAX 80
+#define G4X_P1_HDMI_DAC_MIN 1
+#define G4X_P1_HDMI_DAC_MAX 8
+#define G4X_P2_HDMI_DAC_SLOW 10
+#define G4X_P2_HDMI_DAC_FAST 5
+#define G4X_P2_HDMI_DAC_LIMIT 165000
+
+/* Parameters for SINGLE_CHANNEL_LVDS on the G4x platform */
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
+#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
+#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
+#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
+#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
+#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
+#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
+#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
+
+/* Parameters for DUAL_CHANNEL_LVDS on the G4x platform */
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
+#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
+#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
+#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
+#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
+#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
+#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
+#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
+#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
+#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
+#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
+#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
+#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
+#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
+#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
+#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static const intel_limit_t intel_limits[] = {
{ /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
+ .find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I8XX_LVDS */
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
+ .find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I9XX_SDVO_DAC */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ .find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I9XX_LVDS */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
*/
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
+ .find_pll = intel_find_best_PLL,
+ },
+ /* The parameters and functions below are for the G4X chipset family */
+ { /* INTEL_LIMIT_G4X_SDVO */
+ .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
+ .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
+ .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
+ .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
+ .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
+ .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
+ .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
+ .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
+ .p2_slow = G4X_P2_SDVO_SLOW,
+ .p2_fast = G4X_P2_SDVO_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_G4X_HDMI_DAC */
+ .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
+ .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
+ .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
+ .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
+ .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
+ .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
+ .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
+ .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
+ .p2_slow = G4X_P2_HDMI_DAC_SLOW,
+ .p2_fast = G4X_P2_HDMI_DAC_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+ .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX },
+ .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
+ .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
+ .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
+ .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
+ .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
+ .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
+ .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
+ .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
+ .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+ .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX },
+ .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
+ .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
+ .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
+ .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
+ .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
+ .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
+ .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
+ .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
+ .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_IGD_SDVO_DAC */
+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ .find_pll = intel_find_best_PLL,
},
+ { /* INTEL_LIMIT_IGD_LVDS */
+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
+ .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
+ /* IGD only supports single-channel mode. */
+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
+ .find_pll = intel_find_best_PLL,
+ },
+
};
+static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ /* LVDS with dual channel */
+ limit = &intel_limits
+ [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+ else
+ /* LVDS with single channel */
+ limit = &intel_limits
+ [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+ } else /* Fall back for any other output type */
+ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+
+ return limit;
+}
+
static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_I9XX(dev)) {
+ if (IS_G4X(dev)) {
+ limit = intel_g4x_limit(crtc);
+ } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
else
limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ } else if (IS_IGD(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+ else
+ limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
return limit;
}
-static void intel_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in IGD, n is a ring counter */
+static void igd_clock(int refclk, intel_clock_t *clock)
{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
+
+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+{
+ if (IS_IGD(dev)) {
+ igd_clock(refclk, clock);
+ return;
+ }
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
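As a quick worked example of the two equations (hypothetical divider values, not taken from any real mode or VBT), the sketch below plugs numbers into both variants; note that on IGD m1 drops out entirely and n is used directly rather than n + 2:

/* Quick user-space check of the two clock equations above, made-up values:
 *   i9xx: m = 5*(m1+2) + (m2+2), vco = refclk*m/(n+2), dot = vco/(p1*p2)
 *   IGD:  m = m2 + 2,            vco = refclk*m/n,     dot = vco/(p1*p2)
 */
#include <stdio.h>

int main(void)
{
	int refclk = 96000;			/* kHz */
	int m1 = 14, m2 = 6, n = 4, p1 = 1, p2 = 10;

	int m = 5 * (m1 + 2) + (m2 + 2);	/* 88 */
	int vco = refclk * m / (n + 2);		/* 1408000 kHz */
	int dot = vco / (p1 * p2);		/* 140800 kHz */

	printf("i9xx: m=%d vco=%d kHz dot=%d kHz\n", m, vco, dot);

	/* IGD variant: m1 is forced to 0, n is used directly. */
	int igd_m2 = 100;
	int igd_m = igd_m2 + 2;			/* 102 */
	int igd_vco = refclk * igd_m / n;	/* 2448000 kHz */

	printf("igd: m=%d vco=%d kHz\n", igd_m, igd_vco);
	return 0;
}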
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
{
const intel_limit_t *limit = intel_limit (crtc);
+ struct drm_device *dev = crtc->dev;
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid ("m1 out of range\n");
- if (clock->m1 <= clock->m2)
+ if (clock->m1 <= clock->m2 && !IS_IGD(dev))
INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
return true;
}
-/**
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
- int refclk, intel_clock_t *best_clock)
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
- const intel_limit_t *limit = intel_limit(crtc);
int err = target;
if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
memset (best_clock, 0, sizeof (*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
- clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in IGD */
+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+ break;
for (clock.n = limit->n.min; clock.n <= limit->n.max;
clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
- intel_clock(refclk, &clock);
+ intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(crtc, &clock))
continue;
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
return (err != target);
}
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ int max_n;
+ bool found;
+ /* approximately equals target * 0.00488 */
+ int err_most = (target >> 8) + (target >> 10);
+ found = false;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ max_n = limit->n.max;
+ /* based on hardware requirements, prefer smaller n for better precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirements, prefer larger m1, m2 and p1 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+ if (!intel_PLL_is_valid(crtc, &clock))
+ continue;
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
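To make the err_most shortcut above concrete, the arithmetic for a hypothetical target clock (the value is illustrative only) works out as follows.
/*
 * Illustration only: for a hypothetical target = 108000 kHz,
 *   err_most = (108000 >> 8) + (108000 >> 10) = 421 + 105 = 526 kHz,
 * i.e. roughly 108000 * (1/256 + 1/1024) ~= 108000 * 0.00488.
 */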
void
intel_wait_for_vblank(struct drm_device *dev)
{
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
return 400000;
else if (IS_I915G(dev))
return 333000;
- else if (IS_I945GM(dev) || IS_845G(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
return 200000;
else if (IS_I915GM(dev)) {
u16 gcfgc = 0;
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bool is_crt = false, is_lvds = false, is_tv = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ const intel_limit_t *limit;
int ret;
drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
refclk = 48000;
}
- ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (IS_IGD(dev))
+ fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
+ else
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
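The difference between the two FP encodings is easier to see with a concrete value; the divisor below is hypothetical.
/*
 * Illustration only: for clock.n = 3, pre-IGD chips program the N field
 * with the plain value 3, while IGD programs the one-hot value
 * (1 << 3) = 0x8.  intel_crtc_clock_get() later reverses this with
 * ffs(...) - 1, recovering n = 3.
 */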
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 1)) << 16;
+ if (IS_IGD(dev))
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+ else
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
switch (clock.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ if (IS_IGD(dev)) {
+ clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
if (IS_I9XX(dev)) {
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ if (IS_IGD(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
switch (dpll & DPLL_MODE_MASK) {
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
}
/* XXX: Handle the 100 MHz refclk */
- intel_clock(96000, &clock);
+ intel_clock(dev, 96000, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
- intel_clock(66000, &clock);
+ intel_clock(dev, 66000, &clock);
} else
- intel_clock(48000, &clock);
+ intel_clock(dev, 48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
else
clock.p2 = 2;
- intel_clock(48000, &clock);
+ intel_clock(dev, 48000, &clock);
}
}
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_I9XX(dev)) {
int found;
+ u32 reg;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
}
- if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
+
+ /* Before G4X, SDVOC doesn't have its own detect register */
+ if (IS_G4X(dev))
+ reg = SDVOC;
+ else
+ reg = SDVOB;
+
+ if (I915_READ(reg) & SDVO_DETECTED) {
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af98854..6619f26e46a5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
pfit_control = 0;
if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither)
+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
}
else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 56485d67369b..ceca9471a75a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -217,8 +217,8 @@ static const u32 filter_table[] = {
*/
static const struct color_conversion ntsc_m_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
- .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
- .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
};
static const struct color_conversion ntsc_m_csc_svideo = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
- .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
- .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
static const struct color_conversion ntsc_j_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
- .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
- .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
+ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
};
static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
static const struct color_conversion ntsc_j_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
- .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
- .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
+ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
};
static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
static const struct color_conversion pal_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
- .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
- .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
+ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
};
static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
static const struct color_conversion pal_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
- .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
- .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
+ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
};
static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
static const struct color_conversion pal_m_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
- .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
- .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
};
static const struct color_conversion pal_m_csc_svideo = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
- .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
- .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
static const struct color_conversion pal_n_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
- .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
- .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
};
static const struct color_conversion pal_n_csc_svideo = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
- .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
- .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
* Component connections
*/
static const struct color_conversion sdtv_csc_yprpb = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
- .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
- .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
};
static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
};
static const struct color_conversion hdtv_csc_yprpb = {
- .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
- .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
- .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
+ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
};
static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@ struct tv_mode {
static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-M",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 107.52 */
- .dda1_inc = 136,
- .dda2_inc = 7624, .dda2_size = 20013,
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_4,
.pal_burst = false,
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "NTSC-443",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 107.52 */
.dda1_inc = 168,
- .dda2_inc = 18557, .dda2_size = 20625,
- .dda3_inc = 0, .dda3_size = 0,
- .sc_reset = TV_SC_RESET_EVERY_8,
- .pal_burst = true,
+ .dda2_inc = 4093, .dda2_size = 27456,
+ .dda3_inc = 310, .dda3_size = 525,
+ .sc_reset = TV_SC_RESET_NEVER,
+ .pal_burst = false,
.composite_levels = &ntsc_m_levels_composite,
.composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "NTSC-J",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 107.52 */
- .dda1_inc = 136,
- .dda2_inc = 7624, .dda2_size = 20013,
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_4,
.pal_burst = false,
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "PAL-M",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 107.52 */
- .dda1_inc = 136,
- .dda2_inc = 7624, .dda2_size = 20013,
+ .dda1_inc = 135,
+ .dda2_inc = 16704, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
- .sc_reset = TV_SC_RESET_EVERY_4,
- .pal_burst = false,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
.composite_levels = &pal_m_levels_composite,
.composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
{
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL-N",
- .clock = 107520,
+ .clock = 108000,
.refresh = 25000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 107.52 */
- .dda1_inc = 168,
- .dda2_inc = 18557, .dda2_size = 20625,
- .dda3_inc = 0, .dda3_size = 0,
+ .dda1_inc = 135,
+ .dda2_inc = 23578, .dda2_size = 27648,
+ .dda3_inc = 134, .dda3_size = 625,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
{
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL",
- .clock = 107520,
+ .clock = 108000,
.refresh = 25000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
- .hsync_end = 64, .hblank_end = 128,
+ .hsync_end = 64, .hblank_end = 142,
.hblank_start = 844, .htotal = 863,
.progressive = false, .trilevel_sync = false,
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 107.52 */
.dda1_inc = 168,
- .dda2_inc = 18557, .dda2_size = 20625,
- .dda3_inc = 0, .dda3_size = 0,
+ .dda2_inc = 4122, .dda2_size = 27648,
+ .dda3_inc = 67, .dda3_size = 625,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
.veq_ena = false,
.vi_end_f1 = 44, .vi_end_f2 = 44,
- .nbr_end = 496,
+ .nbr_end = 479,
.burst_ena = false,
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
.veq_ena = false,
.vi_end_f1 = 44, .vi_end_f2 = 44,
- .nbr_end = 496,
+ .nbr_end = 479,
.burst_ena = false,
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
.component_only = 1,
.hsync_end = 88, .hblank_end = 235,
- .hblank_start = 2155, .htotal = 2200,
+ .hblank_start = 2155, .htotal = 2201,
.progressive = false, .trilevel_sync = true,
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1)
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
return MODE_OK;
return MODE_CLOCK_RANGE;
}
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
- tv_ctl = 0;
+ tv_ctl = I915_READ(TV_CTL);
+ tv_ctl &= TV_CTL_SAVE;
switch (tv_priv->type) {
default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* dda1 implies valid video levels */
if (tv_mode->dda1_inc) {
scctl1 |= TV_SC_DDA1_EN;
- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
}
if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
scctl1 |= TV_SC_DDA3_EN;
scctl1 |= tv_mode->sc_reset;
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
color_conversion->av);
}
- I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+ if (IS_I965G(dev))
+ I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ else
+ I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
if (video_levels)
I915_WRITE(TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
+ intel_wait_for_vblank(dev);
}
/*
* A B C
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
- if (encoder->crtc) {
+ if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_output);
} else {
crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
type = -1;
}
+ tv_priv->type = type;
+
if (type < 0)
return connector_status_disconnected;
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode_ptr;
struct intel_output *intel_output = to_intel_output(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
- int j;
+ int j, count = 0;
+ u64 tmp;
for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
j++) {
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
&& !tv_mode->component_only))
continue;
- mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
- DRM_MEM_DRIVER);
+ mode_ptr = drm_mode_create(connector->dev);
+ if (!mode_ptr)
+ continue;
strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
mode_ptr->vtotal = vactive_s + 33;
- mode_ptr->clock = (int) (tv_mode->refresh *
- mode_ptr->vtotal *
- mode_ptr->htotal / 1000) / 1000;
+ tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+ tmp *= mode_ptr->htotal;
+ tmp = div_u64(tmp, 1000000);
+ mode_ptr->clock = (int) tmp;
mode_ptr->type = DRM_MODE_TYPE_DRIVER;
drm_mode_probed_add(connector, mode_ptr);
+ count++;
}
- return 0;
+ return count;
}
static void
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 31400c8ae051..d696f69ebce5 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = {
.host_reset = host_reset,
};
-const static struct hpsb_address_ops map_ops = {
+static const struct hpsb_address_ops map_ops = {
.read = read_maps,
};
-const static struct hpsb_address_ops fcp_ops = {
+static const struct hpsb_address_ops fcp_ops = {
.write = write_fcp,
};
-const static struct hpsb_address_ops reg_ops = {
+static const struct hpsb_address_ops reg_ops = {
.read = read_regs,
.write = write_regs,
.lock = lock_regs,
.lock64 = lock64_regs,
};
-const static struct hpsb_address_ops config_rom_ops = {
+static const struct hpsb_address_ops config_rom_ops = {
.read = read_config_rom,
};
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cb15bfa38d70..823a6297a1af 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops=
* Export information about protocols/devices supported by this driver.
*/
#ifdef MODULE
-static struct ieee1394_device_id dv1394_id_table[] = {
+static const struct ieee1394_device_id dv1394_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 1a919df809f8..4ca103577c0a 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host);
static void ether1394_host_reset(struct hpsb_host *host);
/* Function for incoming 1394 packets */
-const static struct hpsb_address_ops addr_ops = {
+static const struct hpsb_address_ops addr_ops = {
.write = ether1394_write,
};
@@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud)
return eth1394_new_node(hi, ud);
}
-static struct ieee1394_device_id eth1394_id_table[] = {
+static const struct ieee1394_device_id eth1394_id_table[] = {
{
.match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION),
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 600e391c8fe7..4bc443546e04 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return retval;
}
-const static struct hpsb_address_ops dummy_ops;
+static const struct hpsb_address_ops dummy_ops;
/* dummy address spaces as lower and upper bounds of the host's a.s. list */
static void init_hpsb_highlevel(struct hpsb_host *host)
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 53aada5bbe1e..a6d55bebe61a 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = {
static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
{
struct hpsb_protocol_driver *driver;
- struct ieee1394_device_id *id;
+ const struct ieee1394_device_id *id;
int length = 0;
char *scratch = buf;
@@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
{
struct hpsb_protocol_driver *driver;
struct unit_directory *ud;
- struct ieee1394_device_id *id;
+ const struct ieee1394_device_id *id;
/* We only match unit directories */
if (dev->platform_data != &nodemgr_ud_platform_data)
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index ee5acdbd114a..749b271d3107 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -125,7 +125,7 @@ struct hpsb_protocol_driver {
* probe function below can implement further protocol
* dependent or vendor dependent checking.
*/
- struct ieee1394_device_id *id_table;
+ const struct ieee1394_device_id *id_table;
/*
* The update function is called when the node has just
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bad66c65b0d6..da5f8829b503 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
u16 flags);
-const static struct hpsb_address_ops arm_ops = {
+static const struct hpsb_address_ops arm_ops = {
.read = arm_read,
.write = arm_write,
.lock = arm_lock,
@@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf)
{
struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
struct raw1394_request __user *r;
+
r = compat_alloc_user_space(sizeof(struct raw1394_request));
#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
@@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf)
C(tag) ||
C(sendb) ||
C(recvb))
- return ERR_PTR(-EFAULT);
+ return (__force const char __user *)ERR_PTR(-EFAULT);
+
return (const char __user *)r;
}
#undef C
@@ -389,6 +391,7 @@ static int
raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
{
struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
+
if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
P(type) ||
P(error) ||
@@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
P(sendb) ||
P(recvb))
return -EFAULT;
+
return sizeof(struct compat_raw1394_req);
}
#undef P
@@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
sizeof(struct compat_raw1394_req) !=
sizeof(struct raw1394_request)) {
buffer = raw1394_compat_write(buffer);
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
+ if (IS_ERR((__force void *)buffer))
+ return PTR_ERR((__force void *)buffer);
} else
#endif
if (count != sizeof(struct raw1394_request)) {
@@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
* Export information about protocols/devices supported by this driver.
*/
#ifdef MODULE
-static struct ieee1394_device_id raw1394_id_table[] = {
+static const struct ieee1394_device_id raw1394_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f3fd8657ce4b..a51ab233342d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = {
.host_reset = sbp2_host_reset,
};
-const static struct hpsb_address_ops sbp2_ops = {
+static const struct hpsb_address_ops sbp2_ops = {
.write = sbp2_handle_status_write
};
@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
size_t, u16);
-const static struct hpsb_address_ops sbp2_physdma_ops = {
+static const struct hpsb_address_ops sbp2_physdma_ops = {
.read = sbp2_handle_physdma_read,
.write = sbp2_handle_physdma_write,
};
@@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = {
/*
* Interface to driver core and IEEE 1394 core
*/
-static struct ieee1394_device_id sbp2_id_table[] = {
+static const struct ieee1394_device_id sbp2_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
@@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
"(firmware_revision 0x%06x, vendor_id 0x%06x,"
" model_id 0x%06x)",
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
- workarounds, firmware_revision,
- ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
+ workarounds, firmware_revision, ud->vendor_id,
model);
/* We would need one SCSI host template for each target to adjust
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 679a918a5cc7..d287ba79821d 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops=
* Export information about protocols/devices supported by this driver.
*/
#ifdef MODULE
-static struct ieee1394_device_id video1394_id_table[] = {
+static const struct ieee1394_device_id video1394_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 8d3e4c6f237e..ecb1f6fd6276 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1602,8 +1602,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
- netdev->features |= NETIF_F_LLTX;
/* Fill in the port structure */
nesvnic->netdev = netdev;
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index b55d9ccaf33e..32526f103b59 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype)
}
static const char *debug_fcp_opcode(unsigned int opcode,
- const u8 *data, size_t length)
+ const u8 *data, int length)
{
switch (opcode) {
case AVC_OPCODE_VENDOR: break;
@@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode,
case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
+ case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2";
case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
}
return "Vendor";
}
-static void debug_fcp(const u8 *data, size_t length)
+static void debug_fcp(const u8 *data, int length)
{
unsigned int subunit_type, subunit_id, op;
const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
@@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
- c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
+ if (fdtv->type == FIREDTV_DVB_S2)
+ c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2;
+ else
+ c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
c->operand[4] = (params->frequency >> 24) & 0xff;
c->operand[5] = (params->frequency >> 16) & 0xff;
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 00d46e137b2a..92285d0089c2 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -81,13 +81,16 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
/* go */
sb->s_flags |= MS_ACTIVE;
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+
+ return 0;
/* new mountpoint for an already mounted superblock */
already_mounted:
DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name);
- ret = simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ ret = 0;
goto out_put;
out_error:
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 5b91a85fe107..4f08bd995836 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -353,9 +353,6 @@ el2_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &el2_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = eip_poll;
-#endif
retval = register_netdev(dev);
if (retval)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e5ffc1c606c1..f062b424704e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -972,6 +972,14 @@ config ENC28J60_WRITEVERIFY
Enable verification after the buffer write; useful for debugging purposes.
If unsure, say N.
+config ETHOC
+ tristate "OpenCores 10/100 Mbps Ethernet MAC support"
+ depends on NET_ETHERNET
+ select MII
+ select PHYLIB
+ help
+ Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+
config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 758ecdf4c820..98409c9dd445 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -230,6 +230,7 @@ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_ETHOC) += ethoc.o
obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 071a851a2ea1..eac73382c087 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -143,6 +143,22 @@ out:
}
#endif
+static const struct net_device_ops ac_netdev_ops = {
+ .ndo_open = ac_open,
+ .ndo_stop = ac_close_card,
+
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
static int __init ac_probe1(int ioaddr, struct net_device *dev)
{
int i, retval;
@@ -253,11 +269,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
ei_status.block_output = &ac_block_output;
ei_status.get_8390_hdr = &ac_get_8390_hdr;
- dev->open = &ac_open;
- dev->stop = &ac_close_card;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ei_poll;
-#endif
+ dev->netdev_ops = &ac_netdev_ops;
NS8390_init(dev, 0);
retval = register_netdev(dev);
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 54819a34ba0a..7f8325419803 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -171,7 +171,6 @@ static unsigned int cops_debug = COPS_DEBUG;
struct cops_local
{
- struct net_device_stats stats;
int board; /* Holds what board type is. */
int nodeid; /* Set to 1 once have nodeid. */
unsigned char node_acquire; /* Node ID when acquired. */
@@ -197,7 +196,6 @@ static int cops_send_packet (struct sk_buff *skb, struct net_device *dev);
static void set_multicast_list (struct net_device *dev);
static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int cops_close (struct net_device *dev);
-static struct net_device_stats *cops_get_stats (struct net_device *dev);
static void cleanup_card(struct net_device *dev)
{
@@ -260,6 +258,15 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops cops_netdev_ops = {
+ .ndo_open = cops_open,
+ .ndo_stop = cops_close,
+ .ndo_start_xmit = cops_send_packet,
+ .ndo_tx_timeout = cops_timeout,
+ .ndo_do_ioctl = cops_ioctl,
+ .ndo_set_multicast_list = set_multicast_list,
+};
+
/*
* This is the real probe routine. Linux has a history of friendly device
* probes on the ISA bus. A good device probe avoids doing writes, and
@@ -333,16 +340,9 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
/* Copy local board variable to lp struct. */
lp->board = board;
- dev->hard_start_xmit = cops_send_packet;
- dev->tx_timeout = cops_timeout;
+ dev->netdev_ops = &cops_netdev_ops;
dev->watchdog_timeo = HZ * 2;
- dev->get_stats = cops_get_stats;
- dev->open = cops_open;
- dev->stop = cops_close;
- dev->do_ioctl = cops_ioctl;
- dev->set_multicast_list = set_multicast_list;
- dev->mc_list = NULL;
/* Tell the user where the card is and what mode we're in. */
if(board==DAYNA)
@@ -797,7 +797,7 @@ static void cops_rx(struct net_device *dev)
{
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
dev->name);
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
while(pkt_len--) /* Discard packet */
inb(ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -819,7 +819,7 @@ static void cops_rx(struct net_device *dev)
{
printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
dev->name, pkt_len);
- lp->stats.tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return;
}
@@ -836,7 +836,7 @@ static void cops_rx(struct net_device *dev)
if(rsp_type != LAP_RESPONSE)
{
printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
- lp->stats.tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return;
}
@@ -846,8 +846,8 @@ static void cops_rx(struct net_device *dev)
skb_reset_transport_header(skb); /* Point to data (Skip header). */
/* Update the counters. */
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
/* Send packet to a higher place. */
netif_rx(skb);
@@ -858,7 +858,7 @@ static void cops_timeout(struct net_device *dev)
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
- lp->stats.tx_errors++;
+ dev->stats.tx_errors++;
if(lp->board==TANGENT)
{
if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
@@ -916,8 +916,8 @@ static int cops_send_packet(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
/* Done sending packet, update counters and cleanup. */
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
dev_kfree_skb (skb);
return 0;
@@ -986,15 +986,6 @@ static int cops_close(struct net_device *dev)
return 0;
}
-/*
- * Get the current statistics.
- * This may be called with the card open or closed.
- */
-static struct net_device_stats *cops_get_stats(struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
- return &lp->stats;
-}
#ifdef MODULE
static struct net_device *cops_dev;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index dc4d49605603..78cc71469136 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -261,7 +261,6 @@ static unsigned char *ltdmacbuf;
struct ltpc_private
{
- struct net_device_stats stats;
struct atalk_addr my_addr;
};
@@ -699,7 +698,6 @@ static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
static struct timer_list ltpc_timer;
static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
-static struct net_device_stats *ltpc_get_stats(struct net_device *dev);
static int read_30 ( struct net_device *dev)
{
@@ -726,8 +724,6 @@ static int sendup_buffer (struct net_device *dev)
int dnode, snode, llaptype, len;
int sklen;
struct sk_buff *skb;
- struct ltpc_private *ltpc_priv = netdev_priv(dev);
- struct net_device_stats *stats = &ltpc_priv->stats;
struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
if (ltc->command != LT_RCVLAP) {
@@ -779,8 +775,8 @@ static int sendup_buffer (struct net_device *dev)
skb_reset_transport_header(skb);
- stats->rx_packets++;
- stats->rx_bytes+=skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
/* toss it onwards */
netif_rx(skb);
@@ -904,10 +900,6 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
/* in kernel 1.3.xx, on entry skb->data points to ddp header,
* and skb->len is the length of the ddp data + ddp header
*/
-
- struct ltpc_private *ltpc_priv = netdev_priv(dev);
- struct net_device_stats *stats = &ltpc_priv->stats;
-
int i;
struct lt_sendlap cbuf;
unsigned char *hdr;
@@ -936,20 +928,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
printk("\n");
}
- stats->tx_packets++;
- stats->tx_bytes+=skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
return 0;
}
-static struct net_device_stats *ltpc_get_stats(struct net_device *dev)
-{
- struct ltpc_private *ltpc_priv = netdev_priv(dev);
- struct net_device_stats *stats = &ltpc_priv->stats;
- return stats;
-}
-
/* initialization stuff */
static int __init ltpc_probe_dma(int base, int dma)
@@ -1027,6 +1012,12 @@ static int __init ltpc_probe_dma(int base, int dma)
return (want & 2) ? 3 : 1;
}
+static const struct net_device_ops ltpc_netdev = {
+ .ndo_start_xmit = ltpc_xmit,
+ .ndo_do_ioctl = ltpc_ioctl,
+ .ndo_set_multicast_list = set_multicast_list,
+};
+
struct net_device * __init ltpc_probe(void)
{
struct net_device *dev;
@@ -1133,14 +1124,7 @@ struct net_device * __init ltpc_probe(void)
else
printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
- /* Fill in the fields of the device structure with ethernet-generic values. */
- dev->hard_start_xmit = ltpc_xmit;
- dev->get_stats = ltpc_get_stats;
-
- /* add the ltpc-specific things */
- dev->do_ioctl = &ltpc_ioctl;
-
- dev->set_multicast_list = &set_multicast_list;
+ dev->netdev_ops = &ltpc_netdev;
dev->mc_list = NULL;
dev->base_addr = io;
dev->irq = irq;
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index ced70799b898..18b566ad4fd1 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -249,6 +249,17 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops at1700_netdev_ops = {
+ .ndo_open = net_open,
+ .ndo_stop = net_close,
+ .ndo_start_xmit = net_send_packet,
+ .ndo_set_multicast_list = set_rx_mode,
+ .ndo_tx_timeout = net_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
"signature", the default bit pattern after a reset. This *doesn't* work --
there is no way to reset the bus interface without a complete power-cycle!
@@ -448,13 +459,7 @@ found:
if (net_debug)
printk(version);
- memset(lp, 0, sizeof(struct net_local));
-
- dev->open = net_open;
- dev->stop = net_close;
- dev->hard_start_xmit = net_send_packet;
- dev->set_multicast_list = &set_rx_mode;
- dev->tx_timeout = net_tx_timeout;
+ dev->netdev_ops = &at1700_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
spin_lock_init(&lp->lock);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index f901fee79a20..9b75aa630062 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -16,6 +16,7 @@
*/
#include "be.h"
+#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -290,6 +291,17 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
return &adapter->stats.net_stats;
}
+static u32 be_calc_rate(u64 bytes, unsigned long ticks)
+{
+ u64 rate = bytes;
+
+ do_div(rate, ticks / HZ);
+ rate <<= 3; /* bytes/sec -> bits/sec */
+ do_div(rate, 1000000ul); /* bits/sec -> Mbits/sec */
+
+ return rate;
+}
+
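A quick worked example of the helper above, using hypothetical byte and tick counts:
/*
 * Illustration only: for bytes = 250000000 observed over ticks = 2 * HZ,
 *   rate = 250000000 / 2        = 125000000  (bytes/sec)
 *   rate = 125000000 << 3       = 1000000000 (bits/sec)
 *   rate = 1000000000 / 1000000 = 1000       (Mbits/sec)
 */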
static void be_tx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -303,11 +315,9 @@ static void be_tx_rate_update(struct be_adapter *adapter)
/* Update tx rate once in two seconds */
if ((now - stats->be_tx_jiffies) > 2 * HZ) {
- u32 r;
- r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) /
- ((now - stats->be_tx_jiffies) / HZ);
- r = r / 1000000; /* M bytes/s */
- stats->be_tx_rate = r * 8; /* M bits/s */
+ stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
+ - stats->be_tx_bytes_prev,
+ now - stats->be_tx_jiffies);
stats->be_tx_jiffies = now;
stats->be_tx_bytes_prev = stats->be_tx_bytes;
}
@@ -599,7 +609,6 @@ static void be_rx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
ulong now = jiffies;
- u32 rate;
/* Wrapped around */
if (time_before(now, stats->be_rx_jiffies)) {
@@ -611,10 +620,9 @@ static void be_rx_rate_update(struct be_adapter *adapter)
if ((now - stats->be_rx_jiffies) < 2 * HZ)
return;
- rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) /
- ((now - stats->be_rx_jiffies) / HZ);
- rate = rate / 1000000; /* MB/Sec */
- stats->be_rx_rate = rate * 8; /* Mega Bits/Sec */
+ stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
+ - stats->be_rx_bytes_prev,
+ now - stats->be_rx_jiffies);
stats->be_rx_jiffies = now;
stats->be_rx_bytes_prev = stats->be_rx_bytes;
}
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index ff6497658a45..7433b88eed7e 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -501,6 +501,21 @@ static void net_poll_controller(struct net_device *dev)
}
#endif
+static const struct net_device_ops net_ops = {
+ .ndo_open = net_open,
+ .ndo_stop = net_close,
+ .ndo_tx_timeout = net_timeout,
+ .ndo_start_xmit = net_send_packet,
+ .ndo_get_stats = net_get_stats,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_mac_address = set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = net_poll_controller,
+#endif
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/* This is the real probe routine. Linux has a history of friendly device
probes on the ISA bus. A good device probe avoids doing writes, and
verifies that the correct device exists and functions.
@@ -843,17 +858,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
/* print the ethernet address. */
printk(", MAC %pM", dev->dev_addr);
- dev->open = net_open;
- dev->stop = net_close;
- dev->tx_timeout = net_timeout;
- dev->watchdog_timeo = HZ;
- dev->hard_start_xmit = net_send_packet;
- dev->get_stats = net_get_stats;
- dev->set_multicast_list = set_multicast_list;
- dev->set_mac_address = set_mac_address;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = net_poll_controller;
-#endif
+ dev->netdev_ops = &net_ops;
+ dev->watchdog_timeo = HZ;
printk("\n");
if (net_debug)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 71eaa431371d..714df2b675e6 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,6 +85,8 @@ struct fl_pg_chunk {
struct page *page;
void *va;
unsigned int offset;
+ u64 *p_cnt;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
};
struct rx_desc;
@@ -101,6 +103,7 @@ struct sge_fl { /* SGE per free-buffer list state */
struct fl_pg_chunk pg_chunk;/* page chunk cache */
unsigned int use_pages; /* whether FL uses pages or sk_buffs */
unsigned int order; /* order of page allocations */
+ unsigned int alloc_size; /* size of allocated buffer */
struct rx_desc *desc; /* address of HW Rx descriptor ring */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
dma_addr_t phys_addr; /* physical address of HW ring start */
@@ -291,6 +294,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
+void t3_start_sge_timers(struct adapter *adap);
void t3_stop_sge_timers(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 9ee021e750c8..e508dc32f3ec 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -191,7 +191,8 @@ struct mdio_ops {
};
struct adapter_info {
- unsigned char nports; /* # of ports */
+ unsigned char nports0; /* # of ports on channel 0 */
+ unsigned char nports1; /* # of ports on channel 1 */
unsigned char phy_base_addr; /* MDIO PHY base address */
unsigned int gpio_out; /* GPIO output settings */
unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
@@ -422,6 +423,7 @@ struct adapter_params {
unsigned short b_wnd[NCCTRL_WIN];
unsigned int nports; /* # of ethernet ports */
+ unsigned int chan_map; /* bitmap of in-use Tx channels */
unsigned int stats_update_period; /* MAC stats accumulation period */
unsigned int linkpoll_period; /* link poll period in 0.1s */
unsigned int rev; /* chip revision */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8be89621bf7..2c2aaa741450 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -602,7 +602,6 @@ static int setup_sge_qsets(struct adapter *adap)
&adap->params.sge.qset[qset_idx], ntxq, dev,
netdev_get_tx_queue(dev, j));
if (err) {
- t3_stop_sge_timers(adap);
t3_free_sge_resources(adap);
return err;
}
@@ -1046,6 +1045,8 @@ static int cxgb_up(struct adapter *adap)
setup_rss(adap);
if (!(adap->flags & NAPI_INIT))
init_napi(adap);
+
+ t3_start_sge_timers(adap);
adap->flags |= FULL_INIT_DONE;
}
@@ -2870,6 +2871,9 @@ static void t3_io_resume(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+ t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
t3_resume_ports(adapter);
}
@@ -3002,7 +3006,7 @@ static int __devinit init_one(struct pci_dev *pdev,
static int version_printed;
int i, err, pci_using_dac = 0;
- unsigned long mmio_start, mmio_len;
+ resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
struct port_info *pi;
@@ -3082,7 +3086,7 @@ static int __devinit init_one(struct pci_dev *pdev,
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
- for (i = 0; i < ai->nports; ++i) {
+ for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
struct net_device *netdev;
netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
@@ -3172,7 +3176,7 @@ static int __devinit init_one(struct pci_dev *pdev,
out_free_dev:
iounmap(adapter->regs);
- for (i = ai->nports - 1; i >= 0; --i)
+ for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
if (adapter->port[i])
free_netdev(adapter->port[i]);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a7555cb3fa4a..26d3587f3399 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -50,6 +50,7 @@
#define SGE_RX_COPY_THRES 256
#define SGE_RX_PULL_LEN 128
+#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
* Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
* It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
@@ -57,8 +58,10 @@
*/
#define FL0_PG_CHUNK_SIZE 2048
#define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)
@@ -345,13 +348,21 @@ static inline int should_restart_tx(const struct sge_txq *q)
return q->in_use - r < (q->size >> 1);
}
-static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d)
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+ struct rx_sw_desc *d)
{
- if (q->use_pages) {
- if (d->pg_chunk.page)
- put_page(d->pg_chunk.page);
+ if (q->use_pages && d->pg_chunk.page) {
+ (*d->pg_chunk.p_cnt)--;
+ if (!*d->pg_chunk.p_cnt)
+ pci_unmap_page(pdev,
+ pci_unmap_addr(&d->pg_chunk, mapping),
+ q->alloc_size, PCI_DMA_FROMDEVICE);
+
+ put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;
} else {
+ pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+ q->buf_size, PCI_DMA_FROMDEVICE);
kfree_skb(d->skb);
d->skb = NULL;
}
@@ -372,9 +383,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
while (q->credits--) {
struct rx_sw_desc *d = &q->sdesc[cidx];
- pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
- q->buf_size, PCI_DMA_FROMDEVICE);
- clear_rx_desc(q, d);
+
+ clear_rx_desc(pdev, q, d);
if (++cidx == q->size)
cidx = 0;
}
@@ -417,18 +427,39 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
return 0;
}
-static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+ unsigned int gen)
+{
+ d->addr_lo = cpu_to_be32(mapping);
+ d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+ wmb();
+ d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+ d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
+}
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+ struct rx_sw_desc *sd, gfp_t gfp,
unsigned int order)
{
if (!q->pg_chunk.page) {
+ dma_addr_t mapping;
+
q->pg_chunk.page = alloc_pages(gfp, order);
if (unlikely(!q->pg_chunk.page))
return -ENOMEM;
q->pg_chunk.va = page_address(q->pg_chunk.page);
+ q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+ SGE_PG_RSVD;
q->pg_chunk.offset = 0;
+ mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
+ 0, q->alloc_size, PCI_DMA_FROMDEVICE);
+ pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
}
sd->pg_chunk = q->pg_chunk;
+ prefetch(sd->pg_chunk.p_cnt);
+
q->pg_chunk.offset += q->buf_size;
if (q->pg_chunk.offset == (PAGE_SIZE << order))
q->pg_chunk.page = NULL;
@@ -436,6 +467,12 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
q->pg_chunk.va += q->buf_size;
get_page(q->pg_chunk.page);
}
+
+ if (sd->pg_chunk.offset == 0)
+ *sd->pg_chunk.p_cnt = 1;
+ else
+ *sd->pg_chunk.p_cnt += 1;
+
return 0;
}
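One way to read the reference-counting scheme introduced above (an interpretation of the code, not wording from the patch):
/*
 * Reading of the code above: p_cnt lives in the last SGE_PG_RSVD bytes of
 * each page chunk's page, the page is pci_map_page()'d once when it is
 * first carved into chunks, *p_cnt counts how many chunks of that page are
 * currently posted to the free list, and clear_rx_desc()/get_packet_pg()
 * only pci_unmap_page() the page once that count drops back to zero.
 */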
@@ -460,35 +497,43 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
*/
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
- void *buf_start;
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
unsigned int count = 0;
while (n--) {
+ dma_addr_t mapping;
int err;
if (q->use_pages) {
- if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
+ if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+ q->order))) {
nomem: q->alloc_failed++;
break;
}
- buf_start = sd->pg_chunk.va;
+ mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
+ sd->pg_chunk.offset;
+ pci_unmap_addr_set(sd, dma_addr, mapping);
+
+ add_one_rx_chunk(mapping, d, q->gen);
+ pci_dma_sync_single_for_device(adap->pdev, mapping,
+ q->buf_size - SGE_PG_RSVD,
+ PCI_DMA_FROMDEVICE);
} else {
- struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+ void *buf_start;
+ struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
if (!skb)
goto nomem;
sd->skb = skb;
buf_start = skb->data;
- }
-
- err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
- adap->pdev);
- if (unlikely(err)) {
- clear_rx_desc(q, sd);
- break;
+ err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+ q->gen, adap->pdev);
+ if (unlikely(err)) {
+ clear_rx_desc(adap->pdev, q, sd);
+ break;
+ }
}
d++;
@@ -795,19 +840,19 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
struct sk_buff *newskb, *skb;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
- newskb = skb = q->pg_skb;
+ dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+ newskb = skb = q->pg_skb;
if (!skb && (len <= SGE_RX_COPY_THRES)) {
newskb = alloc_skb(len, GFP_ATOMIC);
if (likely(newskb != NULL)) {
__skb_put(newskb, len);
- pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
PCI_DMA_FROMDEVICE);
memcpy(newskb->data, sd->pg_chunk.va, len);
- pci_dma_sync_single_for_device(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_device(adap->pdev, dma_addr,
+ len,
+ PCI_DMA_FROMDEVICE);
} else if (!drop_thres)
return NULL;
recycle:
@@ -820,16 +865,25 @@ recycle:
if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
goto recycle;
+ prefetch(sd->pg_chunk.p_cnt);
+
if (!skb)
newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
if (unlikely(!newskb)) {
if (!drop_thres)
return NULL;
goto recycle;
}
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
- fl->buf_size, PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+ PCI_DMA_FROMDEVICE);
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt)
+ pci_unmap_page(adap->pdev,
+ pci_unmap_addr(&sd->pg_chunk, mapping),
+ fl->alloc_size,
+ PCI_DMA_FROMDEVICE);
if (!skb) {
__skb_put(newskb, SGE_RX_PULL_LEN);
memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@@ -1089,7 +1143,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
struct tx_desc *d = &q->desc[pidx];
struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
- cpl->len = htonl(skb->len | 0x80000000);
+ cpl->len = htonl(skb->len);
cntrl = V_TXPKT_INTF(pi->port_id);
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
@@ -1958,8 +2012,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
pi = netdev_priv(skb->dev);
- if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
- !p->fragment) {
+ if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
+ p->csum == htons(0xffff) && !p->fragment) {
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
@@ -2034,10 +2088,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
fl->credits--;
len -= offset;
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
- fl->buf_size, PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(adap->pdev,
+ pci_unmap_addr(sd, dma_addr),
+ fl->buf_size - SGE_PG_RSVD,
+ PCI_DMA_FROMDEVICE);
- prefetch(&qs->lro_frag_tbl);
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt)
+ pci_unmap_page(adap->pdev,
+ pci_unmap_addr(&sd->pg_chunk, mapping),
+ fl->alloc_size,
+ PCI_DMA_FROMDEVICE);
+
+ prefetch(qs->lro_va);
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
@@ -2047,6 +2110,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
qs->lro_frag_tbl.nr_frags++;
qs->lro_frag_tbl.len = frag_len;
+
if (!complete)
return;
@@ -2236,6 +2300,8 @@ no_mem:
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+ prefetch(&qs->lro_frag_tbl);
+
prefetch(addr);
#if L1_CACHE_BYTES < 128
prefetch(addr + L1_CACHE_BYTES);
@@ -2972,21 +3038,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
q->fl[0].order = FL0_PG_ORDER;
q->fl[1].order = FL1_PG_ORDER;
+ q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+ q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
spin_lock_irq(&adapter->sge.reg_lock);
/* FL threshold comparison uses < */
ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
q->rspq.phys_addr, q->rspq.size,
- q->fl[0].buf_size, 1, 0);
+ q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
if (ret)
goto err_unlock;
for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
q->fl[i].phys_addr, q->fl[i].size,
- q->fl[i].buf_size, p->cong_thres, 1,
- 0);
+ q->fl[i].buf_size - SGE_PG_RSVD,
+ p->cong_thres, 1, 0);
if (ret)
goto err_unlock;
}
@@ -3044,9 +3112,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
V_NEWTIMER(q->rspq.holdoff_tmr));
- mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
- mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
-
return 0;
err_unlock:
@@ -3057,6 +3122,27 @@ err:
}
/**
+ * t3_start_sge_timers - start SGE timer call backs
+ * @adap: the adapter
+ *
+ * Starts each SGE queue set's timer call back
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (q->rx_reclaim_timer.function)
+ mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+ }
+}
+
+/**
* t3_stop_sge_timers - stop SGE timer call backs
* @adap: the adapter
*
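
With the two mod_timer() calls removed from t3_sge_alloc_qset() above, arming the TX/RX reclaim timers becomes a separate, once-per-adapter step via the new t3_start_sge_timers(). A hedged sketch of the intended call order (the surrounding bring-up code lives in cxgb3_main.c and is not part of this hunk; setup_sge_qsets() is used as the assumed caller name):

	/* Assumed bring-up order: create every queue set first, then arm timers. */
	err = setup_sge_qsets(adap);	/* calls t3_sge_alloc_qset() per qset */
	if (err)
		goto out;
	t3_start_sge_timers(adap);	/* arms tx/rx reclaim timers for each qset */
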
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ff262a04ded0..31ed31a3428b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -493,20 +493,20 @@ int t3_phy_lasi_intr_handler(struct cphy *phy)
}
static const struct adapter_info t3_adap_info[] = {
- {2, 0,
+ {1, 1, 0,
F_GPIO2_OEN | F_GPIO4_OEN |
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
&mi1_mdio_ops, "Chelsio PE9000"},
- {2, 0,
+ {1, 1, 0,
F_GPIO2_OEN | F_GPIO4_OEN |
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
&mi1_mdio_ops, "Chelsio T302"},
- {1, 0,
+ {1, 0, 0,
F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
{ 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
&mi1_mdio_ext_ops, "Chelsio T310"},
- {2, 0,
+ {1, 1, 0,
F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
@@ -514,7 +514,7 @@ static const struct adapter_info t3_adap_info[] = {
&mi1_mdio_ext_ops, "Chelsio T320"},
{},
{},
- {1, 0,
+ {1, 0, 0,
F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
{ S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
@@ -2128,16 +2128,40 @@ void t3_port_intr_clear(struct adapter *adapter, int idx)
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
unsigned int type)
{
- t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
- t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
- t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
- t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ if (type == F_RESPONSEQ) {
+ /*
+ * Can't write the Response Queue Context bits for
+ * Interrupt Armed or the Reserve bits after the chip
+ * has been initialized out of reset. Writing to these
+ * bits can confuse the hardware.
+ */
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ } else {
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ }
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
+/**
+ * clear_sge_ctxt - completely clear an SGE context
+ * @adapter: the adapter
+ * @id: the context id
+ * @type: the context type
+ *
+ * Completely clear an SGE context. Used predominantly at post-reset
+ * initialization. Note in particular that we don't skip writing to any
+ * "sensitive bits" in the contexts the way that t3_sge_write_context()
+ * does ...
+ */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
unsigned int type)
{
@@ -2145,7 +2169,14 @@ static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
- return t3_sge_write_context(adap, id, type);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+ return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
@@ -2729,10 +2760,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
F_MTUENABLE | V_WINDOWSCALEMODE(1) |
- V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
+ V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
- V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
+ V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
F_IPV6ENABLE | F_NICMODE);
@@ -3196,20 +3227,22 @@ int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
}
/*
- * Perform the bits of HW initialization that are dependent on the number
- * of available ports.
+ * Perform the bits of HW initialization that are dependent on the Tx
+ * channels being used.
*/
-static void init_hw_for_avail_ports(struct adapter *adap, int nports)
+static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
int i;
- if (nports == 1) {
+ if (chan_map != 3) { /* one channel */
t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
- t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
- F_PORT0ACTIVE | F_ENFORCEPKT);
- t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
- } else {
+ t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
+ (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
+ F_TPTXPORT1EN | F_PORT1ACTIVE));
+ t3_write_reg(adap, A_PM1_TX_CFG,
+ chan_map == 1 ? 0xffffffff : 0);
+ } else { /* two channels */
t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
@@ -3517,7 +3550,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
t3_write_reg(adapter, A_PM1_RX_MODE, 0);
t3_write_reg(adapter, A_PM1_TX_MODE, 0);
- init_hw_for_avail_ports(adapter, adapter->params.nports);
+ chan_init_hw(adapter, adapter->params.chan_map);
t3_sge_init(adapter, &adapter->params.sge);
t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@@ -3754,7 +3787,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
get_pci_mode(adapter, &adapter->params.pci);
adapter->params.info = ai;
- adapter->params.nports = ai->nports;
+ adapter->params.nports = ai->nports0 + ai->nports1;
+ adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
/*
* We used to only run the "adapter check task" once a second if
@@ -3785,7 +3819,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
- p->nchan = ai->nports;
+ p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
p->pmrx_size = t3_mc7_size(&adapter->pmrx);
p->pmtx_size = t3_mc7_size(&adapter->pmtx);
p->cm_size = t3_mc7_size(&adapter->cm);
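
The adapter_info entries above now carry separate port counts for the two TX channels (nports0, nports1), and t3_prep_adapter() derives both the total port count and a channel bitmap from them. A worked example using the table values shown (illustrative local variables only):

	/* Chelsio T320 entry: {1, 1, ...}  ->  one port on each channel.     */
	unsigned int nports0 = 1, nports1 = 1;
	unsigned int nports   = nports0 + nports1;		/* = 2 */
	unsigned int chan_map = !!nports0 | (!!nports1 << 1);	/* = 3 */
	unsigned int nchan    = (chan_map == 3) ? 2 : 1;	/* = 2: chan_init_hw() takes the two-channel branch */

	/* Chelsio T310 entry: {1, 0, ...}  ->  chan_map = 1, nchan = 1.      */
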
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 55625dbbae5a..357f565851ed 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -566,6 +566,18 @@ MODULE_LICENSE("GPL");
outw(CSR0, DEPCA_ADDR);\
outw(STOP, DEPCA_DATA)
+static const struct net_device_ops depca_netdev_ops = {
+ .ndo_open = depca_open,
+ .ndo_start_xmit = depca_start_xmit,
+ .ndo_stop = depca_close,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_do_ioctl = depca_ioctl,
+ .ndo_tx_timeout = depca_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init depca_hw_init (struct net_device *dev, struct device *device)
{
struct depca_private *lp;
@@ -793,12 +805,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
}
/* The DEPCA-specific entries in the device structure. */
- dev->open = &depca_open;
- dev->hard_start_xmit = &depca_start_xmit;
- dev->stop = &depca_close;
- dev->set_multicast_list = &set_multicast_list;
- dev->do_ioctl = &depca_ioctl;
- dev->tx_timeout = depca_tx_timeout;
+ dev->netdev_ops = &depca_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->mem_start = 0;
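
depca is the first of a batch of ISA-era drivers in this diff (eepro, eexpress, eth16i, ewrk3, ibmlana, donauboe, lance, lp486e, ni52, ni65, seeq8005, smc-ultra32) converted from per-instance function pointers on struct net_device to a single shared net_device_ops table. The shape of the conversion, sketched with a hypothetical "foo" driver:

	/* One const ops table replaces the dev->open / dev->hard_start_xmit / ...
	 * assignments that each probe routine used to make per device. */
	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open		= foo_open,
		.ndo_stop		= foo_close,
		.ndo_start_xmit		= foo_start_xmit,
		/* generic ethernet helpers cover what the old code never set: */
		.ndo_change_mtu		= eth_change_mtu,
		.ndo_set_mac_address	= eth_mac_addr,
		.ndo_validate_addr	= eth_validate_addr,
	};

	/* in foo_probe(): */
	dev->netdev_ops = &foo_netdev_ops;
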
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index e187c88ae145..cc2ab6412c73 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -739,6 +739,17 @@ static void __init eepro_print_info (struct net_device *dev)
static const struct ethtool_ops eepro_ethtool_ops;
+static const struct net_device_ops eepro_netdev_ops = {
+ .ndo_open = eepro_open,
+ .ndo_stop = eepro_close,
+ .ndo_start_xmit = eepro_send_packet,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_tx_timeout = eepro_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/* This is the real probe routine. Linux has a history of friendly device
probes on the ISA bus. A good device probe avoids doing writes, and
verifies that the correct device exists and functions. */
@@ -851,11 +862,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
}
}
- dev->open = eepro_open;
- dev->stop = eepro_close;
- dev->hard_start_xmit = eepro_send_packet;
- dev->set_multicast_list = &set_multicast_list;
- dev->tx_timeout = eepro_tx_timeout;
+ dev->netdev_ops = &eepro_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &eepro_ethtool_ops;
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 9ff3f2f5e382..1686dca28748 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1043,6 +1043,17 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
lp->last_tx = jiffies;
}
+static const struct net_device_ops eexp_netdev_ops = {
+ .ndo_open = eexp_open,
+ .ndo_stop = eexp_close,
+ .ndo_start_xmit = eexp_xmit,
+ .ndo_set_multicast_list = eexp_set_multicast,
+ .ndo_tx_timeout = eexp_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/*
* Sanity check the suspected EtherExpress card
* Read hardware address, reset card, size memory and initialize buffer
@@ -1163,11 +1174,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
lp->width = buswidth;
- dev->open = eexp_open;
- dev->stop = eexp_close;
- dev->hard_start_xmit = eexp_xmit;
- dev->set_multicast_list = &eexp_set_multicast;
- dev->tx_timeout = eexp_timeout;
+ dev->netdev_ops = &eexp_netdev_ops;
dev->watchdog_timeo = 2*HZ;
return register_netdev(dev);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 5c048f2fd74f..0d8b6da046f2 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -475,6 +475,17 @@ out:
}
#endif
+static const struct net_device_ops eth16i_netdev_ops = {
+ .ndo_open = eth16i_open,
+ .ndo_stop = eth16i_close,
+ .ndo_start_xmit = eth16i_tx,
+ .ndo_set_multicast_list = eth16i_multicast,
+ .ndo_tx_timeout = eth16i_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
{
struct eth16i_local *lp = netdev_priv(dev);
@@ -549,12 +560,7 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
/* Initialize the device structure */
- memset(lp, 0, sizeof(struct eth16i_local));
- dev->open = eth16i_open;
- dev->stop = eth16i_close;
- dev->hard_start_xmit = eth16i_tx;
- dev->set_multicast_list = eth16i_multicast;
- dev->tx_timeout = eth16i_timeout;
+ dev->netdev_ops = &eth16i_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
spin_lock_init(&lp->lock);
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
new file mode 100644
index 000000000000..91a9b1a33764
--- /dev/null
+++ b/drivers/net/ethoc.c
@@ -0,0 +1,1112 @@
+/*
+ * linux/drivers/net/ethoc.c
+ *
+ * Copyright (C) 2007-2008 Avionic Design Development GmbH
+ * Copyright (C) 2008-2009 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by Thierry Reding <thierry.reding@avionic-design.de>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <net/ethoc.h>
+
+/* register offsets */
+#define MODER 0x00
+#define INT_SOURCE 0x04
+#define INT_MASK 0x08
+#define IPGT 0x0c
+#define IPGR1 0x10
+#define IPGR2 0x14
+#define PACKETLEN 0x18
+#define COLLCONF 0x1c
+#define TX_BD_NUM 0x20
+#define CTRLMODER 0x24
+#define MIIMODER 0x28
+#define MIICOMMAND 0x2c
+#define MIIADDRESS 0x30
+#define MIITX_DATA 0x34
+#define MIIRX_DATA 0x38
+#define MIISTATUS 0x3c
+#define MAC_ADDR0 0x40
+#define MAC_ADDR1 0x44
+#define ETH_HASH0 0x48
+#define ETH_HASH1 0x4c
+#define ETH_TXCTRL 0x50
+
+/* mode register */
+#define MODER_RXEN (1 << 0) /* receive enable */
+#define MODER_TXEN (1 << 1) /* transmit enable */
+#define MODER_NOPRE (1 << 2) /* no preamble */
+#define MODER_BRO (1 << 3) /* broadcast address */
+#define MODER_IAM (1 << 4) /* individual address mode */
+#define MODER_PRO (1 << 5) /* promiscuous mode */
+#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */
+#define MODER_LOOP (1 << 7) /* loopback */
+#define MODER_NBO (1 << 8) /* no back-off */
+#define MODER_EDE (1 << 9) /* excess defer enable */
+#define MODER_FULLD (1 << 10) /* full duplex */
+#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
+#define MODER_DCRC (1 << 12) /* delayed CRC enable */
+#define MODER_CRC (1 << 13) /* CRC enable */
+#define MODER_HUGE (1 << 14) /* huge packets enable */
+#define MODER_PAD (1 << 15) /* padding enabled */
+#define MODER_RSM (1 << 16) /* receive small packets */
+
+/* interrupt source and mask registers */
+#define INT_MASK_TXF (1 << 0) /* transmit frame */
+#define INT_MASK_TXE (1 << 1) /* transmit error */
+#define INT_MASK_RXF (1 << 2) /* receive frame */
+#define INT_MASK_RXE (1 << 3) /* receive error */
+#define INT_MASK_BUSY (1 << 4)
+#define INT_MASK_TXC (1 << 5) /* transmit control frame */
+#define INT_MASK_RXC (1 << 6) /* receive control frame */
+
+#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
+#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)
+
+#define INT_MASK_ALL ( \
+ INT_MASK_TXF | INT_MASK_TXE | \
+ INT_MASK_RXF | INT_MASK_RXE | \
+ INT_MASK_TXC | INT_MASK_RXC | \
+ INT_MASK_BUSY \
+ )
+
+/* packet length register */
+#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16)
+#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0)
+#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
+ PACKETLEN_MAX(max))
+
+/* transmit buffer number register */
+#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? (x) : 0x80)
+
+/* control module mode register */
+#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */
+#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */
+#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */
+
+/* MII mode register */
+#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */
+#define MIIMODER_NOPRE (1 << 8) /* no preamble */
+
+/* MII command register */
+#define MIICOMMAND_SCAN (1 << 0) /* scan status */
+#define MIICOMMAND_READ (1 << 1) /* read status */
+#define MIICOMMAND_WRITE (1 << 2) /* write control data */
+
+/* MII address register */
+#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0)
+#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8)
+#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \
+ MIIADDRESS_RGAD(reg))
+
+/* MII transmit data register */
+#define MIITX_DATA_VAL(x) ((x) & 0xffff)
+
+/* MII receive data register */
+#define MIIRX_DATA_VAL(x) ((x) & 0xffff)
+
+/* MII status register */
+#define MIISTATUS_LINKFAIL (1 << 0)
+#define MIISTATUS_BUSY (1 << 1)
+#define MIISTATUS_INVALID (1 << 2)
+
+/* TX buffer descriptor */
+#define TX_BD_CS (1 << 0) /* carrier sense lost */
+#define TX_BD_DF (1 << 1) /* defer indication */
+#define TX_BD_LC (1 << 2) /* late collision */
+#define TX_BD_RL (1 << 3) /* retransmission limit */
+#define TX_BD_RETRY_MASK (0x00f0)
+#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4)
+#define TX_BD_UR (1 << 8) /* transmitter underrun */
+#define TX_BD_CRC (1 << 11) /* TX CRC enable */
+#define TX_BD_PAD (1 << 12) /* pad enable for short packets */
+#define TX_BD_WRAP (1 << 13)
+#define TX_BD_IRQ (1 << 14) /* interrupt request enable */
+#define TX_BD_READY (1 << 15) /* TX buffer ready */
+#define TX_BD_LEN(x) (((x) & 0xffff) << 16)
+#define TX_BD_LEN_MASK (0xffff << 16)
+
+#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
+ TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
+
+/* RX buffer descriptor */
+#define RX_BD_LC (1 << 0) /* late collision */
+#define RX_BD_CRC (1 << 1) /* RX CRC error */
+#define RX_BD_SF (1 << 2) /* short frame */
+#define RX_BD_TL (1 << 3) /* too long */
+#define RX_BD_DN (1 << 4) /* dribble nibble */
+#define RX_BD_IS (1 << 5) /* invalid symbol */
+#define RX_BD_OR (1 << 6) /* receiver overrun */
+#define RX_BD_MISS (1 << 7)
+#define RX_BD_CF (1 << 8) /* control frame */
+#define RX_BD_WRAP (1 << 13)
+#define RX_BD_IRQ (1 << 14) /* interrupt request enable */
+#define RX_BD_EMPTY (1 << 15)
+#define RX_BD_LEN(x) (((x) & 0xffff) << 16)
+
+#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
+ RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
+
+#define ETHOC_BUFSIZ 1536
+#define ETHOC_ZLEN 64
+#define ETHOC_BD_BASE 0x400
+#define ETHOC_TIMEOUT (HZ / 2)
+#define ETHOC_MII_TIMEOUT (1 + (HZ / 5))
+
+/**
+ * struct ethoc - driver-private device structure
+ * @iobase: pointer to I/O memory region
+ * @membase: pointer to buffer memory region
+ * @num_tx: number of send buffers
+ * @cur_tx: last send buffer written
+ * @dty_tx: last buffer actually sent
+ * @num_rx: number of receive buffers
+ * @cur_rx: current receive buffer
+ * @netdev: pointer to network device structure
+ * @napi: NAPI structure
+ * @stats: network device statistics
+ * @msg_enable: device state flags
+ * @rx_lock: receive lock
+ * @lock: device lock
+ * @phy: attached PHY
+ * @mdio: MDIO bus for PHY access
+ * @phy_id: address of attached PHY
+ */
+struct ethoc {
+ void __iomem *iobase;
+ void __iomem *membase;
+
+ unsigned int num_tx;
+ unsigned int cur_tx;
+ unsigned int dty_tx;
+
+ unsigned int num_rx;
+ unsigned int cur_rx;
+
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct net_device_stats stats;
+ u32 msg_enable;
+
+ spinlock_t rx_lock;
+ spinlock_t lock;
+
+ struct phy_device *phy;
+ struct mii_bus *mdio;
+ s8 phy_id;
+};
+
+/**
+ * struct ethoc_bd - buffer descriptor
+ * @stat: buffer statistics
+ * @addr: physical memory address
+ */
+struct ethoc_bd {
+ u32 stat;
+ u32 addr;
+};
+
+static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+{
+ return ioread32(dev->iobase + offset);
+}
+
+static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+{
+ iowrite32(data, dev->iobase + offset);
+}
+
+static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+{
+ loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
+ bd->stat = ethoc_read(dev, offset + 0);
+ bd->addr = ethoc_read(dev, offset + 4);
+}
+
+static void ethoc_write_bd(struct ethoc *dev, int index,
+ const struct ethoc_bd *bd)
+{
+ loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
+ ethoc_write(dev, offset + 0, bd->stat);
+ ethoc_write(dev, offset + 4, bd->addr);
+}
+
+static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+{
+ u32 imask = ethoc_read(dev, INT_MASK);
+ imask |= mask;
+ ethoc_write(dev, INT_MASK, imask);
+}
+
+static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+{
+ u32 imask = ethoc_read(dev, INT_MASK);
+ imask &= ~mask;
+ ethoc_write(dev, INT_MASK, imask);
+}
+
+static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+{
+ ethoc_write(dev, INT_SOURCE, mask);
+}
+
+static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+{
+ u32 mode = ethoc_read(dev, MODER);
+ mode |= MODER_RXEN | MODER_TXEN;
+ ethoc_write(dev, MODER, mode);
+}
+
+static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+{
+ u32 mode = ethoc_read(dev, MODER);
+ mode &= ~(MODER_RXEN | MODER_TXEN);
+ ethoc_write(dev, MODER, mode);
+}
+
+static int ethoc_init_ring(struct ethoc *dev)
+{
+ struct ethoc_bd bd;
+ int i;
+
+ dev->cur_tx = 0;
+ dev->dty_tx = 0;
+ dev->cur_rx = 0;
+
+ /* setup transmission buffers */
+ bd.addr = 0;
+ bd.stat = TX_BD_IRQ | TX_BD_CRC;
+
+ for (i = 0; i < dev->num_tx; i++) {
+ if (i == dev->num_tx - 1)
+ bd.stat |= TX_BD_WRAP;
+
+ ethoc_write_bd(dev, i, &bd);
+ bd.addr += ETHOC_BUFSIZ;
+ }
+
+ bd.addr = dev->num_tx * ETHOC_BUFSIZ;
+ bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
+
+ for (i = 0; i < dev->num_rx; i++) {
+ if (i == dev->num_rx - 1)
+ bd.stat |= RX_BD_WRAP;
+
+ ethoc_write_bd(dev, dev->num_tx + i, &bd);
+ bd.addr += ETHOC_BUFSIZ;
+ }
+
+ return 0;
+}
+
+static int ethoc_reset(struct ethoc *dev)
+{
+ u32 mode;
+
+ /* TODO: reset controller? */
+
+ ethoc_disable_rx_and_tx(dev);
+
+ /* TODO: setup registers */
+
+ /* enable FCS generation and automatic padding */
+ mode = ethoc_read(dev, MODER);
+ mode |= MODER_CRC | MODER_PAD;
+ ethoc_write(dev, MODER, mode);
+
+ /* set full-duplex mode */
+ mode = ethoc_read(dev, MODER);
+ mode |= MODER_FULLD;
+ ethoc_write(dev, MODER, mode);
+ ethoc_write(dev, IPGT, 0x15);
+
+ ethoc_ack_irq(dev, INT_MASK_ALL);
+ ethoc_enable_irq(dev, INT_MASK_ALL);
+ ethoc_enable_rx_and_tx(dev);
+ return 0;
+}
+
+static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
+ struct ethoc_bd *bd)
+{
+ struct net_device *netdev = dev->netdev;
+ unsigned int ret = 0;
+
+ if (bd->stat & RX_BD_TL) {
+ dev_err(&netdev->dev, "RX: frame too long\n");
+ dev->stats.rx_length_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_SF) {
+ dev_err(&netdev->dev, "RX: frame too short\n");
+ dev->stats.rx_length_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_DN) {
+ dev_err(&netdev->dev, "RX: dribble nibble\n");
+ dev->stats.rx_frame_errors++;
+ }
+
+ if (bd->stat & RX_BD_CRC) {
+ dev_err(&netdev->dev, "RX: wrong CRC\n");
+ dev->stats.rx_crc_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_OR) {
+ dev_err(&netdev->dev, "RX: overrun\n");
+ dev->stats.rx_over_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_MISS)
+ dev->stats.rx_missed_errors++;
+
+ if (bd->stat & RX_BD_LC) {
+ dev_err(&netdev->dev, "RX: late collision\n");
+ dev->stats.collisions++;
+ ret++;
+ }
+
+ return ret;
+}
+
+static int ethoc_rx(struct net_device *dev, int limit)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ int count;
+
+ for (count = 0; count < limit; ++count) {
+ unsigned int entry;
+ struct ethoc_bd bd;
+
+ entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & RX_BD_EMPTY)
+ break;
+
+ if (ethoc_update_rx_stats(priv, &bd) == 0) {
+ int size = bd.stat >> 16;
+ struct sk_buff *skb = netdev_alloc_skb(dev, size);
+ if (likely(skb)) {
+ void *src = priv->membase + bd.addr;
+ memcpy_fromio(skb_put(skb, size), src, size);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += size;
+ netif_receive_skb(skb);
+ } else {
+ if (net_ratelimit())
+ dev_warn(&dev->dev, "low on memory - "
+ "packet dropped\n");
+
+ priv->stats.rx_dropped++;
+ break;
+ }
+ }
+
+ /* clear the buffer descriptor so it can be reused */
+ bd.stat &= ~RX_BD_STATS;
+ bd.stat |= RX_BD_EMPTY;
+ ethoc_write_bd(priv, entry, &bd);
+ priv->cur_rx++;
+ }
+
+ return count;
+}
+
+static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
+{
+ struct net_device *netdev = dev->netdev;
+
+ if (bd->stat & TX_BD_LC) {
+ dev_err(&netdev->dev, "TX: late collision\n");
+ dev->stats.tx_window_errors++;
+ }
+
+ if (bd->stat & TX_BD_RL) {
+ dev_err(&netdev->dev, "TX: retransmit limit\n");
+ dev->stats.tx_aborted_errors++;
+ }
+
+ if (bd->stat & TX_BD_UR) {
+ dev_err(&netdev->dev, "TX: underrun\n");
+ dev->stats.tx_fifo_errors++;
+ }
+
+ if (bd->stat & TX_BD_CS) {
+ dev_err(&netdev->dev, "TX: carrier sense lost\n");
+ dev->stats.tx_carrier_errors++;
+ }
+
+ if (bd->stat & TX_BD_STATS)
+ dev->stats.tx_errors++;
+
+ dev->stats.collisions += (bd->stat >> 4) & 0xf;
+ dev->stats.tx_bytes += bd->stat >> 16;
+ dev->stats.tx_packets++;
+ return 0;
+}
+
+static void ethoc_tx(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+
+ while (priv->dty_tx != priv->cur_tx) {
+ unsigned int entry = priv->dty_tx % priv->num_tx;
+ struct ethoc_bd bd;
+
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & TX_BD_READY)
+ break;
+
+ entry = (++priv->dty_tx) % priv->num_tx;
+ (void)ethoc_update_tx_stats(priv, &bd);
+ }
+
+ if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
+ netif_wake_queue(dev);
+
+ ethoc_ack_irq(priv, INT_MASK_TX);
+ spin_unlock(&priv->lock);
+}
+
+static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct ethoc *priv = netdev_priv(dev);
+ u32 pending;
+
+ ethoc_disable_irq(priv, INT_MASK_ALL);
+ pending = ethoc_read(priv, INT_SOURCE);
+ if (unlikely(pending == 0)) {
+ ethoc_enable_irq(priv, INT_MASK_ALL);
+ return IRQ_NONE;
+ }
+
+ ethoc_ack_irq(priv, INT_MASK_ALL);
+
+ if (pending & INT_MASK_BUSY) {
+ dev_err(&dev->dev, "packet dropped\n");
+ priv->stats.rx_dropped++;
+ }
+
+ if (pending & INT_MASK_RX) {
+ if (napi_schedule_prep(&priv->napi))
+ __napi_schedule(&priv->napi);
+ } else {
+ ethoc_enable_irq(priv, INT_MASK_RX);
+ }
+
+ if (pending & INT_MASK_TX)
+ ethoc_tx(dev);
+
+ ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
+ return IRQ_HANDLED;
+}
+
+static int ethoc_get_mac_address(struct net_device *dev, void *addr)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u8 *mac = (u8 *)addr;
+ u32 reg;
+
+ reg = ethoc_read(priv, MAC_ADDR0);
+ mac[2] = (reg >> 24) & 0xff;
+ mac[3] = (reg >> 16) & 0xff;
+ mac[4] = (reg >> 8) & 0xff;
+ mac[5] = (reg >> 0) & 0xff;
+
+ reg = ethoc_read(priv, MAC_ADDR1);
+ mac[0] = (reg >> 8) & 0xff;
+ mac[1] = (reg >> 0) & 0xff;
+
+ return 0;
+}
+
+static int ethoc_poll(struct napi_struct *napi, int budget)
+{
+ struct ethoc *priv = container_of(napi, struct ethoc, napi);
+ int work_done = 0;
+
+ work_done = ethoc_rx(priv->netdev, budget);
+ if (work_done < budget) {
+ ethoc_enable_irq(priv, INT_MASK_RX);
+ napi_complete(napi);
+ }
+
+ return work_done;
+}
+
+static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
+ struct ethoc *priv = bus->priv;
+
+ ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
+ ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
+
+ while (time_before(jiffies, timeout)) {
+ u32 status = ethoc_read(priv, MIISTATUS);
+ if (!(status & MIISTATUS_BUSY)) {
+ u32 data = ethoc_read(priv, MIIRX_DATA);
+ /* reset MII command register */
+ ethoc_write(priv, MIICOMMAND, 0);
+ return data;
+ }
+
+ schedule();
+ }
+
+ return -EBUSY;
+}
+
+static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
+ struct ethoc *priv = bus->priv;
+
+ ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
+ ethoc_write(priv, MIITX_DATA, val);
+ ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
+
+ while (time_before(jiffies, timeout)) {
+ u32 stat = ethoc_read(priv, MIISTATUS);
+ if (!(stat & MIISTATUS_BUSY))
+ return 0;
+
+ schedule();
+ }
+
+ return -EBUSY;
+}
+
+static int ethoc_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void ethoc_mdio_poll(struct net_device *dev)
+{
+}
+
+static int ethoc_mdio_probe(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phy;
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ phy = priv->mdio->phy_map[i];
+ if (phy) {
+ if (priv->phy_id != -1) {
+ /* attach to specified PHY */
+ if (priv->phy_id == phy->addr)
+ break;
+ } else {
+ /* autoselect PHY if none was specified */
+ if (phy->addr != 0)
+ break;
+ }
+ }
+ }
+
+ if (!phy) {
+ dev_err(&dev->dev, "no PHY found\n");
+ return -ENXIO;
+ }
+
+ phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0,
+ PHY_INTERFACE_MODE_GMII);
+ if (IS_ERR(phy)) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ priv->phy = phy;
+ return 0;
+}
+
+static int ethoc_open(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ unsigned int min_tx = 2;
+ unsigned int num_bd;
+ int ret;
+
+ ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (ret)
+ return ret;
+
+ /* calculate the number of TX/RX buffers */
+ num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
+ priv->num_tx = min(min_tx, num_bd / 4);
+ priv->num_rx = num_bd - priv->num_tx;
+ ethoc_write(priv, TX_BD_NUM, priv->num_tx);
+
+ ethoc_init_ring(priv);
+ ethoc_reset(priv);
+
+ if (netif_queue_stopped(dev)) {
+ dev_dbg(&dev->dev, " resuming queue\n");
+ netif_wake_queue(dev);
+ } else {
+ dev_dbg(&dev->dev, " starting queue\n");
+ netif_start_queue(dev);
+ }
+
+ phy_start(priv->phy);
+ napi_enable(&priv->napi);
+
+ if (netif_msg_ifup(priv)) {
+ dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
+ dev->base_addr, dev->mem_start, dev->mem_end);
+ }
+
+ return 0;
+}
+
+static int ethoc_stop(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ napi_disable(&priv->napi);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ ethoc_disable_rx_and_tx(priv);
+ free_irq(dev->irq, dev);
+
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct mii_ioctl_data *mdio = if_mii(ifr);
+ struct phy_device *phy = NULL;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd != SIOCGMIIPHY) {
+ if (mdio->phy_id >= PHY_MAX_ADDR)
+ return -ERANGE;
+
+ phy = priv->mdio->phy_map[mdio->phy_id];
+ if (!phy)
+ return -ENODEV;
+ } else {
+ phy = priv->phy;
+ }
+
+ return phy_mii_ioctl(phy, mdio, cmd);
+}
+
+static int ethoc_config(struct net_device *dev, struct ifmap *map)
+{
+ return -ENOSYS;
+}
+
+static int ethoc_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u8 *mac = (u8 *)addr;
+
+ ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
+ (mac[4] << 8) | (mac[5] << 0));
+ ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
+
+ return 0;
+}
+
+static void ethoc_set_multicast_list(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 mode = ethoc_read(priv, MODER);
+ struct dev_mc_list *mc = NULL;
+ u32 hash[2] = { 0, 0 };
+
+ /* set loopback mode if requested */
+ if (dev->flags & IFF_LOOPBACK)
+ mode |= MODER_LOOP;
+ else
+ mode &= ~MODER_LOOP;
+
+ /* receive broadcast frames if requested */
+ if (dev->flags & IFF_BROADCAST)
+ mode &= ~MODER_BRO;
+ else
+ mode |= MODER_BRO;
+
+ /* enable promiscuous mode if requested */
+ if (dev->flags & IFF_PROMISC)
+ mode |= MODER_PRO;
+ else
+ mode &= ~MODER_PRO;
+
+ ethoc_write(priv, MODER, mode);
+
+ /* receive multicast frames */
+ if (dev->flags & IFF_ALLMULTI) {
+ hash[0] = 0xffffffff;
+ hash[1] = 0xffffffff;
+ } else {
+ for (mc = dev->mc_list; mc; mc = mc->next) {
+ u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr);
+ int bit = (crc >> 26) & 0x3f;
+ hash[bit >> 5] |= 1 << (bit & 0x1f);
+ }
+ }
+
+ ethoc_write(priv, ETH_HASH0, hash[0]);
+ ethoc_write(priv, ETH_HASH1, hash[1]);
+}
+
+static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ return -ENOSYS;
+}
+
+static void ethoc_tx_timeout(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 pending = ethoc_read(priv, INT_SOURCE);
+ if (likely(pending))
+ ethoc_interrupt(dev->irq, dev);
+}
+
+static struct net_device_stats *ethoc_stats(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ return &priv->stats;
+}
+
+static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct ethoc_bd bd;
+ unsigned int entry;
+ void *dest;
+
+ if (unlikely(skb->len > ETHOC_BUFSIZ)) {
+ priv->stats.tx_errors++;
+ return -EMSGSIZE;
+ }
+
+ entry = priv->cur_tx % priv->num_tx;
+ spin_lock_irq(&priv->lock);
+ priv->cur_tx++;
+
+ ethoc_read_bd(priv, entry, &bd);
+ if (unlikely(skb->len < ETHOC_ZLEN))
+ bd.stat |= TX_BD_PAD;
+ else
+ bd.stat &= ~TX_BD_PAD;
+
+ dest = priv->membase + bd.addr;
+ memcpy_toio(dest, skb->data, skb->len);
+
+ bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
+ bd.stat |= TX_BD_LEN(skb->len);
+ ethoc_write_bd(priv, entry, &bd);
+
+ bd.stat |= TX_BD_READY;
+ ethoc_write_bd(priv, entry, &bd);
+
+ if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
+ dev_dbg(&dev->dev, "stopping queue\n");
+ netif_stop_queue(dev);
+ }
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+
+ spin_unlock_irq(&priv->lock);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ethoc_netdev_ops = {
+ .ndo_open = ethoc_open,
+ .ndo_stop = ethoc_stop,
+ .ndo_do_ioctl = ethoc_ioctl,
+ .ndo_set_config = ethoc_config,
+ .ndo_set_mac_address = ethoc_set_mac_address,
+ .ndo_set_multicast_list = ethoc_set_multicast_list,
+ .ndo_change_mtu = ethoc_change_mtu,
+ .ndo_tx_timeout = ethoc_tx_timeout,
+ .ndo_get_stats = ethoc_stats,
+ .ndo_start_xmit = ethoc_start_xmit,
+};
+
+/**
+ * ethoc_probe() - initialize OpenCores ethernet MAC
+ * @pdev: platform device
+ */
+static int ethoc_probe(struct platform_device *pdev)
+{
+ struct net_device *netdev = NULL;
+ struct resource *res = NULL;
+ struct resource *mmio = NULL;
+ struct resource *mem = NULL;
+ struct ethoc *priv = NULL;
+ unsigned int phy;
+ int ret = 0;
+
+ /* allocate networking device */
+ netdev = alloc_etherdev(sizeof(struct ethoc));
+ if (!netdev) {
+ dev_err(&pdev->dev, "cannot allocate network device\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ platform_set_drvdata(pdev, netdev);
+
+ /* obtain I/O memory space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ mmio = devm_request_mem_region(&pdev->dev, res->start,
+ res->end - res->start + 1, res->name);
+ if (!mmio) {
+ dev_err(&pdev->dev, "cannot request I/O memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ netdev->base_addr = mmio->start;
+
+ /* obtain buffer memory space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ mem = devm_request_mem_region(&pdev->dev, res->start,
+ res->end - res->start + 1, res->name);
+ if (!mem) {
+ dev_err(&pdev->dev, "cannot request memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ netdev->mem_start = mem->start;
+ netdev->mem_end = mem->end;
+
+ /* obtain device IRQ number */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain IRQ\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ netdev->irq = res->start;
+
+ /* setup driver-private data */
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+
+ priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
+ mmio->end - mmio->start + 1);
+ if (!priv->iobase) {
+ dev_err(&pdev->dev, "cannot remap I/O memory space\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
+ mem->end - mem->start + 1);
+ if (!priv->membase) {
+ dev_err(&pdev->dev, "cannot remap memory space\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ /* Allow the platform setup code to pass in a MAC address. */
+ if (pdev->dev.platform_data) {
+ struct ethoc_platform_data *pdata =
+ (struct ethoc_platform_data *)pdev->dev.platform_data;
+ memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
+ priv->phy_id = pdata->phy_id;
+ }
+
+ /* Check that the given MAC address is valid. If it isn't, read the
+ * current MAC from the controller. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ ethoc_get_mac_address(netdev, netdev->dev_addr);
+
+ /* Check the MAC again for validity; if it still isn't valid, choose
+ * and program a random one. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+
+ ethoc_set_mac_address(netdev, netdev->dev_addr);
+
+ /* register MII bus */
+ priv->mdio = mdiobus_alloc();
+ if (!priv->mdio) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ priv->mdio->name = "ethoc-mdio";
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
+ priv->mdio->name, pdev->id);
+ priv->mdio->read = ethoc_mdio_read;
+ priv->mdio->write = ethoc_mdio_write;
+ priv->mdio->reset = ethoc_mdio_reset;
+ priv->mdio->priv = priv;
+
+ priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!priv->mdio->irq) {
+ ret = -ENOMEM;
+ goto free_mdio;
+ }
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
+ priv->mdio->irq[phy] = PHY_POLL;
+
+ ret = mdiobus_register(priv->mdio);
+ if (ret) {
+ dev_err(&netdev->dev, "failed to register MDIO bus\n");
+ goto free_mdio;
+ }
+
+ ret = ethoc_mdio_probe(netdev);
+ if (ret) {
+ dev_err(&netdev->dev, "failed to probe MDIO bus\n");
+ goto error;
+ }
+
+ ether_setup(netdev);
+
+ /* setup the net_device structure */
+ netdev->netdev_ops = &ethoc_netdev_ops;
+ netdev->watchdog_timeo = ETHOC_TIMEOUT;
+ netdev->features |= 0;
+
+ /* setup NAPI */
+ memset(&priv->napi, 0, sizeof(priv->napi));
+ netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+
+ spin_lock_init(&priv->rx_lock);
+ spin_lock_init(&priv->lock);
+
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(&netdev->dev, "failed to register interface\n");
+ goto error;
+ }
+
+ goto out;
+
+error:
+ mdiobus_unregister(priv->mdio);
+free_mdio:
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+free:
+ free_netdev(netdev);
+out:
+ return ret;
+}
+
+/**
+ * ethoc_remove() - shutdown OpenCores ethernet MAC
+ * @pdev: platform device
+ */
+static int ethoc_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct ethoc *priv = netdev_priv(netdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (netdev) {
+ phy_disconnect(priv->phy);
+ priv->phy = NULL;
+
+ if (priv->mdio) {
+ mdiobus_unregister(priv->mdio);
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+ }
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+static int ethoc_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+#else
+# define ethoc_suspend NULL
+# define ethoc_resume NULL
+#endif
+
+static struct platform_driver ethoc_driver = {
+ .probe = ethoc_probe,
+ .remove = ethoc_remove,
+ .suspend = ethoc_suspend,
+ .resume = ethoc_resume,
+ .driver = {
+ .name = "ethoc",
+ },
+};
+
+static int __init ethoc_init(void)
+{
+ return platform_driver_register(&ethoc_driver);
+}
+
+static void __exit ethoc_exit(void)
+{
+ platform_driver_unregister(&ethoc_driver);
+}
+
+module_init(ethoc_init);
+module_exit(ethoc_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
+MODULE_LICENSE("GPL v2");
+
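
ethoc keeps its buffer descriptors in register space starting at ETHOC_BD_BASE (8 bytes each) and carves the dedicated packet memory into fixed ETHOC_BUFSIZ slots, splitting them between TX and RX in ethoc_open(). A worked example assuming a 16 KiB buffer region (the actual size comes from board-specific platform resources and is not part of this patch):

	/* Assumed 16 KiB of on-board packet memory. */
	unsigned int mem_size = 16 * 1024;
	unsigned int num_bd   = mem_size / ETHOC_BUFSIZ;	/* 16384 / 1536 = 10 */
	unsigned int num_tx   = min(2u, num_bd / 4);		/* = 2               */
	unsigned int num_rx   = num_bd - num_tx;		/* = 8               */

	/* Descriptor i lives at ETHOC_BD_BASE + 8 * i in register space;
	 * its packet buffer starts at offset i * ETHOC_BUFSIZ in membase.   */
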
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index b852303c9362..1a685a04d4b2 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -388,6 +388,18 @@ static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
return err;
}
+static const struct net_device_ops ewrk3_netdev_ops = {
+ .ndo_open = ewrk3_open,
+ .ndo_start_xmit = ewrk3_queue_pkt,
+ .ndo_stop = ewrk3_close,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_do_ioctl = ewrk3_ioctl,
+ .ndo_tx_timeout = ewrk3_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init
ewrk3_hw_init(struct net_device *dev, u_long iobase)
{
@@ -603,16 +615,11 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase)
printk(version);
}
/* The EWRK3-specific entries in the device structure. */
- dev->open = ewrk3_open;
- dev->hard_start_xmit = ewrk3_queue_pkt;
- dev->stop = ewrk3_close;
- dev->set_multicast_list = set_multicast_list;
- dev->do_ioctl = ewrk3_ioctl;
+ dev->netdev_ops = &ewrk3_netdev_ops;
if (lp->adapter_name[4] == '3')
SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
else
SET_ETHTOOL_OPS(dev, &ethtool_ops);
- dev->tx_timeout = ewrk3_timeout;
dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
dev->mem_start = 0;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 9d81e7a48dba..6a38800be3f1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1239,19 +1239,9 @@ static int gfar_enet_open(struct net_device *dev)
return err;
}
-static inline struct txfcb *gfar_add_fcb(struct sk_buff **skbp)
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
- struct txfcb *fcb;
- struct sk_buff *skb = *skbp;
-
- if (unlikely(skb_headroom(skb) < GMAC_FCB_LEN)) {
- struct sk_buff *old_skb = skb;
- skb = skb_realloc_headroom(old_skb, GMAC_FCB_LEN);
- if (!skb)
- return NULL;
- dev_kfree_skb_any(old_skb);
- }
- fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
+ struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
cacheable_memzero(fcb, GMAC_FCB_LEN);
return fcb;
@@ -1320,6 +1310,22 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
base = priv->tx_bd_base;
+ /* make space for additional header when fcb is needed */
+ if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+ (skb_headroom(skb) < GMAC_FCB_LEN)) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
+ if (!skb_new) {
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ kfree_skb(skb);
+ skb = skb_new;
+ }
+
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1372,20 +1378,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set up checksumming */
if (CHECKSUM_PARTIAL == skb->ip_summed) {
- fcb = gfar_add_fcb(&skb);
- if (likely(fcb != NULL)) {
- lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_checksum(skb, fcb);
- }
+ fcb = gfar_add_fcb(skb);
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ gfar_tx_checksum(skb, fcb);
}
if (priv->vlgrp && vlan_tx_tag_present(skb)) {
- if (unlikely(NULL == fcb))
- fcb = gfar_add_fcb(&skb);
- if (likely(fcb != NULL)) {
+ if (unlikely(NULL == fcb)) {
+ fcb = gfar_add_fcb(skb);
lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_vlan(skb, fcb);
}
+
+ gfar_tx_vlan(skb, fcb);
}
/* setup the TxBD length and buffer pointer for the first BD */
@@ -1433,7 +1437,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Unlock priv */
spin_unlock_irqrestore(&priv->txlock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
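
The gianfar change above moves the headroom check out of gfar_add_fcb() and into gfar_start_xmit(), before any TxBD has been touched: if skb_realloc_headroom() fails there, the packet is counted as a TX error, freed, and NETDEV_TX_OK returned, instead of silently sending a frame without its frame control block. With GMAC_FCB_LEN of headroom guaranteed, the skb_push() in gfar_add_fcb() can no longer fail, which is why the NULL checks on fcb disappear. A condensed restatement of the new flow (needs_fcb() stands in for the checksum/VLAN condition):

	if (needs_fcb(skb) && skb_headroom(skb) < GMAC_FCB_LEN) {
		struct sk_buff *skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);

		if (!skb_new) {			/* drop, never requeue */
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);			/* release the original */
		skb = skb_new;
	}
	fcb = gfar_add_fcb(skb);		/* plain skb_push(), cannot fail */
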
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 5b5bf9f9861a..c25bc0bc0b25 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -905,6 +905,17 @@ static char *ibmlana_adapter_names[] __devinitdata = {
NULL
};
+
+static const struct net_device_ops ibmlana_netdev_ops = {
+ .ndo_open = ibmlana_open,
+ .ndo_stop = ibmlana_close,
+ .ndo_start_xmit = ibmlana_tx,
+ .ndo_set_multicast_list = ibmlana_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __devinit ibmlana_init_one(struct device *kdev)
{
struct mca_device *mdev = to_mca_device(kdev);
@@ -973,11 +984,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
mca_device_set_claim(mdev, 1);
/* set methods */
-
- dev->open = ibmlana_open;
- dev->stop = ibmlana_close;
- dev->hard_start_xmit = ibmlana_tx;
- dev->set_multicast_list = ibmlana_set_multicast_list;
+ dev->netdev_ops = &ibmlana_netdev_ops;
dev->flags |= IFF_MULTICAST;
/* copy out MAC address */
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 6f3e7f71658d..6b6548b9fda0 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1524,6 +1524,13 @@ toshoboe_close (struct pci_dev *pci_dev)
free_netdev(self->netdev);
}
+static const struct net_device_ops toshoboe_netdev_ops = {
+ .ndo_open = toshoboe_net_open,
+ .ndo_stop = toshoboe_net_close,
+ .ndo_start_xmit = toshoboe_hard_xmit,
+ .ndo_do_ioctl = toshoboe_net_ioctl,
+};
+
static int
toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
{
@@ -1657,10 +1664,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
#endif
SET_NETDEV_DEV(dev, &pci_dev->dev);
- dev->hard_start_xmit = toshoboe_hard_xmit;
- dev->open = toshoboe_net_open;
- dev->stop = toshoboe_net_close;
- dev->do_ioctl = toshoboe_net_ioctl;
+ dev->netdev_ops = &toshoboe_netdev_ops;
err = register_netdev(dev);
if (err)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index d83d4010656d..633808d447be 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -454,6 +454,18 @@ out:
}
#endif
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_stop = lance_close,
+ .ndo_get_stats = lance_get_stats,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
struct lance_private *lp;
@@ -714,12 +726,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
printk(version);
/* The LANCE-specific entries in the device structure. */
- dev->open = lance_open;
- dev->hard_start_xmit = lance_start_xmit;
- dev->stop = lance_close;
- dev->get_stats = lance_get_stats;
- dev->set_multicast_list = set_multicast_list;
- dev->tx_timeout = lance_tx_timeout;
+ dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
err = register_netdev(dev);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 4d1a059921c6..d44bddbee373 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -952,6 +952,17 @@ static void print_eth(char *add)
(unsigned char) add[12], (unsigned char) add[13]);
}
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init lp486e_probe(struct net_device *dev) {
struct i596_private *lp;
unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
@@ -1014,12 +1025,8 @@ static int __init lp486e_probe(struct net_device *dev) {
printk("\n");
/* The LP486E-specific entries in the device structure. */
- dev->open = &i596_open;
- dev->stop = &i596_close;
- dev->hard_start_xmit = &i596_start_xmit;
- dev->set_multicast_list = &set_multicast_list;
+ dev->netdev_ops = &i596_netdev_ops;
dev->watchdog_timeo = 5*HZ;
- dev->tx_timeout = i596_tx_timeout;
#if 0
/* selftest reports 0x320925ae - don't know what that means */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a8bcc00c3302..77d44a061703 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -441,6 +441,18 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops ni52_netdev_ops = {
+ .ndo_open = ni52_open,
+ .ndo_stop = ni52_close,
+ .ndo_get_stats = ni52_get_stats,
+ .ndo_tx_timeout = ni52_timeout,
+ .ndo_start_xmit = ni52_send_packet,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init ni52_probe1(struct net_device *dev, int ioaddr)
{
int i, size, retval;
@@ -561,15 +573,8 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
printk("IRQ %d (assigned and not checked!).\n", dev->irq);
}
- dev->open = ni52_open;
- dev->stop = ni52_close;
- dev->get_stats = ni52_get_stats;
- dev->tx_timeout = ni52_timeout;
+ dev->netdev_ops = &ni52_netdev_ops;
dev->watchdog_timeo = HZ/20;
- dev->hard_start_xmit = ni52_send_packet;
- dev->set_multicast_list = set_multicast_list;
-
- dev->if_port = 0;
return 0;
out:
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index df5f869e8d8f..6474f02bf783 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -237,7 +237,7 @@ struct priv
void *tmdbounce[TMDNUM];
int tmdbouncenum;
int lock,xmit_queued;
- struct net_device_stats stats;
+
void *self;
int cmdr_addr;
int cardno;
@@ -257,7 +257,6 @@ static void ni65_timeout(struct net_device *dev);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
-static struct net_device_stats *ni65_get_stats(struct net_device *);
static void set_multicast_list(struct net_device *dev);
static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
@@ -401,6 +400,17 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops ni65_netdev_ops = {
+ .ndo_open = ni65_open,
+ .ndo_stop = ni65_close,
+ .ndo_start_xmit = ni65_send_packet,
+ .ndo_tx_timeout = ni65_timeout,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/*
* this is the real card probe ..
*/
@@ -549,13 +559,9 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
}
dev->base_addr = ioaddr;
- dev->open = ni65_open;
- dev->stop = ni65_close;
- dev->hard_start_xmit = ni65_send_packet;
- dev->tx_timeout = ni65_timeout;
+ dev->netdev_ops = &ni65_netdev_ops;
dev->watchdog_timeo = HZ/2;
- dev->get_stats = ni65_get_stats;
- dev->set_multicast_list = set_multicast_list;
+
return 0; /* everything is OK */
}
@@ -901,13 +907,13 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
if(debuglevel > 1)
printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
if(csr0 & CSR0_BABL)
- p->stats.tx_errors++;
+ dev->stats.tx_errors++;
if(csr0 & CSR0_MISS) {
int i;
for(i=0;i<RMDNUM;i++)
printk("%02x ",p->rmdhead[i].u.s.status);
printk("\n");
- p->stats.rx_errors++;
+ dev->stats.rx_errors++;
}
if(csr0 & CSR0_MERR) {
if(debuglevel > 1)
@@ -997,12 +1003,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
#endif
/* checking some errors */
if(tmdp->status2 & XMIT_RTRY)
- p->stats.tx_aborted_errors++;
+ dev->stats.tx_aborted_errors++;
if(tmdp->status2 & XMIT_LCAR)
- p->stats.tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
/* this stops the xmitter */
- p->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
if(debuglevel > 0)
printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
if(p->features & INIT_RING_BEFORE_START) {
@@ -1016,12 +1022,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
if(debuglevel > 2)
printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
- p->stats.tx_errors++;
+ dev->stats.tx_errors++;
tmdp->status2 = 0;
}
else {
- p->stats.tx_bytes -= (short)(tmdp->blen);
- p->stats.tx_packets++;
+ dev->stats.tx_bytes -= (short)(tmdp->blen);
+ dev->stats.tx_packets++;
}
#ifdef XMT_VIA_SKB
@@ -1057,7 +1063,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
if(!(rmdstat & RCV_ERR)) {
if(rmdstat & RCV_START)
{
- p->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
}
}
@@ -1066,16 +1072,16 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
if(rmdstat & RCV_FRAM)
- p->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if(rmdstat & RCV_OFLO)
- p->stats.rx_over_errors++;
+ dev->stats.rx_over_errors++;
if(rmdstat & RCV_CRC)
- p->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
if(rmdstat & RCV_BUF_ERR)
- p->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
}
if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
- p->stats.rx_errors++;
+ dev->stats.rx_errors++;
}
else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
{
@@ -1106,20 +1112,20 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
skb_put(skb,len);
skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
- p->stats.rx_packets++;
- p->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
else
{
printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
- p->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
}
else {
printk(KERN_INFO "%s: received runt packet\n",dev->name);
- p->stats.rx_errors++;
+ dev->stats.rx_errors++;
}
rmdp->blen = -(R_BUF_SIZE-8);
rmdp->mlen = 0;
@@ -1213,23 +1219,6 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static struct net_device_stats *ni65_get_stats(struct net_device *dev)
-{
-
-#if 0
- int i;
- struct priv *p = dev->ml_priv;
- for(i=0;i<RMDNUM;i++)
- {
- struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
- printk("%02x ",rmdp->u.s.status);
- }
- printk("\n");
-#endif
-
- return &((struct priv *)dev->ml_priv)->stats;
-}
-
static void set_multicast_list(struct net_device *dev)
{
if(!ni65_lance_reinit(dev))
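
The ni65 hunks above combine the two conversions that recur throughout this series: the stats counters move from the driver's private struct into the dev->stats block that every net_device already carries, and the scattered dev->open/dev->stop/... assignments are gathered into one const net_device_ops table installed via dev->netdev_ops. A minimal sketch of the resulting shape for a hypothetical driver "foo" (names are illustrative, not from the patch):

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open		= foo_open,
		.ndo_stop		= foo_close,
		.ndo_start_xmit		= foo_start_xmit,
		.ndo_set_multicast_list	= foo_set_multicast_list,
		/* no .ndo_get_stats needed: the core falls back to &dev->stats */
	};

	static int __init foo_probe1(struct net_device *dev, int ioaddr)
	{
		dev->netdev_ops = &foo_netdev_ops;
		dev->watchdog_timeo = HZ/2;
		return 0;
	}

	/* ... and in the interrupt/xmit paths, count into the shared block: */
	dev->stats.tx_packets++;
	dev->stats.rx_errors++;

This is also what lets trivial accessors such as ni65_get_stats above (and sdla_stats below) be deleted outright.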
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 12a8ffffeb03..ebbbe09725fe 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -143,6 +143,17 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops seeq8005_netdev_ops = {
+ .ndo_open = seeq8005_open,
+ .ndo_stop = seeq8005_close,
+ .ndo_start_xmit = seeq8005_send_packet,
+ .ndo_tx_timeout = seeq8005_timeout,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/* This is the real probe routine. Linux has a history of friendly device
probes on the ISA bus. A good device probes avoids doing writes, and
verifies that the correct device exists and functions. */
@@ -332,12 +343,8 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
}
}
#endif
- dev->open = seeq8005_open;
- dev->stop = seeq8005_close;
- dev->hard_start_xmit = seeq8005_send_packet;
- dev->tx_timeout = seeq8005_timeout;
+ dev->netdev_ops = &seeq8005_netdev_ops;
dev->watchdog_timeo = HZ/20;
- dev->set_multicast_list = set_multicast_list;
dev->flags &= ~IFF_MULTICAST;
return 0;
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 2033fee3143a..0291ea098a06 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -142,9 +142,6 @@ static int __init do_ultra_probe(struct net_device *dev)
int base_addr = dev->base_addr;
int irq = dev->irq;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = &ultra_poll;
-#endif
if (base_addr > 0x1ff) /* Check a single specified location. */
return ultra_probe1(dev, base_addr);
else if (base_addr != 0) /* Don't probe at all. */
@@ -199,7 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
+ .ndo_poll_controller = ultra_poll,
#endif
};
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index cb6c097a2e0a..7a554adc70fb 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -153,6 +153,22 @@ out:
return ERR_PTR(err);
}
+
+static const struct net_device_ops ultra32_netdev_ops = {
+ .ndo_open = ultra32_open,
+ .ndo_stop = ultra32_close,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
{
int i, edge, media, retval;
@@ -273,11 +289,8 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
ei_status.block_output = &ultra32_block_output;
ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
ei_status.reset_8390 = &ultra32_reset_8390;
- dev->open = &ultra32_open;
- dev->stop = &ultra32_close;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ei_poll;
-#endif
+
+ dev->netdev_ops = &ultra32_netdev_ops;
NS8390_init(dev, 0);
return 0;
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 18d653bbd4e0..9a7973a54116 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -831,6 +831,17 @@ static int __init smc_findirq(int ioaddr)
#endif
}
+static const struct net_device_ops smc_netdev_ops = {
+ .ndo_open = smc_open,
+ .ndo_stop = smc_close,
+ .ndo_start_xmit = smc_wait_to_send_packet,
+ .ndo_tx_timeout = smc_timeout,
+ .ndo_set_multicast_list = smc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/*----------------------------------------------------------------------
. Function: smc_probe( int ioaddr )
.
@@ -1044,12 +1055,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
goto err_out;
}
- dev->open = smc_open;
- dev->stop = smc_close;
- dev->hard_start_xmit = smc_wait_to_send_packet;
- dev->tx_timeout = smc_timeout;
+ dev->netdev_ops = &smc_netdev_ops;
dev->watchdog_timeo = HZ/20;
- dev->set_multicast_list = smc_set_multicast_list;
return 0;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index ad3cbc91a8fa..af8f60ca0f57 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1680,6 +1680,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
u8 address, u8 data)
{
u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
+ u32 temp;
int ret;
SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data);
@@ -1688,6 +1689,10 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
if (!ret) {
op = E2P_CMD_EPC_CMD_WRITE_ | address;
smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
+
+ /* Workaround for hardware read-after-write restriction */
+ temp = smsc911x_reg_read(pdata, BYTE_TEST);
+
ret = smsc911x_eeprom_send_cmd(pdata, op);
}
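
The added BYTE_TEST read is a dummy read: its value is never used (temp exists only to hold it), and it sits between the E2P_DATA write and the EEPROM write command purely to satisfy the controller's read-after-write restriction mentioned in the comment, presumably so the preceding register write has completed before the command is issued.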
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 193308118f95..456f8bff40be 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -142,7 +142,7 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
return;
}
-
+static struct net_device_ops madgemc_netdev_ops __read_mostly;
static int __devinit madgemc_probe(struct device *device)
{
@@ -168,7 +168,7 @@ static int __devinit madgemc_probe(struct device *device)
goto getout;
}
- dev->dma = 0;
+ dev->netdev_ops = &madgemc_netdev_ops;
card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
if (card==NULL) {
@@ -348,9 +348,6 @@ static int __devinit madgemc_probe(struct device *device)
memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
- dev->open = madgemc_open;
- dev->stop = madgemc_close;
-
tp->tmspriv = card;
dev_set_drvdata(device, dev);
@@ -758,6 +755,10 @@ static struct mca_driver madgemc_driver = {
static int __init madgemc_init (void)
{
+ madgemc_netdev_ops = tms380tr_netdev_ops;
+ madgemc_netdev_ops.ndo_open = madgemc_open;
+ madgemc_netdev_ops.ndo_stop = madgemc_close;
+
return mca_register_driver (&madgemc_driver);
}
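
The tms380tr-based Token Ring drivers converted here (madgemc above, proteon and skisa below) cannot share a single const ops table because each needs its own open/stop hooks on top of the common tms380tr handlers. The idiom they all use is a writable, module-local copy that is filled in once at module init, assembled here from the hunks above:

	static struct net_device_ops madgemc_netdev_ops __read_mostly;

	static int __init madgemc_init(void)
	{
		/* start from the shared tms380tr table, then override two hooks */
		madgemc_netdev_ops = tms380tr_netdev_ops;
		madgemc_netdev_ops.ndo_open = madgemc_open;
		madgemc_netdev_ops.ndo_stop = madgemc_close;

		return mca_register_driver(&madgemc_driver);
	}

Probe then only has to point dev->netdev_ops at the per-driver copy.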
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index b8c955f6d31a..16e8783ee9cd 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -116,6 +116,8 @@ nodev:
return -ENODEV;
}
+static struct net_device_ops proteon_netdev_ops __read_mostly;
+
static int __init setup_card(struct net_device *dev, struct device *pdev)
{
struct net_local *tp;
@@ -167,8 +169,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
tp->tmspriv = NULL;
- dev->open = proteon_open;
- dev->stop = tms380tr_close;
+ dev->netdev_ops = &proteon_netdev_ops;
if (dev->irq == 0)
{
@@ -352,6 +353,10 @@ static int __init proteon_init(void)
struct platform_device *pdev;
int i, num = 0, err = 0;
+ proteon_netdev_ops = tms380tr_netdev_ops;
+ proteon_netdev_ops.ndo_open = proteon_open;
+ proteon_netdev_ops.ndo_stop = tms380tr_close;
+
err = platform_driver_register(&proteon_driver);
if (err)
return err;
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index c0f58f08782c..46db5c5395b2 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -133,6 +133,8 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
return 0;
}
+static struct net_device_ops sk_isa_netdev_ops __read_mostly;
+
static int __init setup_card(struct net_device *dev, struct device *pdev)
{
struct net_local *tp;
@@ -184,8 +186,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
tp->tmspriv = NULL;
- dev->open = sk_isa_open;
- dev->stop = tms380tr_close;
+ dev->netdev_ops = &sk_isa_netdev_ops;
if (dev->irq == 0)
{
@@ -362,6 +363,10 @@ static int __init sk_isa_init(void)
struct platform_device *pdev;
int i, num = 0, err = 0;
+ sk_isa_netdev_ops = tms380tr_netdev_ops;
+ sk_isa_netdev_ops.ndo_open = sk_isa_open;
+ sk_isa_netdev_ops.ndo_stop = tms380tr_close;
+
err = platform_driver_register(&sk_isa_driver);
if (err)
return err;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 9d7db2c8d661..a91d9c55d78e 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -124,7 +124,6 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
static int smctr_get_physical_drop_number(struct net_device *dev);
static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
static int smctr_get_station_id(struct net_device *dev);
-static struct net_device_stats *smctr_get_stats(struct net_device *dev);
static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
__u16 bytes_count);
static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
@@ -3633,6 +3632,14 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops smctr_netdev_ops = {
+ .ndo_open = smctr_open,
+ .ndo_stop = smctr_close,
+ .ndo_start_xmit = smctr_send_packet,
+ .ndo_tx_timeout = smctr_timeout,
+ .ndo_get_stats = smctr_get_stats,
+ .ndo_set_multicast_list = smctr_set_multicast_list,
+};
static int __init smctr_probe1(struct net_device *dev, int ioaddr)
{
@@ -3683,13 +3690,8 @@ static int __init smctr_probe1(struct net_device *dev, int ioaddr)
(unsigned int)dev->base_addr,
dev->irq, tp->rom_base, tp->ram_base);
- dev->open = smctr_open;
- dev->stop = smctr_close;
- dev->hard_start_xmit = smctr_send_packet;
- dev->tx_timeout = smctr_timeout;
+ dev->netdev_ops = &smctr_netdev_ops;
dev->watchdog_timeo = HZ;
- dev->get_stats = smctr_get_stats;
- dev->set_multicast_list = &smctr_set_multicast_list;
return (0);
out:
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a110326dce6f..86a479f61c0c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2009,6 +2009,9 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
/* Disable Rx and Tx */
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
+
ucc_geth_memclean(ugeth);
}
@@ -3345,6 +3348,14 @@ static int ucc_geth_open(struct net_device *dev)
return -EINVAL;
}
+ err = init_phy(dev);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot initialize PHY, aborting.",
+ dev->name);
+ return err;
+ }
+
err = ucc_struct_init(ugeth);
if (err) {
if (netif_msg_ifup(ugeth))
@@ -3381,13 +3392,6 @@ static int ucc_geth_open(struct net_device *dev)
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
- err = init_phy(dev);
- if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
- goto out_err;
- }
-
phy_start(ugeth->phydev);
err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3430,9 +3434,6 @@ static int ucc_geth_close(struct net_device *dev)
free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
-
netif_stop_queue(dev);
return 0;
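
In ucc_geth the PHY attach now follows the up/down lifecycle: init_phy() is called at the top of ucc_geth_open(), so a PHY failure aborts the open before the rest of the setup runs, and the matching phy_disconnect() moves out of ucc_geth_close() into ucc_geth_stop(), immediately after Rx and Tx are disabled. As a result ugeth->phydev is only valid between a successful open and the corresponding stop.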
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 6a07ba9371db..1d637f407a0c 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -714,19 +714,19 @@ static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
switch (ret)
{
case SDLA_RET_OK:
- flp->stats.tx_packets++;
+ dev->stats.tx_packets++;
ret = DLCI_RET_OK;
break;
case SDLA_RET_CIR_OVERFLOW:
case SDLA_RET_BUF_OVERSIZE:
case SDLA_RET_NO_BUFS:
- flp->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
ret = DLCI_RET_DROP;
break;
default:
- flp->stats.tx_errors++;
+ dev->stats.tx_errors++;
ret = DLCI_RET_ERR;
break;
}
@@ -807,7 +807,7 @@ static void sdla_receive(struct net_device *dev)
if (i == CONFIG_DLCI_MAX)
{
printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.", dev->name, dlci);
- flp->stats.rx_errors++;
+ dev->stats.rx_errors++;
success = 0;
}
}
@@ -819,7 +819,7 @@ static void sdla_receive(struct net_device *dev)
if (skb == NULL)
{
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- flp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
success = 0;
}
else
@@ -859,7 +859,7 @@ static void sdla_receive(struct net_device *dev)
if (success)
{
- flp->stats.rx_packets++;
+ dev->stats.rx_packets++;
dlp = netdev_priv(master);
(*dlp->receive)(skb, master);
}
@@ -1590,13 +1590,14 @@ fail:
return err;
}
-static struct net_device_stats *sdla_stats(struct net_device *dev)
-{
- struct frad_local *flp;
- flp = netdev_priv(dev);
-
- return(&flp->stats);
-}
+static const struct net_device_ops sdla_netdev_ops = {
+ .ndo_open = sdla_open,
+ .ndo_stop = sdla_close,
+ .ndo_do_ioctl = sdla_ioctl,
+ .ndo_set_config = sdla_set_config,
+ .ndo_start_xmit = sdla_transmit,
+ .ndo_change_mtu = sdla_change_mtu,
+};
static void setup_sdla(struct net_device *dev)
{
@@ -1604,20 +1605,13 @@ static void setup_sdla(struct net_device *dev)
netdev_boot_setup_check(dev);
+ dev->netdev_ops = &sdla_netdev_ops;
dev->flags = 0;
dev->type = 0xFFFF;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->mtu = SDLA_MAX_MTU;
- dev->open = sdla_open;
- dev->stop = sdla_close;
- dev->do_ioctl = sdla_ioctl;
- dev->set_config = sdla_set_config;
- dev->get_stats = sdla_stats;
- dev->hard_start_xmit = sdla_transmit;
- dev->change_mtu = sdla_change_mtu;
-
flp->activate = sdla_activate;
flp->deactivate = sdla_deactivate;
flp->assoc = sdla_assoc;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 612fffe100a6..8a0823588c51 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -485,6 +485,7 @@ config MWL8K
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/ath5k/Kconfig"
source "drivers/net/wireless/ath9k/Kconfig"
+source "drivers/net/wireless/ar9170/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index d780487c420f..50e7fba7f0ea 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -57,5 +57,6 @@ obj-$(CONFIG_P54_COMMON) += p54/
obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K) += ath9k/
+obj-$(CONFIG_AR9170_USB) += ar9170/
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ar9170/Kconfig b/drivers/net/wireless/ar9170/Kconfig
new file mode 100644
index 000000000000..de4281fda129
--- /dev/null
+++ b/drivers/net/wireless/ar9170/Kconfig
@@ -0,0 +1,17 @@
+config AR9170_USB
+ tristate "Atheros AR9170 802.11n USB support"
+ depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL
+ select FW_LOADER
+ help
+ This is a driver for the Atheros "otus" 802.11n USB devices.
+
+ These devices require additional firmware (2 files).
+ For now, these files can be downloaded from here:
+ http://wireless.kernel.org/en/users/Drivers/ar9170
+
+ If you choose to build a module, it'll be called ar9170usb.
+
+config AR9170_LEDS
+ bool
+ depends on AR9170_USB && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_USB)
+ default y
diff --git a/drivers/net/wireless/ar9170/Makefile b/drivers/net/wireless/ar9170/Makefile
new file mode 100644
index 000000000000..8d91c7ee3215
--- /dev/null
+++ b/drivers/net/wireless/ar9170/Makefile
@@ -0,0 +1,3 @@
+ar9170usb-objs := usb.o main.o cmd.o mac.o phy.o led.o
+
+obj-$(CONFIG_AR9170_USB) += ar9170usb.o
diff --git a/drivers/net/wireless/ar9170/ar9170.h b/drivers/net/wireless/ar9170/ar9170.h
new file mode 100644
index 000000000000..f4fb2e94aea0
--- /dev/null
+++ b/drivers/net/wireless/ar9170/ar9170.h
@@ -0,0 +1,209 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * Driver specific definitions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __AR9170_H
+#define __AR9170_H
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <net/wireless.h>
+#include <net/mac80211.h>
+#ifdef CONFIG_AR9170_LEDS
+#include <linux/leds.h>
+#endif /* CONFIG_AR9170_LEDS */
+#include "eeprom.h"
+#include "hw.h"
+
+#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1)
+
+enum ar9170_bw {
+ AR9170_BW_20,
+ AR9170_BW_40_BELOW,
+ AR9170_BW_40_ABOVE,
+
+ __AR9170_NUM_BW,
+};
+
+enum ar9170_rf_init_mode {
+ AR9170_RFI_NONE,
+ AR9170_RFI_WARM,
+ AR9170_RFI_COLD,
+};
+
+#define AR9170_MAX_RX_BUFFER_SIZE 8192
+
+#ifdef CONFIG_AR9170_LEDS
+struct ar9170;
+
+struct ar9170_led {
+ struct ar9170 *ar;
+ struct led_classdev l;
+ char name[32];
+ unsigned int toggled;
+ bool registered;
+};
+
+#endif /* CONFIG_AR9170_LEDS */
+
+enum ar9170_device_state {
+ AR9170_UNKNOWN_STATE,
+ AR9170_STOPPED,
+ AR9170_IDLE,
+ AR9170_STARTED,
+ AR9170_ASSOCIATED,
+};
+
+struct ar9170 {
+ struct ieee80211_hw *hw;
+ struct mutex mutex;
+ enum ar9170_device_state state;
+
+ int (*open)(struct ar9170 *);
+ void (*stop)(struct ar9170 *);
+ int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int);
+ int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 ,
+ void *, u32 , void *);
+ void (*callback_cmd)(struct ar9170 *, u32 , void *);
+
+ /* interface mode settings */
+ struct ieee80211_vif *vif;
+ u8 mac_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+
+ /* beaconing */
+ struct sk_buff *beacon;
+ struct work_struct beacon_work;
+
+ /* cryptographic engine */
+ u64 usedkeys;
+ bool rx_software_decryption;
+ bool disable_offload;
+
+ /* filter settings */
+ struct work_struct filter_config_work;
+ u64 cur_mc_hash, want_mc_hash;
+ u32 cur_filter, want_filter;
+ unsigned int filter_changed;
+ bool sniffer_enabled;
+
+ /* PHY */
+ struct ieee80211_channel *channel;
+ int noise[4];
+
+ /* power calibration data */
+ u8 power_5G_leg[4];
+ u8 power_2G_cck[4];
+ u8 power_2G_ofdm[4];
+ u8 power_5G_ht20[8];
+ u8 power_5G_ht40[8];
+ u8 power_2G_ht20[8];
+ u8 power_2G_ht40[8];
+
+#ifdef CONFIG_AR9170_LEDS
+ struct delayed_work led_work;
+ struct ar9170_led leds[AR9170_NUM_LEDS];
+#endif /* CONFIG_AR9170_LEDS */
+
+ /* qos queue settings */
+ spinlock_t tx_stats_lock;
+ struct ieee80211_tx_queue_stats tx_stats[5];
+ struct ieee80211_tx_queue_params edcf[5];
+
+ spinlock_t cmdlock;
+ __le32 cmdbuf[PAYLOAD_MAX + 1];
+
+ /* MAC statistics */
+ struct ieee80211_low_level_stats stats;
+
+ /* EEPROM */
+ struct ar9170_eeprom eeprom;
+
+ /* global tx status for unregistered Stations. */
+ struct sk_buff_head global_tx_status;
+ struct sk_buff_head global_tx_status_waste;
+ struct delayed_work tx_status_janitor;
+};
+
+struct ar9170_sta_info {
+ struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
+};
+
+#define IS_STARTED(a) (a->state >= AR9170_STARTED)
+#define IS_ACCEPTING_CMD(a) (a->state >= AR9170_IDLE)
+
+#define AR9170_FILTER_CHANGED_PROMISC BIT(0)
+#define AR9170_FILTER_CHANGED_MULTICAST BIT(1)
+#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2)
+
+/* exported interface */
+void *ar9170_alloc(size_t priv_size);
+int ar9170_register(struct ar9170 *ar, struct device *pdev);
+void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
+void ar9170_unregister(struct ar9170 *ar);
+void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+ bool update_statistics, u16 tx_status);
+
+/* MAC */
+int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int ar9170_init_mac(struct ar9170 *ar);
+int ar9170_set_qos(struct ar9170 *ar);
+int ar9170_update_multicast(struct ar9170 *ar);
+int ar9170_update_frame_filter(struct ar9170 *ar);
+int ar9170_set_operating_mode(struct ar9170 *ar);
+int ar9170_set_beacon_timers(struct ar9170 *ar);
+int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry);
+int ar9170_update_beacon(struct ar9170 *ar);
+void ar9170_new_beacon(struct work_struct *work);
+int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
+ u8 keyidx, u8 *keydata, int keylen);
+int ar9170_disable_key(struct ar9170 *ar, u8 id);
+
+/* LEDs */
+#ifdef CONFIG_AR9170_LEDS
+int ar9170_register_leds(struct ar9170 *ar);
+void ar9170_unregister_leds(struct ar9170 *ar);
+#endif /* CONFIG_AR9170_LEDS */
+int ar9170_init_leds(struct ar9170 *ar);
+int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state);
+
+/* PHY / RF */
+int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band);
+int ar9170_init_rf(struct ar9170 *ar);
+int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
+ enum ar9170_rf_init_mode rfi, enum ar9170_bw bw);
+
+#endif /* __AR9170_H */
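
Everything bus-specific in struct ar9170 sits behind the open/stop/tx/exec_cmd/callback_cmd function pointers, and the exported ar9170_alloc()/ar9170_register()/ar9170_rx()/ar9170_unregister() calls are the only entry points a transport needs, so the MAC/PHY code never touches USB directly. A rough sketch of how a back-end is expected to wire itself up; the my_usb_* names and the probe context are assumptions for illustration, not taken from this patch (the real glue lives in usb.c, which is not shown here):

	/* sketch only: helper names are hypothetical, error unwinding elided */
	static int my_usb_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
	{
		struct ar9170 *ar;

		ar = ar9170_alloc(sizeof(struct my_usb_priv));
		if (IS_ERR(ar))
			return PTR_ERR(ar);

		/* fill in the bus hooks before registering */
		ar->open	 = my_usb_open;
		ar->stop	 = my_usb_stop;
		ar->tx		 = my_usb_tx;
		ar->exec_cmd	 = my_usb_exec_cmd;
		ar->callback_cmd = my_usb_cmd_callback;

		return ar9170_register(ar, &intf->dev);
	}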
diff --git a/drivers/net/wireless/ar9170/cmd.c b/drivers/net/wireless/ar9170/cmd.c
new file mode 100644
index 000000000000..f57a6200167b
--- /dev/null
+++ b/drivers/net/wireless/ar9170/cmd.c
@@ -0,0 +1,129 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * Basic HW register/memory/command access functions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ar9170.h"
+#include "cmd.h"
+
+int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
+{
+ int err;
+
+ if (unlikely(!IS_ACCEPTING_CMD(ar)))
+ return 0;
+
+ err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL);
+ if (err)
+ printk(KERN_DEBUG "%s: writing memory failed\n",
+ wiphy_name(ar->hw->wiphy));
+ return err;
+}
+
+int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
+{
+ __le32 buf[2] = {
+ cpu_to_le32(reg),
+ cpu_to_le32(val),
+ };
+ int err;
+
+ if (unlikely(!IS_ACCEPTING_CMD(ar)))
+ return 0;
+
+ err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf),
+ (u8 *) buf, 0, NULL);
+ if (err)
+ printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n",
+ wiphy_name(ar->hw->wiphy), reg, val);
+ return err;
+}
+
+static int ar9170_read_mreg(struct ar9170 *ar, int nregs,
+ const u32 *regs, u32 *out)
+{
+ int i, err;
+ __le32 *offs, *res;
+
+ if (unlikely(!IS_ACCEPTING_CMD(ar)))
+ return 0;
+
+ /* abuse "out" for the register offsets, must be same length */
+ offs = (__le32 *)out;
+ for (i = 0; i < nregs; i++)
+ offs[i] = cpu_to_le32(regs[i]);
+
+ /* also use the same buffer for the input */
+ res = (__le32 *)out;
+
+ err = ar->exec_cmd(ar, AR9170_CMD_RREG,
+ 4 * nregs, (u8 *)offs,
+ 4 * nregs, (u8 *)res);
+ if (err)
+ return err;
+
+ /* convert result to cpu endian */
+ for (i = 0; i < nregs; i++)
+ out[i] = le32_to_cpu(res[i]);
+
+ return 0;
+}
+
+int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
+{
+ return ar9170_read_mreg(ar, 1, &reg, val);
+}
+
+int ar9170_echo_test(struct ar9170 *ar, u32 v)
+{
+ __le32 echobuf = cpu_to_le32(v);
+ __le32 echores;
+ int err;
+
+ if (unlikely(!IS_ACCEPTING_CMD(ar)))
+ return -ENODEV;
+
+ err = ar->exec_cmd(ar, AR9170_CMD_ECHO,
+ 4, (u8 *)&echobuf,
+ 4, (u8 *)&echores);
+ if (err)
+ return err;
+
+ if (echobuf != echores)
+ return -EINVAL;
+
+ return 0;
+}
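
ar9170_read_mreg() saves command round trips by batching reads: the caller's output array is first overwritten with the little-endian register offsets, the same buffer is handed to exec_cmd() as both command payload and response buffer, and the reply is converted back to CPU endianness in place, which is why offsets and results must be the same length. The exported single-register wrapper is the usual entry point; a hypothetical caller:

	u32 rx_total;
	int err;

	/* read one MAC counter (register defined in hw.h below) */
	err = ar9170_read_reg(ar, AR9170_MAC_REG_RX_TOTAL, &rx_total);
	if (err)
		return err;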
diff --git a/drivers/net/wireless/ar9170/cmd.h b/drivers/net/wireless/ar9170/cmd.h
new file mode 100644
index 000000000000..a4f0e50e52b4
--- /dev/null
+++ b/drivers/net/wireless/ar9170/cmd.h
@@ -0,0 +1,91 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * Basic HW register/memory/command access functions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CMD_H
+#define __CMD_H
+
+#include "ar9170.h"
+
+/* basic HW access */
+int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
+int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
+int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
+int ar9170_echo_test(struct ar9170 *ar, u32 v);
+
+/*
+ * Macros to facilitate writing multiple registers in a single
+ * write-combining USB command. Note that when the first group
+ * fails the whole thing will fail without any others attempted,
+ * but you won't know which write in the group failed.
+ */
+#define ar9170_regwrite_begin(ar) \
+do { \
+ int __nreg = 0, __err = 0; \
+ struct ar9170 *__ar = ar;
+
+#define ar9170_regwrite(r, v) do { \
+ __ar->cmdbuf[2 * __nreg + 1] = cpu_to_le32(r); \
+ __ar->cmdbuf[2 * __nreg + 2] = cpu_to_le32(v); \
+ __nreg++; \
+ if ((__nreg >= PAYLOAD_MAX/2)) { \
+ if (IS_ACCEPTING_CMD(__ar)) \
+ __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
+ 8 * __nreg, \
+ (u8 *) &__ar->cmdbuf[1], \
+ 0, NULL); \
+ __nreg = 0; \
+ if (__err) \
+ goto __regwrite_out; \
+ } \
+} while (0)
+
+#define ar9170_regwrite_finish() \
+__regwrite_out : \
+ if (__nreg) { \
+ if (IS_ACCEPTING_CMD(__ar)) \
+ __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
+ 8 * __nreg, \
+ (u8 *) &__ar->cmdbuf[1], \
+ 0, NULL); \
+ __nreg = 0; \
+ }
+
+#define ar9170_regwrite_result() \
+ __err; \
+} while (0);
+
+#endif /* __CMD_H */
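
The begin/write/finish/result macros above expand to a single statement block: each ar9170_regwrite() appends a (register, value) pair to ar->cmdbuf and flushes the batch with one AR9170_CMD_WREG command once PAYLOAD_MAX/2 pairs have accumulated, finish() flushes whatever is left, and result() yields the first error seen, so a failed flush aborts the remaining writes without identifying which register in the group failed. Callers therefore always follow the same shape (this sketch mirrors the real users in mac.c further down; the function name is made up, the register values are borrowed from ar9170_init_mac()):

	static int example_set_slot_and_ack(struct ar9170 *ar)
	{
		ar9170_regwrite_begin(ar);

		ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);
		ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);

		ar9170_regwrite_finish();

		return ar9170_regwrite_result();
	}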
diff --git a/drivers/net/wireless/ar9170/eeprom.h b/drivers/net/wireless/ar9170/eeprom.h
new file mode 100644
index 000000000000..d2c8cc83f1dd
--- /dev/null
+++ b/drivers/net/wireless/ar9170/eeprom.h
@@ -0,0 +1,179 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * EEPROM layout
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __AR9170_EEPROM_H
+#define __AR9170_EEPROM_H
+
+#define AR5416_MAX_CHAINS 2
+#define AR5416_MODAL_SPURS 5
+
+struct ar9170_eeprom_modal {
+ __le32 antCtrlChain[AR5416_MAX_CHAINS];
+ __le32 antCtrlCommon;
+ s8 antennaGainCh[AR5416_MAX_CHAINS];
+ u8 switchSettling;
+ u8 txRxAttenCh[AR5416_MAX_CHAINS];
+ u8 rxTxMarginCh[AR5416_MAX_CHAINS];
+ s8 adcDesiredSize;
+ s8 pgaDesiredSize;
+ u8 xlnaGainCh[AR5416_MAX_CHAINS];
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ s8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
+ u8 xpdGain;
+ u8 xpd;
+ s8 iqCalICh[AR5416_MAX_CHAINS];
+ s8 iqCalQCh[AR5416_MAX_CHAINS];
+ u8 pdGainOverlap;
+ u8 ob;
+ u8 db;
+ u8 xpaBiasLvl;
+ u8 pwrDecreaseFor2Chain;
+ u8 pwrDecreaseFor3Chain;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 ht40PowerIncForPdadc;
+ u8 bswAtten[AR5416_MAX_CHAINS];
+ u8 bswMargin[AR5416_MAX_CHAINS];
+ u8 swSettleHt40;
+ u8 reserved[22];
+ struct spur_channel {
+ __le16 spurChan;
+ u8 spurRangeLow;
+ u8 spurRangeHigh;
+ } __packed spur_channels[AR5416_MODAL_SPURS];
+} __packed;
+
+#define AR5416_NUM_PD_GAINS 4
+#define AR5416_PD_GAIN_ICEPTS 5
+
+struct ar9170_calibration_data_per_freq {
+ u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+ u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+} __packed;
+
+#define AR5416_NUM_5G_CAL_PIERS 8
+#define AR5416_NUM_2G_CAL_PIERS 4
+
+#define AR5416_NUM_5G_TARGET_PWRS 8
+#define AR5416_NUM_2G_CCK_TARGET_PWRS 3
+#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4
+#define AR5416_MAX_NUM_TGT_PWRS 8
+
+struct ar9170_calibration_target_power_legacy {
+ u8 freq;
+ u8 power[4];
+} __packed;
+
+struct ar9170_calibration_target_power_ht {
+ u8 freq;
+ u8 power[8];
+} __packed;
+
+#define AR5416_NUM_CTLS 24
+
+struct ar9170_calctl_edges {
+ u8 channel;
+#define AR9170_CALCTL_EDGE_FLAGS 0xC0
+ u8 power_flags;
+} __packed;
+
+#define AR5416_NUM_BAND_EDGES 8
+
+struct ar9170_calctl_data {
+ struct ar9170_calctl_edges
+ control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
+} __packed;
+
+
+struct ar9170_eeprom {
+ __le16 length;
+ __le16 checksum;
+ __le16 version;
+ u8 operating_flags;
+#define AR9170_OPFLAG_5GHZ 1
+#define AR9170_OPFLAG_2GHZ 2
+ u8 misc;
+ __le16 reg_domain[2];
+ u8 mac_address[6];
+ u8 rx_mask;
+ u8 tx_mask;
+ __le16 rf_silent;
+ __le16 bluetooth_options;
+ __le16 device_capabilities;
+ __le32 build_number;
+ u8 deviceType;
+ u8 reserved[33];
+
+ u8 customer_data[64];
+
+ struct ar9170_eeprom_modal
+ modal_header[2];
+
+ u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS];
+ u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS];
+
+ struct ar9170_calibration_data_per_freq
+ cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS],
+ cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
+
+ /* power calibration data */
+ struct ar9170_calibration_target_power_legacy
+ cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS];
+ struct ar9170_calibration_target_power_ht
+ cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS],
+ cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS];
+
+ struct ar9170_calibration_target_power_legacy
+ cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS],
+ cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS];
+ struct ar9170_calibration_target_power_ht
+ cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS],
+ cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS];
+
+ /* conformance testing limits */
+ u8 ctl_index[AR5416_NUM_CTLS];
+ struct ar9170_calctl_data
+ ctl_data[AR5416_NUM_CTLS];
+
+ u8 pad;
+ __le16 subsystem_id;
+} __packed;
+
+#endif /* __AR9170_EEPROM_H */
diff --git a/drivers/net/wireless/ar9170/hw.h b/drivers/net/wireless/ar9170/hw.h
new file mode 100644
index 000000000000..13091bd9d815
--- /dev/null
+++ b/drivers/net/wireless/ar9170/hw.h
@@ -0,0 +1,417 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * Hardware-specific definitions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __AR9170_HW_H
+#define __AR9170_HW_H
+
+#define AR9170_MAX_CMD_LEN 64
+
+enum ar9170_cmd {
+ AR9170_CMD_RREG = 0x00,
+ AR9170_CMD_WREG = 0x01,
+ AR9170_CMD_RMEM = 0x02,
+ AR9170_CMD_WMEM = 0x03,
+ AR9170_CMD_BITAND = 0x04,
+ AR9170_CMD_BITOR = 0x05,
+ AR9170_CMD_EKEY = 0x28,
+ AR9170_CMD_DKEY = 0x29,
+ AR9170_CMD_FREQUENCY = 0x30,
+ AR9170_CMD_RF_INIT = 0x31,
+ AR9170_CMD_SYNTH = 0x32,
+ AR9170_CMD_FREQ_START = 0x33,
+ AR9170_CMD_ECHO = 0x80,
+ AR9170_CMD_TALLY = 0x81,
+ AR9170_CMD_TALLY_APD = 0x82,
+ AR9170_CMD_CONFIG = 0x83,
+ AR9170_CMD_RESET = 0x90,
+ AR9170_CMD_DKRESET = 0x91,
+ AR9170_CMD_DKTX_STATUS = 0x92,
+ AR9170_CMD_FDC = 0xA0,
+ AR9170_CMD_WREEPROM = 0xB0,
+ AR9170_CMD_WFLASH = 0xB0,
+ AR9170_CMD_FLASH_ERASE = 0xB1,
+ AR9170_CMD_FLASH_PROG = 0xB2,
+ AR9170_CMD_FLASH_CHKSUM = 0xB3,
+ AR9170_CMD_FLASH_READ = 0xB4,
+ AR9170_CMD_FW_DL_INIT = 0xB5,
+ AR9170_CMD_MEM_WREEPROM = 0xBB,
+};
+
+/* endpoints */
+#define AR9170_EP_TX 1
+#define AR9170_EP_RX 2
+#define AR9170_EP_IRQ 3
+#define AR9170_EP_CMD 4
+
+#define AR9170_EEPROM_START 0x1600
+
+#define AR9170_GPIO_REG_BASE 0x1d0100
+#define AR9170_GPIO_REG_PORT_TYPE AR9170_GPIO_REG_BASE
+#define AR9170_GPIO_REG_DATA (AR9170_GPIO_REG_BASE + 4)
+#define AR9170_NUM_LEDS 2
+
+
+#define AR9170_USB_REG_BASE 0x1e1000
+#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
+#define AR9170_DMA_CTL_ENABLE_TO_DEVICE 0x1
+#define AR9170_DMA_CTL_ENABLE_FROM_DEVICE 0x2
+#define AR9170_DMA_CTL_HIGH_SPEED 0x4
+#define AR9170_DMA_CTL_PACKET_MODE 0x8
+
+#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
+#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
+
+
+
+#define AR9170_MAC_REG_BASE 0x1c3000
+
+#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
+#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
+
+#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51C)
+#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
+#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
+
+#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
+#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
+#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
+#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
+
+#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
+#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
+
+#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62C)
+
+#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
+#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
+#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
+#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
+#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
+#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64C)
+
+#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
+#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
+#define AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC BIT(0)
+#define AR9170_MAC_REG_SNIFFER_DEFAULTS 0x02000000
+#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
+#define AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE BIT(3)
+#define AR9170_MAC_REG_ENCRYPTION_DEFAULTS 0x70
+
+#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
+#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
+
+#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
+#define AR9170_MAC_REG_FTF_ASSOC_REQ BIT(0)
+#define AR9170_MAC_REG_FTF_ASSOC_RESP BIT(1)
+#define AR9170_MAC_REG_FTF_REASSOC_REQ BIT(2)
+#define AR9170_MAC_REG_FTF_REASSOC_RESP BIT(3)
+#define AR9170_MAC_REG_FTF_PRB_REQ BIT(4)
+#define AR9170_MAC_REG_FTF_PRB_RESP BIT(5)
+#define AR9170_MAC_REG_FTF_BIT6 BIT(6)
+#define AR9170_MAC_REG_FTF_BIT7 BIT(7)
+#define AR9170_MAC_REG_FTF_BEACON BIT(8)
+#define AR9170_MAC_REG_FTF_ATIM BIT(9)
+#define AR9170_MAC_REG_FTF_DEASSOC BIT(10)
+#define AR9170_MAC_REG_FTF_AUTH BIT(11)
+#define AR9170_MAC_REG_FTF_DEAUTH BIT(12)
+#define AR9170_MAC_REG_FTF_BIT13 BIT(13)
+#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
+#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
+#define AR9170_MAC_REG_FTF_BAR BIT(24)
+#define AR9170_MAC_REG_FTF_BIT25 BIT(25)
+#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
+#define AR9170_MAC_REG_FTF_RTS BIT(27)
+#define AR9170_MAC_REG_FTF_CTS BIT(28)
+#define AR9170_MAC_REG_FTF_ACK BIT(29)
+#define AR9170_MAC_REG_FTF_CFE BIT(30)
+#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
+#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff
+#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
+
+#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
+#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6A4)
+#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6A8)
+#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6AC)
+#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6B0)
+#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6BC)
+#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6CC)
+#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6F4)
+
+
+#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
+#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
+
+#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6F0)
+
+#define AR9170_MAC_REG_POWERMANAGEMENT (AR9170_MAC_REG_BASE + 0x700)
+#define AR9170_MAC_REG_POWERMGT_IBSS 0xe0
+#define AR9170_MAC_REG_POWERMGT_AP 0xa1
+#define AR9170_MAC_REG_POWERMGT_STA 0x2
+#define AR9170_MAC_REG_POWERMGT_AP_WDS 0x3
+#define AR9170_MAC_REG_POWERMGT_DEFAULTS (0xf << 24)
+
+#define AR9170_MAC_REG_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
+#define AR9170_MAC_REG_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
+
+#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xB00)
+#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xB04)
+#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xB08)
+#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xB0C)
+#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xB10)
+#define AR9170_MAC_REG_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xB14)
+#define AR9170_MAC_REG_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xB18)
+
+#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xB28)
+
+#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xBB0)
+#define AR9170_MAC_FCS_SWFCS 0x1
+#define AR9170_MAC_FCS_FIFO_PROT 0x4
+
+
+#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xB30)
+
+#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44)
+#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48)
+
+#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00)
+#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50)
+
+#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xD7C)
+#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
+#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
+#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
+#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
+
+#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xD84)
+#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xD88)
+#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xD90)
+#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xD94)
+#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xDA0)
+#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xDA4)
+
+
+#define AR9170_PWR_REG_BASE 0x1D4000
+
+#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
+#define AR9170_PWR_CLK_AHB_40MHZ 0
+#define AR9170_PWR_CLK_AHB_20_22MHZ 1
+#define AR9170_PWR_CLK_AHB_40_44MHZ 2
+#define AR9170_PWR_CLK_AHB_80_88MHZ 3
+#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
+
+
+/* put beacon here in memory */
+#define AR9170_BEACON_BUFFER_ADDRESS 0x117900
+
+
+struct ar9170_tx_control {
+ __le16 length;
+ __le16 mac_control;
+ __le32 phy_control;
+ u8 frame_data[0];
+} __packed;
+
+/* these are either-or */
+#define AR9170_TX_MAC_PROT_RTS 0x0001
+#define AR9170_TX_MAC_PROT_CTS 0x0002
+
+#define AR9170_TX_MAC_NO_ACK 0x0004
+/* if unset, MAC will only do SIFS space before frame */
+#define AR9170_TX_MAC_BACKOFF 0x0008
+#define AR9170_TX_MAC_BURST 0x0010
+#define AR9170_TX_MAC_AGGR 0x0020
+
+/* encryption is a two-bit field */
+#define AR9170_TX_MAC_ENCR_NONE 0x0000
+#define AR9170_TX_MAC_ENCR_RC4 0x0040
+#define AR9170_TX_MAC_ENCR_CENC 0x0080
+#define AR9170_TX_MAC_ENCR_AES 0x00c0
+
+#define AR9170_TX_MAC_MMIC 0x0100
+#define AR9170_TX_MAC_HW_DURATION 0x0200
+#define AR9170_TX_MAC_QOS_SHIFT 10
+#define AR9170_TX_MAC_QOS_MASK (3 << AR9170_TX_MAC_QOS_SHIFT)
+#define AR9170_TX_MAC_AGGR_QOS_BIT1 0x0400
+#define AR9170_TX_MAC_AGGR_QOS_BIT2 0x0800
+#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
+#define AR9170_TX_MAC_TXOP_RIFS 0x2000
+#define AR9170_TX_MAC_IMM_AMPDU 0x4000
+#define AR9170_TX_MAC_RATE_PROBE 0x8000
+
+/* either-or */
+#define AR9170_TX_PHY_MOD_CCK 0x00000000
+#define AR9170_TX_PHY_MOD_OFDM 0x00000001
+#define AR9170_TX_PHY_MOD_HT 0x00000002
+
+/* depends on modulation */
+#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
+#define AR9170_TX_PHY_GREENFIELD 0x00000004
+
+#define AR9170_TX_PHY_BW_SHIFT 3
+#define AR9170_TX_PHY_BW_MASK (3 << AR9170_TX_PHY_BW_SHIFT)
+#define AR9170_TX_PHY_BW_20MHZ 0
+#define AR9170_TX_PHY_BW_40MHZ 2
+#define AR9170_TX_PHY_BW_40MHZ_DUP 3
+
+#define AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT 6
+#define AR9170_TX_PHY_TX_HEAVY_CLIP_MASK (7 << AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT)
+
+#define AR9170_TX_PHY_TX_PWR_SHIFT 9
+#define AR9170_TX_PHY_TX_PWR_MASK (0x3f << AR9170_TX_PHY_TX_PWR_SHIFT)
+
+/* not part of the hw-spec */
+#define AR9170_TX_PHY_QOS_SHIFT 25
+#define AR9170_TX_PHY_QOS_MASK (3 << AR9170_TX_PHY_QOS_SHIFT)
+
+#define AR9170_TX_PHY_TXCHAIN_SHIFT 15
+#define AR9170_TX_PHY_TXCHAIN_MASK (7 << AR9170_TX_PHY_TXCHAIN_SHIFT)
+#define AR9170_TX_PHY_TXCHAIN_1 1
+/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
+#define AR9170_TX_PHY_TXCHAIN_2 5
+
+#define AR9170_TX_PHY_MCS_SHIFT 18
+#define AR9170_TX_PHY_MCS_MASK (0x7f << AR9170_TX_PHY_MCS_SHIFT)
+
+#define AR9170_TX_PHY_SHORT_GI 0x80000000
+
+struct ar9170_rx_head {
+ u8 plcp[12];
+};
+
+struct ar9170_rx_tail {
+ union {
+ struct {
+ u8 rssi_ant0, rssi_ant1, rssi_ant2,
+ rssi_ant0x, rssi_ant1x, rssi_ant2x,
+ rssi_combined;
+ };
+ u8 rssi[7];
+ };
+
+ u8 evm_stream0[6], evm_stream1[6];
+ u8 phy_err;
+ u8 SAidx, DAidx;
+ u8 error;
+ u8 status;
+};
+
+#define AR9170_ENC_ALG_NONE 0x0
+#define AR9170_ENC_ALG_WEP64 0x1
+#define AR9170_ENC_ALG_TKIP 0x2
+#define AR9170_ENC_ALG_AESCCMP 0x4
+#define AR9170_ENC_ALG_WEP128 0x5
+#define AR9170_ENC_ALG_WEP256 0x6
+#define AR9170_ENC_ALG_CENC 0x7
+
+#define AR9170_RX_ENC_SOFTWARE 0x8
+
+static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_tail *t)
+{
+ return (t->SAidx & 0xc0) >> 4 |
+ (t->DAidx & 0xc0) >> 6;
+}
+
+#define AR9170_RX_STATUS_MODULATION_MASK 0x03
+#define AR9170_RX_STATUS_MODULATION_CCK 0x00
+#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
+#define AR9170_RX_STATUS_MODULATION_HT 0x02
+#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
+
+/* depends on modulation */
+#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
+#define AR9170_RX_STATUS_GREENFIELD 0x08
+
+#define AR9170_RX_STATUS_MPDU_MASK 0x30
+#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
+#define AR9170_RX_STATUS_MPDU_FIRST 0x10
+#define AR9170_RX_STATUS_MPDU_MIDDLE 0x20
+#define AR9170_RX_STATUS_MPDU_LAST 0x30
+
+
+#define AR9170_RX_ERROR_RXTO 0x01
+#define AR9170_RX_ERROR_OVERRUN 0x02
+#define AR9170_RX_ERROR_DECRYPT 0x04
+#define AR9170_RX_ERROR_FCS 0x08
+#define AR9170_RX_ERROR_WRONG_RA 0x10
+#define AR9170_RX_ERROR_PLCP 0x20
+#define AR9170_RX_ERROR_MMIC 0x40
+
+struct ar9170_cmd_tx_status {
+ __le16 unkn;
+ u8 dst[ETH_ALEN];
+ __le32 rate;
+ __le16 status;
+} __packed;
+
+#define AR9170_TX_STATUS_COMPLETE 0x00
+#define AR9170_TX_STATUS_RETRY 0x01
+#define AR9170_TX_STATUS_FAILED 0x02
+
+struct ar9170_cmd_ba_failed_count {
+ __le16 failed;
+ __le16 rate;
+} __packed;
+
+struct ar9170_cmd_response {
+ u8 flag;
+ u8 type;
+
+ union {
+ struct ar9170_cmd_tx_status tx_status;
+ struct ar9170_cmd_ba_failed_count ba_fail_cnt;
+ u8 data[0];
+ };
+} __packed;
+
+/* QoS */
+
+/* mac80211 queue to HW/FW map */
+static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 };
+
+/* HW/FW queue to mac80211 map */
+static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 };
+
+enum ar9170_txq {
+ AR9170_TXQ_BE,
+ AR9170_TXQ_BK,
+ AR9170_TXQ_VI,
+ AR9170_TXQ_VO,
+
+ __AR9170_NUM_TXQ,
+};
+
+#endif /* __AR9170_HW_H */
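
The two arrays at the bottom of hw.h are inverse permutations translating between mac80211's queue numbering (where 0 is the highest-priority VO queue) and the firmware ordering given by enum ar9170_txq. A hypothetical tx-path use:

	/* illustration only: pick the firmware queue for an outgoing frame */
	unsigned int mac_queue = skb_get_queue_mapping(skb);
	u8 hw_queue = ar9170_qos_hwmap[mac_queue];

	/* ...and map a firmware queue number back when reporting status */
	unsigned int back = ar9170_qos_mac80211map[hw_queue];	/* == mac_queue */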
diff --git a/drivers/net/wireless/ar9170/led.c b/drivers/net/wireless/ar9170/led.c
new file mode 100644
index 000000000000..341cead7f606
--- /dev/null
+++ b/drivers/net/wireless/ar9170/led.c
@@ -0,0 +1,171 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * LED handling
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ar9170.h"
+#include "cmd.h"
+
+int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state)
+{
+ return ar9170_write_reg(ar, AR9170_GPIO_REG_DATA, led_state);
+}
+
+int ar9170_init_leds(struct ar9170 *ar)
+{
+ int err;
+
+ /* disable LEDs */
+ /* GPIO [0/1 mode: output, 2/3: input] */
+ err = ar9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
+ if (err)
+ goto out;
+
+ /* GPIO 0/1 value: off */
+ err = ar9170_set_leds_state(ar, 0);
+
+out:
+ return err;
+}
+
+#ifdef CONFIG_AR9170_LEDS
+static void ar9170_update_leds(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
+ int i, tmp, blink_delay = 1000;
+ u32 led_val = 0;
+ bool rerun = false;
+
+ if (unlikely(!IS_ACCEPTING_CMD(ar)))
+ return ;
+
+ mutex_lock(&ar->mutex);
+ for (i = 0; i < AR9170_NUM_LEDS; i++)
+ if (ar->leds[i].toggled) {
+ led_val |= 1 << i;
+
+ tmp = 70 + 200 / (ar->leds[i].toggled);
+ if (tmp < blink_delay)
+ blink_delay = tmp;
+
+ if (ar->leds[i].toggled > 1)
+ ar->leds[i].toggled = 0;
+
+ rerun = true;
+ }
+
+ ar9170_set_leds_state(ar, led_val);
+ mutex_unlock(&ar->mutex);
+
+ if (rerun)
+ queue_delayed_work(ar->hw->workqueue, &ar->led_work,
+ msecs_to_jiffies(blink_delay));
+}
+
+static void ar9170_led_brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct ar9170_led *arl = container_of(led, struct ar9170_led, l);
+ struct ar9170 *ar = arl->ar;
+
+ arl->toggled++;
+
+ if (likely(IS_ACCEPTING_CMD(ar) && brightness))
+ queue_delayed_work(ar->hw->workqueue, &ar->led_work, HZ/10);
+}
+
+static int ar9170_register_led(struct ar9170 *ar, int i, char *name,
+ char *trigger)
+{
+ int err;
+
+ snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
+ "ar9170-%s::%s", wiphy_name(ar->hw->wiphy), name);
+
+ ar->leds[i].ar = ar;
+ ar->leds[i].l.name = ar->leds[i].name;
+ ar->leds[i].l.brightness_set = ar9170_led_brightness_set;
+ ar->leds[i].l.brightness = 0;
+ ar->leds[i].l.default_trigger = trigger;
+
+ err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
+ &ar->leds[i].l);
+ if (err)
+ printk(KERN_ERR "%s: failed to register %s LED (%d).\n",
+ wiphy_name(ar->hw->wiphy), ar->leds[i].name, err);
+ else
+ ar->leds[i].registered = true;
+
+ return err;
+}
+
+void ar9170_unregister_leds(struct ar9170 *ar)
+{
+ int i;
+
+ cancel_delayed_work_sync(&ar->led_work);
+
+ for (i = 0; i < AR9170_NUM_LEDS; i++)
+ if (ar->leds[i].registered) {
+ led_classdev_unregister(&ar->leds[i].l);
+ ar->leds[i].registered = false;
+ }
+}
+
+int ar9170_register_leds(struct ar9170 *ar)
+{
+ int err;
+
+ INIT_DELAYED_WORK(&ar->led_work, ar9170_update_leds);
+
+ err = ar9170_register_led(ar, 0, "tx",
+ ieee80211_get_tx_led_name(ar->hw));
+ if (err)
+ goto fail;
+
+ err = ar9170_register_led(ar, 1, "assoc",
+ ieee80211_get_assoc_led_name(ar->hw));
+ if (err)
+ goto fail;
+
+ return 0;
+
+fail:
+ ar9170_unregister_leds(ar);
+ return err;
+}
+
+#endif /* CONFIG_AR9170_LEDS */
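
Note that the LED code never touches the GPIOs from the LED-class callback itself: ar9170_led_brightness_set() only bumps the per-LED toggled counter and schedules led_work, and ar9170_update_leds() then writes AR9170_GPIO_REG_DATA from process context under ar->mutex, re-queueing itself while any LED is still active. The re-queue delay is 70 + 200/toggled milliseconds, so a rarely triggered LED updates slowly (toggled = 1 gives 270 ms) while a busy one converges on the 70 ms floor.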
diff --git a/drivers/net/wireless/ar9170/mac.c b/drivers/net/wireless/ar9170/mac.c
new file mode 100644
index 000000000000..c8fa3073169f
--- /dev/null
+++ b/drivers/net/wireless/ar9170/mac.c
@@ -0,0 +1,452 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * MAC programming
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "ar9170.h"
+#include "cmd.h"
+
+int ar9170_set_qos(struct ar9170 *ar)
+{
+ ar9170_regwrite_begin(ar);
+
+ ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
+ (ar->edcf[0].cw_max << 16));
+ ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
+ (ar->edcf[1].cw_max << 16));
+ ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
+ (ar->edcf[2].cw_max << 16));
+ ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
+ (ar->edcf[3].cw_max << 16));
+ ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
+ (ar->edcf[4].cw_max << 16));
+
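+ /* pack the AIFS values for all five queues into the two AIFS
+ * registers; the value is apparently in microseconds:
+ * AIFSN * 9us slot time + 10us SIFS */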
+ ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS,
+ ((ar->edcf[0].aifs * 9 + 10)) |
+ ((ar->edcf[1].aifs * 9 + 10) << 12) |
+ ((ar->edcf[2].aifs * 9 + 10) << 24));
+ ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS,
+ ((ar->edcf[2].aifs * 9 + 10) >> 8) |
+ ((ar->edcf[3].aifs * 9 + 10) << 4) |
+ ((ar->edcf[4].aifs * 9 + 10) << 16));
+
+ ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
+ ar->edcf[0].txop | ar->edcf[1].txop << 16);
+ ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
+ ar->edcf[2].txop | ar->edcf[3].txop << 16);
+
+ ar9170_regwrite_finish();
+
+ return ar9170_regwrite_result();
+}
+
+int ar9170_init_mac(struct ar9170 *ar)
+{
+ ar9170_regwrite_begin(ar);
+
+ ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);
+
+ ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0);
+
+ /* enable MMIC */
+ ar9170_regwrite(AR9170_MAC_REG_SNIFFER,
+ AR9170_MAC_REG_SNIFFER_DEFAULTS);
+
+ ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);
+
+ ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
+ ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
+ ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);
+
+ /* CF-END mode */
+ ar9170_regwrite(0x1c3b2c, 0x19000000);
+
+ /* NAV protects ACK only (in TXOP) */
+ ar9170_regwrite(0x1c3b38, 0x201);
+
+ /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
+ /* OTUS set AM to 0x1 */
+ ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);
+
+ ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
+
+ /* AGG test code*/
+ /* Aggregation MAX number and timeout */
+ ar9170_regwrite(0x1c3b9c, 0x10000a);
+
+ ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
+ AR9170_MAC_REG_FTF_DEFAULTS);
+
+ /* Enable deaggregator, response in sniffer mode */
+ ar9170_regwrite(0x1c3c40, 0x1 | 1<<30);
+
+ /* rate sets */
+ ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
+ ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
+ ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb);
+
+ /* MIMO response control */
+ ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */
+
+ /* switch MAC to OTUS interface */
+ ar9170_regwrite(0x1c3600, 0x3);
+
+ ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);
+
+ /* set PHY register read timeout (??) */
+ ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);
+
+ /* Disable Rx TimeOut, workaround for BB. */
+ ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);
+
+ /* Set CPU clock frequency to 88/80MHz */
+ ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL,
+ AR9170_PWR_CLK_AHB_80_88MHZ |
+ AR9170_PWR_CLK_DAC_160_INV_DLY);
+
+ /* Set WLAN DMA interrupt mode: generate int per packet */
+ ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);
+
+ ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
+ AR9170_MAC_FCS_FIFO_PROT);
+
+ /* Disables the CF_END frame, undocumented register */
+ ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
+ 0x141E0F48);
+
+ ar9170_regwrite_finish();
+
+ return ar9170_regwrite_result();
+}
+
+static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
+{
+ static const u8 zero[ETH_ALEN] = { 0 };
+
+ if (!mac)
+ mac = zero;
+
+ ar9170_regwrite_begin(ar);
+
+ ar9170_regwrite(reg,
+ (mac[3] << 24) | (mac[2] << 16) |
+ (mac[1] << 8) | mac[0]);
+
+ ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]);
+
+ ar9170_regwrite_finish();
+
+ return ar9170_regwrite_result();
+}
+
+int ar9170_update_multicast(struct ar9170 *ar)
+{
+ int err;
+
+ ar9170_regwrite_begin(ar);
+ ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H,
+ ar->want_mc_hash >> 32);
+ ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L,
+ ar->want_mc_hash);
+
+ ar9170_regwrite_finish();
+ err = ar9170_regwrite_result();
+
+ if (err)
+ return err;
+
+ ar->cur_mc_hash = ar->want_mc_hash;
+
+ return 0;
+}
+
+int ar9170_update_frame_filter(struct ar9170 *ar)
+{
+ int err;
+
+ err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER,
+ ar->want_filter);
+
+ if (err)
+ return err;
+
+ ar->cur_filter = ar->want_filter;
+
+ return 0;
+}
+
+static int ar9170_set_promiscuous(struct ar9170 *ar)
+{
+ u32 encr_mode, sniffer;
+ int err;
+
+ err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer);
+ if (err)
+ return err;
+
+ err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode);
+ if (err)
+ return err;
+
+ if (ar->sniffer_enabled) {
+ sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
+
+ /*
+ * Rx decryption works in place.
+ *
+ * If we leave hardware decryption enabled, every frame
+ * encrypted with an unknown key would be mangled beyond
+ * recovery, so fall back to software decryption.
+ */
+
+ encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
+ ar->sniffer_enabled = true;
+ } else {
+ sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
+
+ if (ar->rx_software_decryption)
+ encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
+ else
+ encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
+ }
+
+ ar9170_regwrite_begin(ar);
+ ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode);
+ ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
+ ar9170_regwrite_finish();
+
+ return ar9170_regwrite_result();
+}
+
+int ar9170_set_operating_mode(struct ar9170 *ar)
+{
+ u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
+ u8 *mac_addr, *bssid;
+ int err;
+
+ if (ar->vif) {
+ mac_addr = ar->mac_addr;
+ bssid = ar->bssid;
+
+ switch (ar->vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS;
+ break;
+/* case NL80211_IFTYPE_AP:
+ pm_mode |= AR9170_MAC_REG_POWERMGT_AP;
+ break;*/
+ case NL80211_IFTYPE_WDS:
+ pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ ar->sniffer_enabled = true;
+ ar->rx_software_decryption = true;
+ break;
+ default:
+ pm_mode |= AR9170_MAC_REG_POWERMGT_STA;
+ break;
+ }
+ } else {
+ mac_addr = NULL;
+ bssid = NULL;
+ }
+
+ err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
+ if (err)
+ return err;
+
+ err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
+ if (err)
+ return err;
+
+ err = ar9170_set_promiscuous(ar);
+ if (err)
+ return err;
+
+ ar9170_regwrite_begin(ar);
+
+ ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode);
+ ar9170_regwrite_finish();
+
+ return ar9170_regwrite_result();
+}
+
+int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry)
+{
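+ /* replicate the retry limit into each of the five 4-bit fields
+ * (presumably one per queue), capped at 3 retries each (0x33333) */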
+ u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
+
+ return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
+}
+
+int ar9170_set_beacon_timers(struct ar9170 *ar)
+{
+ u32 v = 0;
+ u32 pretbtt = 0;
+
+ v |= ar->hw->conf.beacon_int;
+
+ if (ar->vif) {
+ switch (ar->vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ v |= BIT(25);
+ break;
+ case NL80211_IFTYPE_AP:
+ v |= BIT(24);
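+ /* fire the pre-TBTT interrupt a little
+ * (presumably 6 TU) ahead of the beacon */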
+ pretbtt = (ar->hw->conf.beacon_int - 6) << 16;
+ break;
+ default:
+ break;
+ }
+
+ v |= ar->vif->bss_conf.dtim_period << 16;
+ }
+
+ ar9170_regwrite_begin(ar);
+
+ ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
+ ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
+ ar9170_regwrite_finish();
+ return ar9170_regwrite_result();
+}
+
+int ar9170_update_beacon(struct ar9170 *ar)
+{
+ struct sk_buff *skb;
+ __le32 *data, *old = NULL;
+ u32 word;
+ int i;
+
+ skb = ieee80211_beacon_get(ar->hw, ar->vif);
+ if (!skb)
+ return -ENOMEM;
+
+ data = (__le32 *)skb->data;
+ if (ar->beacon)
+ old = (__le32 *)ar->beacon->data;
+
+ ar9170_regwrite_begin(ar);
+ for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
+ /*
+ * XXX: This may access up to three bytes beyond
+ * the end of the skb data!
+ */
+
+ if (old && (data[i] == old[i]))
+ continue;
+
+ word = le32_to_cpu(data[i]);
+ ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word);
+ }
+
+ /* XXX: use skb->cb info */
+ /* both bands currently use the same PLCP value */
+ ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
+ ((skb->len + 4) << (3+16)) + 0x0400);
+
+ ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4);
+ ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS);
+ ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1);
+
+ ar9170_regwrite_finish();
+
+ dev_kfree_skb(ar->beacon);
+ ar->beacon = skb;
+
+ return ar9170_regwrite_result();
+}
+
+void ar9170_new_beacon(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170,
+ beacon_work);
+ struct sk_buff *skb;
+
+ if (unlikely(!IS_STARTED(ar)))
+ return;
+
+ mutex_lock(&ar->mutex);
+
+ if (!ar->vif)
+ goto out;
+
+ ar9170_update_beacon(ar);
+
+ rcu_read_lock();
+ while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif)))
+ ar9170_op_tx(ar->hw, skb);
+
+ rcu_read_unlock();
+
+ out:
+ mutex_unlock(&ar->mutex);
+}
+
+int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
+ u8 keyidx, u8 *keydata, int keylen)
+{
+ __le32 vals[7];
+ static const u8 bcast[ETH_ALEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u8 dummy;
+
+ mac = mac ? : bcast;
+
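+ /* AR9170_CMD_EKEY payload: key slot and index, then the peer MAC
+ * address and cipher type, followed by up to 16 bytes of key data */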
+ vals[0] = cpu_to_le32((keyidx << 16) + id);
+ vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype);
+ vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 |
+ mac[3] << 8 | mac[2]);
+ memset(&vals[3], 0, 16);
+ if (keydata)
+ memcpy(&vals[3], keydata, keylen);
+
+ return ar->exec_cmd(ar, AR9170_CMD_EKEY,
+ sizeof(vals), (u8 *)vals,
+ 1, &dummy);
+}
+
+int ar9170_disable_key(struct ar9170 *ar, u8 id)
+{
+ __le32 val = cpu_to_le32(id);
+ u8 dummy;
+
+ return ar->exec_cmd(ar, AR9170_CMD_EKEY,
+ sizeof(val), (u8 *)&val,
+ 1, &dummy);
+}
diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c
new file mode 100644
index 000000000000..5996ff9f7f47
--- /dev/null
+++ b/drivers/net/wireless/ar9170/main.c
@@ -0,0 +1,1671 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * mac80211 interaction code
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, Christian Lamparter <chunkeey@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "ar9170.h"
+#include "hw.h"
+#include "cmd.h"
+
+static int modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate) | (_txpidx) << 4, \
+}
+
+static struct ieee80211_rate __ar9170_ratetable[] = {
+ RATE(10, 0, 0, 0),
+ RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0xb, 0, 0),
+ RATE(90, 0xf, 0, 0),
+ RATE(120, 0xa, 0, 0),
+ RATE(180, 0xe, 0, 0),
+ RATE(240, 0x9, 0, 0),
+ RATE(360, 0xd, 1, 0),
+ RATE(480, 0x8, 2, 0),
+ RATE(540, 0xc, 3, 0),
+};
+#undef RATE
+
+#define ar9170_g_ratetable (__ar9170_ratetable + 0)
+#define ar9170_g_ratetable_size 12
+#define ar9170_a_ratetable (__ar9170_ratetable + 4)
+#define ar9170_a_ratetable_size 8
+
+/*
+ * NB: The hw_value is used as an index into the ar9170_phy_freq_params
+ * array in phy.c so that we don't have to do frequency lookups!
+ */
+#define CHAN(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 18, /* XXX */ \
+}
+
+static struct ieee80211_channel ar9170_2ghz_chantable[] = {
+ CHAN(2412, 0),
+ CHAN(2417, 1),
+ CHAN(2422, 2),
+ CHAN(2427, 3),
+ CHAN(2432, 4),
+ CHAN(2437, 5),
+ CHAN(2442, 6),
+ CHAN(2447, 7),
+ CHAN(2452, 8),
+ CHAN(2457, 9),
+ CHAN(2462, 10),
+ CHAN(2467, 11),
+ CHAN(2472, 12),
+ CHAN(2484, 13),
+};
+
+static struct ieee80211_channel ar9170_5ghz_chantable[] = {
+ CHAN(4920, 14),
+ CHAN(4940, 15),
+ CHAN(4960, 16),
+ CHAN(4980, 17),
+ CHAN(5040, 18),
+ CHAN(5060, 19),
+ CHAN(5080, 20),
+ CHAN(5180, 21),
+ CHAN(5200, 22),
+ CHAN(5220, 23),
+ CHAN(5240, 24),
+ CHAN(5260, 25),
+ CHAN(5280, 26),
+ CHAN(5300, 27),
+ CHAN(5320, 28),
+ CHAN(5500, 29),
+ CHAN(5520, 30),
+ CHAN(5540, 31),
+ CHAN(5560, 32),
+ CHAN(5580, 33),
+ CHAN(5600, 34),
+ CHAN(5620, 35),
+ CHAN(5640, 36),
+ CHAN(5660, 37),
+ CHAN(5680, 38),
+ CHAN(5700, 39),
+ CHAN(5745, 40),
+ CHAN(5765, 41),
+ CHAN(5785, 42),
+ CHAN(5805, 43),
+ CHAN(5825, 44),
+ CHAN(5170, 45),
+ CHAN(5190, 46),
+ CHAN(5210, 47),
+ CHAN(5230, 48),
+};
+#undef CHAN
+
+static struct ieee80211_supported_band ar9170_band_2GHz = {
+ .channels = ar9170_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(ar9170_2ghz_chantable),
+ .bitrates = ar9170_g_ratetable,
+ .n_bitrates = ar9170_g_ratetable_size,
+};
+
+#ifdef AR9170_QUEUE_DEBUG
+/*
+ * In case someone wants to work with the AR9170's crazy tx_status
+ * queueing techniques, this probing function might come in handy.
+ *
+ * NOTE: caller must hold the queue's spinlock!
+ */
+
+static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct ar9170_tx_control *txc = (void *) skb->data;
+ struct ieee80211_hdr *hdr = (void *)txc->frame_data;
+
+ printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
+ "mac_control:%04x, phy_control:%08x]\n",
+ wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
+ ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
+ le32_to_cpu(txc->phy_control));
+}
+
+static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
+ struct sk_buff_head *queue)
+{
+ struct sk_buff *skb;
+ int i = 0;
+
+ printk(KERN_DEBUG "---[ cut here ]---\n");
+ printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
+ wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
+
+ skb_queue_walk(queue, skb) {
+ printk(KERN_DEBUG "index:%d => \n", i++);
+ ar9170_print_txheader(ar, skb);
+ }
+ printk(KERN_DEBUG "---[ end ]---\n");
+}
+#endif /* AR9170_QUEUE_DEBUG */
+
+static struct ieee80211_supported_band ar9170_band_5GHz = {
+ .channels = ar9170_5ghz_chantable,
+ .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
+ .bitrates = ar9170_a_ratetable,
+ .n_bitrates = ar9170_a_ratetable_size,
+};
+
+void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+ bool valid_status, u16 tx_status)
+{
+ struct ieee80211_tx_info *txinfo;
+ unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ ar->tx_stats[queue].len--;
+ if (ieee80211_queue_stopped(ar->hw, queue))
+ ieee80211_wake_queue(ar->hw, queue);
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(txinfo);
+
+ switch (tx_status) {
+ case AR9170_TX_STATUS_RETRY:
+ retries = 2;
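+ /* fall through */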
+ case AR9170_TX_STATUS_COMPLETE:
+ txinfo->flags |= IEEE80211_TX_STAT_ACK;
+ break;
+
+ case AR9170_TX_STATUS_FAILED:
+ retries = ar->hw->conf.long_frame_max_tx_count;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
+ wiphy_name(ar->hw->wiphy), tx_status);
+ break;
+ }
+
+ if (valid_status)
+ txinfo->status.rates[0].count = retries + 1;
+
+ skb_pull(skb, sizeof(struct ar9170_tx_control));
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+}
+
+static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
+ const u8 *mac,
+ const u32 queue,
+ struct sk_buff_head *q)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&q->lock, flags);
+ skb_queue_walk(q, skb) {
+ struct ar9170_tx_control *txc = (void *) skb->data;
+ struct ieee80211_hdr *hdr = (void *) txc->frame_data;
+ u32 txc_queue = (le32_to_cpu(txc->phy_control) &
+ AR9170_TX_PHY_QOS_MASK) >>
+ AR9170_TX_PHY_QOS_SHIFT;
+
+ if ((queue != txc_queue) ||
+ (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
+ continue;
+
+ __skb_unlink(skb, q);
+ spin_unlock_irqrestore(&q->lock, flags);
+ return skb;
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+ return NULL;
+}
+
+static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
+ const u32 queue)
+{
+ struct ieee80211_sta *sta;
+ struct sk_buff *skb;
+
+ /*
+ * Unfortunately, the firmware does not tell us which (queued) frame
+ * this transmission status report belongs to.
+ *
+ * So we have to make a risky guess - based on the scarce information
+ * the firmware does provide (destination MAC and phy_control) -
+ * and hope that we picked the right one...
+ */
+ rcu_read_lock();
+ sta = ieee80211_find_sta(ar->hw, mac);
+
+ if (likely(sta)) {
+ struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
+ skb = skb_dequeue(&sta_priv->tx_status[queue]);
+ rcu_read_unlock();
+ if (likely(skb))
+ return skb;
+ } else
+ rcu_read_unlock();
+
+ /* scan the waste queue for candidates */
+ skb = ar9170_find_skb_in_queue(ar, mac, queue,
+ &ar->global_tx_status_waste);
+ if (!skb) {
+ /* so it still _must_ be in the global list. */
+ skb = ar9170_find_skb_in_queue(ar, mac, queue,
+ &ar->global_tx_status);
+ }
+
+#ifdef AR9170_QUEUE_DEBUG
+ if (unlikely((!skb) && net_ratelimit())) {
+ printk(KERN_ERR "%s: ESS:[%pM] does not have any "
+ "outstanding frames in this queue (%d).\n",
+ wiphy_name(ar->hw->wiphy), mac, queue);
+ }
+#endif /* AR9170_QUEUE_DEBUG */
+ return skb;
+}
+
+/*
+ * This worker tries to keep the global tx_status queue empty.
+ * So we can guarantee that incoming tx_status reports for
+ * unregistered stations are always matched with the frame
+ * we think they belong to.
+ */
+
+static void ar9170_tx_status_janitor(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170,
+ tx_status_janitor.work);
+ struct sk_buff *skb;
+
+ if (unlikely(!IS_STARTED(ar)))
+ return;
+
+ mutex_lock(&ar->mutex);
+ /* recycle the garbage back to mac80211... one by one. */
+ while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: dispose queued frame =>\n",
+ wiphy_name(ar->hw->wiphy));
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+ ar9170_handle_tx_status(ar, skb, false,
+ AR9170_TX_STATUS_FAILED);
+ }
+
+ while ((skb = skb_dequeue(&ar->global_tx_status))) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
+ wiphy_name(ar->hw->wiphy));
+
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+ skb_queue_tail(&ar->global_tx_status_waste, skb);
+ }
+
+ /* recall the janitor in 100ms - if there's garbage in the can. */
+ if (skb_queue_len(&ar->global_tx_status_waste) > 0)
+ queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
+ msecs_to_jiffies(100));
+
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar9170_handle_command_response(struct ar9170 *ar,
+ void *buf, u32 len)
+{
+ struct ar9170_cmd_response *cmd = (void *) buf;
+
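+ /* types without both top bits set are replies to pending commands;
+ * everything else is an unsolicited hardware event handled below */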
+ if ((cmd->type & 0xc0) != 0xc0) {
+ ar->callback_cmd(ar, len, buf);
+ return;
+ }
+
+ /* hardware event handlers */
+ switch (cmd->type) {
+ case 0xc1: {
+ /*
+ * TX status notification:
+ * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
+ *
+ * XX always 81
+ * YY always 00
+ * M1-M6 is the MAC address
+ * R1-R4 is the transmit rate
+ * S1-S2 is the transmit status
+ */
+
+ struct sk_buff *skb;
+ u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
+ AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
+
+ skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
+ if (unlikely(!skb))
+ return;
+
+ ar9170_handle_tx_status(ar, skb, true,
+ le16_to_cpu(cmd->tx_status.status));
+ break;
+ }
+
+ case 0xc0:
+ /*
+ * pre-TBTT event
+ */
+ if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
+ queue_work(ar->hw->workqueue, &ar->beacon_work);
+ break;
+
+ case 0xc2:
+ /*
+ * (IBSS) beacon send notification
+ * bytes: 04 c2 XX YY B4 B3 B2 B1
+ *
+ * XX always 80
+ * YY always 00
+ * B1-B4 "should" be the number of beacons sent out.
+ */
+ break;
+
+ case 0xc3:
+ /* End of Atim Window */
+ break;
+
+ case 0xc4:
+ case 0xc5:
+ /* BlockACK events */
+ break;
+
+ case 0xc6:
+ /* Watchdog Interrupt */
+ break;
+
+ case 0xc9:
+ /* retransmission issue / SIFS/EIFS collision ?! */
+ break;
+
+ default:
+ printk(KERN_INFO "received unhandled event %x\n", cmd->type);
+ print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
+ break;
+ }
+}
+
+/*
+ * If the frame alignment is right (or the kernel has
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
+ * is only a single MPDU in the USB frame, then we can
+ * submit to mac80211 the SKB directly. However, since
+ * there may be multiple packets in one SKB in stream
+ * mode, and we need to observe the proper ordering,
+ * this is non-trivial.
+ */
+static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
+{
+ struct sk_buff *skb;
+ struct ar9170_rx_head *head = (void *)buf;
+ struct ar9170_rx_tail *tail;
+ struct ieee80211_rx_status status;
+ int mpdu_len, i;
+ u8 error, antennas = 0, decrypt;
+ __le16 fc;
+ int reserved;
+
+ if (unlikely(!IS_STARTED(ar)))
+ return;
+
+ /* Received MPDU */
+ mpdu_len = len;
+ mpdu_len -= sizeof(struct ar9170_rx_head);
+ mpdu_len -= sizeof(struct ar9170_rx_tail);
+ BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
+ BUILD_BUG_ON(sizeof(struct ar9170_rx_tail) != 24);
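+ /* the MPDU is sandwiched between a 12-byte RX head (PLCP info)
+ * and a 24-byte RX tail (RSSI, status and error flags) */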
+
+ if (mpdu_len <= FCS_LEN)
+ return;
+
+ tail = (void *)(buf + sizeof(struct ar9170_rx_head) + mpdu_len);
+
+ for (i = 0; i < 3; i++)
+ if (tail->rssi[i] != 0x80)
+ antennas |= BIT(i);
+
+ /* post-process RSSI */
+ for (i = 0; i < 7; i++)
+ if (tail->rssi[i] & 0x80)
+ tail->rssi[i] = ((tail->rssi[i] & 0x7f) + 1) & 0x7f;
+
+ memset(&status, 0, sizeof(status));
+
+ status.band = ar->channel->band;
+ status.freq = ar->channel->center_freq;
+ status.signal = ar->noise[0] + tail->rssi_combined;
+ status.noise = ar->noise[0];
+ status.antenna = antennas;
+
+ switch (tail->status & AR9170_RX_STATUS_MODULATION_MASK) {
+ case AR9170_RX_STATUS_MODULATION_CCK:
+ if (tail->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
+ status.flag |= RX_FLAG_SHORTPRE;
+ switch (head->plcp[0]) {
+ case 0x0a:
+ status.rate_idx = 0;
+ break;
+ case 0x14:
+ status.rate_idx = 1;
+ break;
+ case 0x37:
+ status.rate_idx = 2;
+ break;
+ case 0x6e:
+ status.rate_idx = 3;
+ break;
+ default:
+ if ((!ar->sniffer_enabled) && (net_ratelimit()))
+ printk(KERN_ERR "%s: invalid plcp cck rate "
+ "(%x).\n", wiphy_name(ar->hw->wiphy),
+ head->plcp[0]);
+ return;
+ }
+ break;
+ case AR9170_RX_STATUS_MODULATION_OFDM:
+ switch (head->plcp[0] & 0xF) {
+ case 0xB:
+ status.rate_idx = 0;
+ break;
+ case 0xF:
+ status.rate_idx = 1;
+ break;
+ case 0xA:
+ status.rate_idx = 2;
+ break;
+ case 0xE:
+ status.rate_idx = 3;
+ break;
+ case 0x9:
+ status.rate_idx = 4;
+ break;
+ case 0xD:
+ status.rate_idx = 5;
+ break;
+ case 0x8:
+ status.rate_idx = 6;
+ break;
+ case 0xC:
+ status.rate_idx = 7;
+ break;
+ default:
+ if ((!ar->sniffer_enabled) && (net_ratelimit()))
+ printk(KERN_ERR "%s: invalid plcp ofdm rate "
+ "(%x).\n", wiphy_name(ar->hw->wiphy),
+ head->plcp[0]);
+ return;
+ }
+ if (status.band == IEEE80211_BAND_2GHZ)
+ status.rate_idx += 4;
+ break;
+ case AR9170_RX_STATUS_MODULATION_HT:
+ case AR9170_RX_STATUS_MODULATION_DUPOFDM:
+ /* XXX */
+
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: invalid modulation\n",
+ wiphy_name(ar->hw->wiphy));
+ return;
+ }
+
+ error = tail->error;
+
+ if (error & AR9170_RX_ERROR_MMIC) {
+ status.flag |= RX_FLAG_MMIC_ERROR;
+ error &= ~AR9170_RX_ERROR_MMIC;
+ }
+
+ if (error & AR9170_RX_ERROR_PLCP) {
+ status.flag |= RX_FLAG_FAILED_PLCP_CRC;
+ error &= ~AR9170_RX_ERROR_PLCP;
+ }
+
+ if (error & AR9170_RX_ERROR_FCS) {
+ status.flag |= RX_FLAG_FAILED_FCS_CRC;
+ error &= ~AR9170_RX_ERROR_FCS;
+ }
+
+ decrypt = ar9170_get_decrypt_type(tail);
+ if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
+ decrypt != AR9170_ENC_ALG_NONE)
+ status.flag |= RX_FLAG_DECRYPTED;
+
+ /* ignore wrong RA errors */
+ error &= ~AR9170_RX_ERROR_WRONG_RA;
+
+ if (error & AR9170_RX_ERROR_DECRYPT) {
+ error &= ~AR9170_RX_ERROR_DECRYPT;
+
+ /*
+ * Rx decryption is done in place,
+ * the original data is lost anyway.
+ */
+ return;
+ }
+
+ /* drop any other error frames */
+ if ((error) && (net_ratelimit())) {
+ printk(KERN_DEBUG "%s: errors: %#x\n",
+ wiphy_name(ar->hw->wiphy), error);
+ return;
+ }
+
+ buf += sizeof(struct ar9170_rx_head);
+ fc = *(__le16 *)buf;
+
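+ /* reserve extra headroom, apparently to keep the payload 4-byte
+ * aligned: a lone QoS control field or a lone fourth address
+ * shifts the 802.11 header length by two bytes */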
+ if (ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc))
+ reserved = 32 + 2;
+ else
+ reserved = 32;
+
+ skb = dev_alloc_skb(mpdu_len + reserved);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, reserved);
+ memcpy(skb_put(skb, mpdu_len), buf, mpdu_len);
+ ieee80211_rx_irqsafe(ar->hw, skb, &status);
+}
+
+void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
+{
+ unsigned int i, tlen, resplen;
+ u8 *tbuf, *respbuf;
+
+ tbuf = skb->data;
+ tlen = skb->len;
+
+ while (tlen >= 4) {
+ int clen = tbuf[1] << 8 | tbuf[0];
+ int wlen = (clen + 3) & ~3;
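+ /* each chunk: 16-bit little-endian length, 16-bit tag (0x4e00),
+ * then the payload, padded to a multiple of four bytes */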
+
+ /*
+ * parse stream (if any)
+ */
+ if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
+ printk(KERN_ERR "%s: missing tag!\n",
+ wiphy_name(ar->hw->wiphy));
+ return;
+ }
+ if (wlen > tlen - 4) {
+ printk(KERN_ERR "%s: invalid RX (%d, %d, %d)\n",
+ wiphy_name(ar->hw->wiphy), clen, wlen, tlen);
+ print_hex_dump(KERN_DEBUG, "data: ",
+ DUMP_PREFIX_OFFSET,
+ 16, 1, tbuf, tlen, true);
+ return;
+ }
+ resplen = clen;
+ respbuf = tbuf + 4;
+ tbuf += wlen + 4;
+ tlen -= wlen + 4;
+
+ i = 0;
+
+ /* weird thing, but this is the same in the original driver */
+ while (resplen > 2 && i < 12 &&
+ respbuf[0] == 0xff && respbuf[1] == 0xff) {
+ i += 2;
+ resplen -= 2;
+ respbuf += 2;
+ }
+
+ if (resplen < 4)
+ continue;
+
+ /* found the 6 * 0xffff marker? */
+ if (i == 12)
+ ar9170_handle_command_response(ar, respbuf, resplen);
+ else
+ ar9170_handle_mpdu(ar, respbuf, resplen);
+ }
+
+ if (tlen)
+ printk(KERN_ERR "%s: buffer remains!\n",
+ wiphy_name(ar->hw->wiphy));
+}
+
+#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
+do { \
+ queue.aifs = ai_fs; \
+ queue.cw_min = cwmin; \
+ queue.cw_max = cwmax; \
+ queue.txop = _txop; \
+} while (0)
+
+static int ar9170_op_start(struct ieee80211_hw *hw)
+{
+ struct ar9170 *ar = hw->priv;
+ int err, i;
+
+ mutex_lock(&ar->mutex);
+
+ /* reinitialize queues statistics */
+ memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
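+ /* allow up to 8 pending frames per queue before stopping it */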
+ for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
+ ar->tx_stats[i].limit = 8;
+
+ /* reset QoS defaults */
+ AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
+ AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023, 0); /* BACKGROUND */
+ AR9170_FILL_QUEUE(ar->edcf[2], 2, 7, 15, 94); /* VIDEO */
+ AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */
+ AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
+
+ err = ar->open(ar);
+ if (err)
+ goto out;
+
+ err = ar9170_init_mac(ar);
+ if (err)
+ goto out;
+
+ err = ar9170_set_qos(ar);
+ if (err)
+ goto out;
+
+ err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
+ if (err)
+ goto out;
+
+ err = ar9170_init_rf(ar);
+ if (err)
+ goto out;
+
+ /* start DMA */
+ err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
+ if (err)
+ goto out;
+
+ ar->state = AR9170_STARTED;
+
+out:
+ mutex_unlock(&ar->mutex);
+ return err;
+}
+
+static void ar9170_op_stop(struct ieee80211_hw *hw)
+{
+ struct ar9170 *ar = hw->priv;
+
+ if (IS_STARTED(ar))
+ ar->state = AR9170_IDLE;
+
+ mutex_lock(&ar->mutex);
+
+ cancel_delayed_work_sync(&ar->tx_status_janitor);
+ cancel_work_sync(&ar->filter_config_work);
+ cancel_work_sync(&ar->beacon_work);
+ skb_queue_purge(&ar->global_tx_status_waste);
+ skb_queue_purge(&ar->global_tx_status);
+
+ if (IS_ACCEPTING_CMD(ar)) {
+ ar9170_set_leds_state(ar, 0);
+
+ /* stop DMA */
+ ar9170_write_reg(ar, 0x1c3d30, 0);
+ ar->stop(ar);
+ }
+
+ mutex_unlock(&ar->mutex);
+}
+
+int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ar9170 *ar = hw->priv;
+ struct ieee80211_hdr *hdr;
+ struct ar9170_tx_control *txc;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_rate *rate = NULL;
+ struct ieee80211_tx_rate *txrate;
+ unsigned int queue = skb_get_queue_mapping(skb);
+ unsigned long flags = 0;
+ struct ar9170_sta_info *sta_info = NULL;
+ u32 power, chains;
+ u16 keytype = 0;
+ u16 len, icv = 0;
+ int err;
+ bool tx_status;
+
+ if (unlikely(!IS_STARTED(ar)))
+ goto err_free;
+
+ hdr = (void *)skb->data;
+ info = IEEE80211_SKB_CB(skb);
+ len = skb->len;
+
+ spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+ /* queue is overloaded - drop the frame instead of leaking it */
+ goto err_free;
+ }
+
+ ar->tx_stats[queue].len++;
+ ar->tx_stats[queue].count++;
+ if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
+ ieee80211_stop_queue(hw, queue);
+
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+
+ txc = (void *)skb_push(skb, sizeof(*txc));
+
+ tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
+ ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
+
+ if (info->control.hw_key) {
+ icv = info->control.hw_key->icv_len;
+
+ switch (info->control.hw_key->alg) {
+ case ALG_WEP:
+ keytype = AR9170_TX_MAC_ENCR_RC4;
+ break;
+ case ALG_TKIP:
+ keytype = AR9170_TX_MAC_ENCR_RC4;
+ break;
+ case ALG_CCMP:
+ keytype = AR9170_TX_MAC_ENCR_AES;
+ break;
+ default:
+ WARN_ON(1);
+ goto err_dequeue;
+ }
+ }
+
+ /* frame length, including the ICV (if any) and 4-byte FCS */
+ txc->length = cpu_to_le16(len + icv + 4);
+
+ txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
+ AR9170_TX_MAC_BACKOFF);
+ txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
+ AR9170_TX_MAC_QOS_SHIFT);
+ txc->mac_control |= cpu_to_le16(keytype);
+ txc->phy_control = cpu_to_le32(0);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+
+ txrate = &info->control.rates[0];
+
+ if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
+ else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
+
+ if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
+
+ if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
+
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
+ /* this works because 40 MHz is 2 and dup is 3 */
+ if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+ txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
+
+ if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+ txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
+
+ if (txrate->flags & IEEE80211_TX_RC_MCS) {
+ u32 r = txrate->idx;
+ u8 *txpower;
+
+ r <<= AR9170_TX_PHY_MCS_SHIFT;
+ if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
+ goto err_dequeue;
+ txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
+ txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
+
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ if (info->band == IEEE80211_BAND_5GHZ)
+ txpower = ar->power_5G_ht40;
+ else
+ txpower = ar->power_2G_ht40;
+ } else {
+ if (info->band == IEEE80211_BAND_5GHZ)
+ txpower = ar->power_5G_ht20;
+ else
+ txpower = ar->power_2G_ht20;
+ }
+
+ power = txpower[(txrate->idx) & 7];
+ } else {
+ u8 *txpower;
+ u32 mod;
+ u32 phyrate;
+ u8 idx = txrate->idx;
+
+ if (info->band != IEEE80211_BAND_2GHZ) {
+ idx += 4;
+ txpower = ar->power_5G_leg;
+ mod = AR9170_TX_PHY_MOD_OFDM;
+ } else {
+ if (idx < 4) {
+ txpower = ar->power_2G_cck;
+ mod = AR9170_TX_PHY_MOD_CCK;
+ } else {
+ mod = AR9170_TX_PHY_MOD_OFDM;
+ txpower = ar->power_2G_ofdm;
+ }
+ }
+
+ rate = &__ar9170_ratetable[idx];
+
+ phyrate = rate->hw_value & 0xF;
+ power = txpower[(rate->hw_value & 0x30) >> 4];
+ phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
+
+ txc->phy_control |= cpu_to_le32(mod);
+ txc->phy_control |= cpu_to_le32(phyrate);
+ }
+
+ power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
+ power &= AR9170_TX_PHY_TX_PWR_MASK;
+ txc->phy_control |= cpu_to_le32(power);
+
+ /* set TX chains */
+ if (ar->eeprom.tx_mask == 1) {
+ chains = AR9170_TX_PHY_TXCHAIN_1;
+ } else {
+ chains = AR9170_TX_PHY_TXCHAIN_2;
+
+ /* >= 36M legacy OFDM - use only one chain */
+ if (rate && rate->bitrate >= 360)
+ chains = AR9170_TX_PHY_TXCHAIN_1;
+ }
+ txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
+
+ if (tx_status) {
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+ /*
+ * WARNING:
+ * Putting the QoS queue bits into an unexplored territory is
+ * certainly not elegant.
+ *
+ * In my defense: This idea provides a reasonable way to
+ * smuggle valuable information to the tx_status callback.
+ * Also, the idea behind this bit-abuse came straight from
+ * the original driver code.
+ */
+
+ txc->phy_control |=
+ cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+
+ if (info->control.sta) {
+ sta_info = (void *) info->control.sta->drv_priv;
+ skb_queue_tail(&sta_info->tx_status[queue], skb);
+ } else {
+ skb_queue_tail(&ar->global_tx_status, skb);
+
+ queue_delayed_work(ar->hw->workqueue,
+ &ar->tx_status_janitor,
+ msecs_to_jiffies(100));
+ }
+ }
+
+ err = ar->tx(ar, skb, tx_status, 0);
+ if (unlikely(tx_status && err)) {
+ if (info->control.sta)
+ skb_unlink(skb, &sta_info->tx_status[queue]);
+ else
+ skb_unlink(skb, &ar->global_tx_status);
+ }
+
+ return NETDEV_TX_OK;
+
+err_dequeue:
+ spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ ar->tx_stats[queue].len--;
+ ar->tx_stats[queue].count--;
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+
+err_free:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int ar9170_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
+{
+ struct ar9170 *ar = hw->priv;
+ int err = 0;
+
+ mutex_lock(&ar->mutex);
+
+ if (ar->vif) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ ar->vif = conf->vif;
+ memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN);
+
+ if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
+ ar->rx_software_decryption = true;
+ ar->disable_offload = true;
+ }
+
+ ar->cur_filter = 0;
+ ar->want_filter = AR9170_MAC_REG_FTF_DEFAULTS;
+ err = ar9170_update_frame_filter(ar);
+ if (err)
+ goto unlock;
+
+ err = ar9170_set_operating_mode(ar);
+
+unlock:
+ mutex_unlock(&ar->mutex);
+ return err;
+}
+
+static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
+{
+ struct ar9170 *ar = hw->priv;
+
+ mutex_lock(&ar->mutex);
+ ar->vif = NULL;
+ ar->want_filter = 0;
+ ar9170_update_frame_filter(ar);
+ ar9170_set_beacon_timers(ar);
+ dev_kfree_skb(ar->beacon);
+ ar->beacon = NULL;
+ ar->sniffer_enabled = false;
+ ar->rx_software_decryption = false;
+ ar9170_set_operating_mode(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ar9170 *ar = hw->priv;
+ int err = 0;
+
+ mutex_lock(&ar->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
+ /* TODO */
+ err = 0;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
+ /* TODO */
+ err = 0;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ /* TODO */
+ err = 0;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ /* TODO */
+ err = 0;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ /*
+ * is it long_frame_max_tx_count or short_frame_max_tx_count?
+ */
+
+ err = ar9170_set_hwretry_limit(ar,
+ ar->hw->conf.long_frame_max_tx_count);
+ if (err)
+ goto out;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) {
+ err = ar9170_set_beacon_timers(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ err = ar9170_set_channel(ar, hw->conf.channel,
+ AR9170_RFI_NONE, AR9170_BW_20);
+ if (err)
+ goto out;
+ /* adjust slot time for 5 GHz */
+ if (hw->conf.channel->band == IEEE80211_BAND_5GHZ)
+ err = ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
+ 9 << 10);
+ }
+
+out:
+ mutex_unlock(&ar->mutex);
+ return err;
+}
+
+static int ar9170_op_config_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_if_conf *conf)
+{
+ struct ar9170 *ar = hw->priv;
+ int err = 0;
+
+ mutex_lock(&ar->mutex);
+
+ if (conf->changed & IEEE80211_IFCC_BSSID) {
+ memcpy(ar->bssid, conf->bssid, ETH_ALEN);
+ err = ar9170_set_operating_mode(ar);
+ }
+
+ if (conf->changed & IEEE80211_IFCC_BEACON) {
+ err = ar9170_update_beacon(ar);
+
+ if (err)
+ goto out;
+ err = ar9170_set_beacon_timers(ar);
+ }
+
+out:
+ mutex_unlock(&ar->mutex);
+ return err;
+}
+
+static void ar9170_set_filters(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170,
+ filter_config_work);
+ int err;
+
+ mutex_lock(&ar->mutex);
+ if (unlikely(!IS_STARTED(ar)))
+ goto unlock;
+
+ if (ar->filter_changed & AR9170_FILTER_CHANGED_PROMISC) {
+ err = ar9170_set_operating_mode(ar);
+ if (err)
+ goto unlock;
+ }
+
+ if (ar->filter_changed & AR9170_FILTER_CHANGED_MULTICAST) {
+ err = ar9170_update_multicast(ar);
+ if (err)
+ goto unlock;
+ }
+
+ if (ar->filter_changed & AR9170_FILTER_CHANGED_FRAMEFILTER)
+ err = ar9170_update_frame_filter(ar);
+
+unlock:
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags,
+ int mc_count, struct dev_mc_list *mclist)
+{
+ struct ar9170 *ar = hw->priv;
+
+ /* mask supported flags */
+ *new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
+ FIF_PROMISC_IN_BSS;
+
+ /*
+ * We can support more by setting the sniffer bit and
+ * then checking the error flags, later.
+ */
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI) {
+ ar->want_mc_hash = ~0ULL;
+ } else {
+ u64 mchash;
+ int i;
+
+ /* always get broadcast frames */
+ mchash = 1ULL << (0xff>>2);
+
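+ /* 64-bit hash: each address sets the bit indexed by the
+ * top six bits of its last octet */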
+ for (i = 0; i < mc_count; i++) {
+ if (WARN_ON(!mclist))
+ break;
+ mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
+ mclist = mclist->next;
+ }
+ ar->want_mc_hash = mchash;
+ }
+ ar->filter_changed |= AR9170_FILTER_CHANGED_MULTICAST;
+ }
+
+ if (changed_flags & FIF_CONTROL) {
+ u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
+ AR9170_MAC_REG_FTF_RTS |
+ AR9170_MAC_REG_FTF_CTS |
+ AR9170_MAC_REG_FTF_ACK |
+ AR9170_MAC_REG_FTF_CFE |
+ AR9170_MAC_REG_FTF_CFE_ACK;
+
+ if (*new_flags & FIF_CONTROL)
+ ar->want_filter = ar->cur_filter | filter;
+ else
+ ar->want_filter = ar->cur_filter & ~filter;
+
+ ar->filter_changed |= AR9170_FILTER_CHANGED_FRAMEFILTER;
+ }
+
+ if (changed_flags & FIF_PROMISC_IN_BSS) {
+ ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
+ ar->filter_changed |= AR9170_FILTER_CHANGED_PROMISC;
+ }
+
+ if (likely(IS_STARTED(ar)))
+ queue_work(ar->hw->workqueue, &ar->filter_config_work);
+}
+
+static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ar9170 *ar = hw->priv;
+ int err = 0;
+
+ mutex_lock(&ar->mutex);
+
+ ar9170_regwrite_begin(ar);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ ar->state = bss_conf->assoc ? AR9170_ASSOCIATED : ar->state;
+
+#ifndef CONFIG_AR9170_LEDS
+ /* enable assoc LED. */
+ err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
+#endif /* CONFIG_AR9170_LEDS */
+ }
+
+ if (changed & BSS_CHANGED_HT) {
+ /* TODO */
+ err = 0;
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime = 20;
+
+ if (bss_conf->use_short_slot)
+ slottime = 9;
+
+ ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ u32 cck, ofdm;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) {
+ ofdm = bss_conf->basic_rates;
+ cck = 0;
+ } else {
+ /* four cck rates */
+ cck = bss_conf->basic_rates & 0xf;
+ ofdm = bss_conf->basic_rates >> 4;
+ }
+ ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE,
+ ofdm << 8 | cck);
+ }
+
+ ar9170_regwrite_finish();
+ err = ar9170_regwrite_result();
+ mutex_unlock(&ar->mutex);
+}
+
+static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
+{
+ struct ar9170 *ar = hw->priv;
+ int err;
+ u32 tsf_low;
+ u32 tsf_high;
+ u64 tsf;
+
+ mutex_lock(&ar->mutex);
+ err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low);
+ if (!err)
+ err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high);
+ mutex_unlock(&ar->mutex);
+
+ if (WARN_ON(err))
+ return 0;
+
+ tsf = tsf_high;
+ tsf = (tsf << 32) | tsf_low;
+ return tsf;
+}
+
+static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ar9170 *ar = hw->priv;
+ int err = 0, i;
+ u8 ktype;
+
+ if ((!ar->vif) || (ar->disable_offload))
+ return -EOPNOTSUPP;
+
+ switch (key->alg) {
+ case ALG_WEP:
+ if (key->keylen == LEN_WEP40)
+ ktype = AR9170_ENC_ALG_WEP64;
+ else
+ ktype = AR9170_ENC_ALG_WEP128;
+ break;
+ case ALG_TKIP:
+ ktype = AR9170_ENC_ALG_TKIP;
+ break;
+ case ALG_CCMP:
+ ktype = AR9170_ENC_ALG_AESCCMP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&ar->mutex);
+ if (cmd == SET_KEY) {
+ if (unlikely(!IS_STARTED(ar))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* group keys need all-zeroes address */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ sta = NULL;
+
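+ /* pairwise keys take the first free hardware slot out of 0-63
+ * (tracked in ar->usedkeys); group keys use the fixed slots
+ * 64 + keyidx */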
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ for (i = 0; i < 64; i++)
+ if (!(ar->usedkeys & BIT(i)))
+ break;
+ if (i == 64) {
+ ar->rx_software_decryption = true;
+ ar9170_set_operating_mode(ar);
+ err = -ENOSPC;
+ goto out;
+ }
+ } else {
+ i = 64 + key->keyidx;
+ }
+
+ key->hw_key_idx = i;
+
+ err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
+ key->key, min_t(u8, 16, key->keylen));
+ if (err)
+ goto out;
+
+ if (key->alg == ALG_TKIP) {
+ err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
+ ktype, 1, key->key + 16, 16);
+ if (err)
+ goto out;
+
+ /*
+ * the hardware is not capable of generating the MMIC
+ * for fragmented frames!
+ */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ }
+
+ if (i < 64)
+ ar->usedkeys |= BIT(i);
+
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ } else {
+ if (unlikely(!IS_STARTED(ar))) {
+ /* The device is gone... together with the key ;-) */
+ err = 0;
+ goto out;
+ }
+
+ err = ar9170_disable_key(ar, key->hw_key_idx);
+ if (err)
+ goto out;
+
+ if (key->hw_key_idx < 64) {
+ ar->usedkeys &= ~BIT(key->hw_key_idx);
+ } else {
+ err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
+ AR9170_ENC_ALG_NONE, 0,
+ NULL, 0);
+ if (err)
+ goto out;
+
+ if (key->alg == ALG_TKIP) {
+ err = ar9170_upload_key(ar, key->hw_key_idx,
+ NULL,
+ AR9170_ENC_ALG_NONE, 1,
+ NULL, 0);
+ if (err)
+ goto out;
+ }
+
+ }
+ }
+
+ ar9170_regwrite_begin(ar);
+ ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
+ ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
+ ar9170_regwrite_finish();
+ err = ar9170_regwrite_result();
+
+out:
+ mutex_unlock(&ar->mutex);
+
+ return err;
+}
+
+static void ar9170_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ar9170 *ar = hw->priv;
+ struct ar9170_sta_info *info = (void *) sta->drv_priv;
+ struct sk_buff *skb;
+ unsigned int i;
+
+ switch (cmd) {
+ case STA_NOTIFY_ADD:
+ for (i = 0; i < ar->hw->queues; i++)
+ skb_queue_head_init(&info->tx_status[i]);
+ break;
+
+ case STA_NOTIFY_REMOVE:
+
+ /*
+ * transfer all outstanding frames that need a tx_status
+ * report to the global tx_status queue
+ */
+
+ for (i = 0; i < ar->hw->queues; i++) {
+ while ((skb = skb_dequeue(&info->tx_status[i]))) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: queueing frame in "
+ "global tx_status queue =>\n",
+ wiphy_name(ar->hw->wiphy));
+
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+ skb_queue_tail(&ar->global_tx_status, skb);
+ }
+ }
+ queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
+ msecs_to_jiffies(100));
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int ar9170_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ar9170 *ar = hw->priv;
+ u32 val;
+ int err;
+
+ mutex_lock(&ar->mutex);
+ err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
+ ar->stats.dot11ACKFailureCount += val;
+
+ memcpy(stats, &ar->stats, sizeof(*stats));
+ mutex_unlock(&ar->mutex);
+
+ return 0;
+}
+
+static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
+ struct ieee80211_tx_queue_stats *tx_stats)
+{
+ struct ar9170 *ar = hw->priv;
+
+ spin_lock_bh(&ar->tx_stats_lock);
+ memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
+ spin_unlock_bh(&ar->tx_stats_lock);
+
+ return 0;
+}
+
+static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+{
+ struct ar9170 *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->mutex);
+ if (param && queue < ar->hw->queues) {
+ memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
+ param, sizeof(*param));
+
+ ret = ar9170_set_qos(ar);
+ } else
+ ret = -EINVAL;
+
+ mutex_unlock(&ar->mutex);
+ return ret;
+}
+
+static const struct ieee80211_ops ar9170_ops = {
+ .start = ar9170_op_start,
+ .stop = ar9170_op_stop,
+ .tx = ar9170_op_tx,
+ .add_interface = ar9170_op_add_interface,
+ .remove_interface = ar9170_op_remove_interface,
+ .config = ar9170_op_config,
+ .config_interface = ar9170_op_config_interface,
+ .configure_filter = ar9170_op_configure_filter,
+ .conf_tx = ar9170_conf_tx,
+ .bss_info_changed = ar9170_op_bss_info_changed,
+ .get_tsf = ar9170_op_get_tsf,
+ .set_key = ar9170_set_key,
+ .sta_notify = ar9170_sta_notify,
+ .get_stats = ar9170_get_stats,
+ .get_tx_stats = ar9170_get_tx_stats,
+};
+
+void *ar9170_alloc(size_t priv_size)
+{
+ struct ieee80211_hw *hw;
+ struct ar9170 *ar;
+ int i;
+
+ hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ ar = hw->priv;
+ ar->hw = hw;
+
+ mutex_init(&ar->mutex);
+ spin_lock_init(&ar->cmdlock);
+ spin_lock_init(&ar->tx_stats_lock);
+ skb_queue_head_init(&ar->global_tx_status);
+ skb_queue_head_init(&ar->global_tx_status_waste);
+ INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
+ INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
+ INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);
+
+ /* all hw supports 2.4 GHz, so set channel to 1 by default */
+ ar->channel = &ar9170_2ghz_chantable[0];
+
+ /* first part of wiphy init */
+ ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_WDS) |
+ BIT(NL80211_IFTYPE_ADHOC);
+ ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_NOISE_DBM;
+
+ ar->hw->queues = __AR9170_NUM_TXQ;
+ ar->hw->extra_tx_headroom = 8;
+ ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
+
+ ar->hw->max_rates = 1;
+ ar->hw->max_rate_tries = 3;
+
+ for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
+ ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
+
+ return ar;
+}
+
+static int ar9170_read_eeprom(struct ar9170 *ar)
+{
+#define RW 8 /* number of words to read at once */
+#define RB (sizeof(u32) * RW)
+ u8 *eeprom = (void *)&ar->eeprom;
+ u8 *addr = ar->eeprom.mac_address;
+ __le32 offsets[RW];
+ int i, j, err, bands = 0;
+
+ BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
+
+ BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
+#ifndef __CHECKER__
+ /* don't want to handle trailing remains */
+ BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
+#endif
+
+ for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
+ for (j = 0; j < RW; j++)
+ offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
+ RB * i + 4 * j);
+
+ err = ar->exec_cmd(ar, AR9170_CMD_RREG,
+ RB, (u8 *) &offsets,
+ RB, eeprom + RB * i);
+ if (err)
+ return err;
+ }
+
+#undef RW
+#undef RB
+
+ if (ar->eeprom.length == cpu_to_le16(0xFFFF))
+ return -ENODATA;
+
+ if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
+ bands++;
+ }
+ if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
+ ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
+ bands++;
+ }
+ /*
+ * I measured this: a band switch takes roughly
+ * 135 ms and a frequency switch about 80 ms.
+ *
+ * FIXME: measure these values again once EEPROM settings
+ * are used, that will influence them!
+ */
+ if (bands == 2)
+ ar->hw->channel_change_time = 135 * 1000;
+ else
+ ar->hw->channel_change_time = 80 * 1000;
+
+ /* second part of wiphy init */
+ SET_IEEE80211_PERM_ADDR(ar->hw, addr);
+
+ return bands ? 0 : -EINVAL;
+}
+
+int ar9170_register(struct ar9170 *ar, struct device *pdev)
+{
+ int err;
+
+ /* try to read EEPROM, init MAC addr */
+ err = ar9170_read_eeprom(ar);
+ if (err)
+ goto err_out;
+
+ err = ieee80211_register_hw(ar->hw);
+ if (err)
+ goto err_out;
+
+ err = ar9170_init_leds(ar);
+ if (err)
+ goto err_unreg;
+
+#ifdef CONFIG_AR9170_LEDS
+ err = ar9170_register_leds(ar);
+ if (err)
+ goto err_unreg;
+#endif /* CONFIG_AR9170_LEDS */
+
+ dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
+ wiphy_name(ar->hw->wiphy));
+
+ return err;
+
+err_unreg:
+ ieee80211_unregister_hw(ar->hw);
+
+err_out:
+ return err;
+}
+
+void ar9170_unregister(struct ar9170 *ar)
+{
+#ifdef CONFIG_AR9170_LEDS
+ ar9170_unregister_leds(ar);
+#endif /* CONFIG_AR9170_LEDS */
+
+ ieee80211_unregister_hw(ar->hw);
+ mutex_destroy(&ar->mutex);
+}
diff --git a/drivers/net/wireless/ar9170/phy.c b/drivers/net/wireless/ar9170/phy.c
new file mode 100644
index 000000000000..6ce20754b8e7
--- /dev/null
+++ b/drivers/net/wireless/ar9170/phy.c
@@ -0,0 +1,1240 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * PHY and RF code
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/bitrev.h>
+#include "ar9170.h"
+#include "cmd.h"
+
+static int ar9170_init_power_cal(struct ar9170 *ar)
+{
+ ar9170_regwrite_begin(ar);
+
+ ar9170_regwrite(0x1bc000 + 0x993c, 0x7f);
+ ar9170_regwrite(0x1bc000 + 0x9934, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0x9938, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0xa234, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0xa238, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0xa38c, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0xa390, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0xa3cc, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0xa3d0, 0x3f3f3f3f);
+ ar9170_regwrite(0x1bc000 + 0xa3d4, 0x3f3f3f3f);
+
+ ar9170_regwrite_finish();
+ return ar9170_regwrite_result();
+}
+
+struct ar9170_phy_init {
+ u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
+};
+
+static struct ar9170_phy_init ar5416_phy_init[] = {
+ { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
+ { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
+ { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
+ { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
+ { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
+ { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
+ { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
+ { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
+ { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
+ { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
+ { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
+ { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
+ { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
+ { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
+ { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
+ { 0x1c5850, 0x6c48b4e4, 0x6c48b4e4, 0x6c48b0e4, 0x6c48b0e4, },
+ { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
+ { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
+ { 0x1c585c, 0x31395c5e, 0x31395c5e, 0x31395c5e, 0x31395c5e, },
+ { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
+ { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
+ { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
+ { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
+ { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
+ { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
+ { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
+ { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
+ { 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
+ { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
+ { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
+ { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
+ { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
+ { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
+ { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
+ { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
+ { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
+ { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
+ { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
+ { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
+ { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
+ { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
+ { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
+ { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
+ { 0x1c59c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, },
+ { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
+ { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
+ { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
+ { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
+ { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
+ { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
+ { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
+ { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
+ { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
+ { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
+ { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
+ { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
+ { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
+ { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
+ { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
+ { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
+ { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
+ { 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
+ { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
+ { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
+ { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
+ { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
+ { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
+ { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
+ { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
+ { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
+ { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
+ { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
+ { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
+ { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
+ { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
+ { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
+ { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
+ { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
+ { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
+ { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
+ { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
+ { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
+ { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
+ { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
+ { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
+ { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
+ { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
+ { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
+ { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
+ { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
+ { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
+ { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
+ { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
+ { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
+ { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
+ { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
+ { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
+ { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
+ { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
+ { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
+ { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
+ { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
+ { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
+ { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
+ { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
+ { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
+ { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
+ { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
+ { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
+ { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
+ { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
+ { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
+ { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
+ { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
+ { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
+ { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
+ { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
+ { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
+ { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
+ { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
+ { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
+ { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
+ { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
+ { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
+ { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
+ { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
+ { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
+ { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
+ { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
+ { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
+ { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
+ { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
+ { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
+ { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
+ { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
+ { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
+ { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
+ { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
+ { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
+ { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
+ { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
+ { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
+ { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
+ { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
+ { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
+ { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
+ { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
+ { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
+ { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
+ { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
+ { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
+ { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
+ { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
+ { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
+ { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
+ { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
+ { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
+ { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
+ { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
+ { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
+ { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
+ { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
+ { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
+ { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
+ { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
+ { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
+ { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
+ { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
+ { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
+ { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
+ { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
+ { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
+ { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
+ { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
+ { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
+ { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
+ { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
+ { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
+ { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
+ { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
+ { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
+ { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
+ { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
+ { 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
+ { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
+ { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
+ { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
+/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
+ { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
+ { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
+ { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
+ { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
+ { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
+ { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
+ { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
+ { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
+ { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
+ { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
+ { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
+ { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
+ { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
+ { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
+ { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
+ { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
+};
+
+int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
+{
+ int i, err;
+ u32 val;
+ bool is_2ghz = band == IEEE80211_BAND_2GHZ;
+ bool is_40mhz = false; /* XXX: for now */
+
+ ar9170_regwrite_begin(ar);
+
+ for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
+ if (is_40mhz) {
+ if (is_2ghz)
+ val = ar5416_phy_init[i]._2ghz_40;
+ else
+ val = ar5416_phy_init[i]._5ghz_40;
+ } else {
+ if (is_2ghz)
+ val = ar5416_phy_init[i]._2ghz_20;
+ else
+ val = ar5416_phy_init[i]._5ghz_20;
+ }
+
+ ar9170_regwrite(ar5416_phy_init[i].reg, val);
+ }
+
+ ar9170_regwrite_finish();
+ err = ar9170_regwrite_result();
+ if (err)
+ return err;
+
+ /* XXX: use EEPROM data here! */
+
+ err = ar9170_init_power_cal(ar);
+ if (err)
+ return err;
+
+ /* XXX: remove magic! */
+ if (is_2ghz)
+ err = ar9170_write_reg(ar, 0x1d4014, 0x5163);
+ else
+ err = ar9170_write_reg(ar, 0x1d4014, 0x5143);
+
+ return err;
+}
+
+struct ar9170_rf_init {
+ u32 reg, _5ghz, _2ghz;
+};
+
+static struct ar9170_rf_init ar9170_rf_init[] = {
+ /* bank 0 */
+ { 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
+ { 0x1c58e0, 0x02008020, 0x02008020},
+ /* bank 1 */
+ { 0x1c58b0, 0x02108421, 0x02108421},
+ { 0x1c58ec, 0x00000008, 0x00000008},
+ /* bank 2 */
+ { 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
+ { 0x1c58e0, 0x00000420, 0x00000420},
+ /* bank 3 */
+ { 0x1c58f0, 0x01400018, 0x01c00018},
+ /* bank 4 */
+ { 0x1c58b0, 0x000001a1, 0x000001a1},
+ { 0x1c58e8, 0x00000001, 0x00000001},
+ /* bank 5 */
+ { 0x1c58b0, 0x00000013, 0x00000013},
+ { 0x1c58e4, 0x00000002, 0x00000002},
+ /* bank 6 */
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00004000, 0x00004000},
+ { 0x1c58b0, 0x00006c00, 0x00006c00},
+ { 0x1c58b0, 0x00002c00, 0x00002c00},
+ { 0x1c58b0, 0x00004800, 0x00004800},
+ { 0x1c58b0, 0x00004000, 0x00004000},
+ { 0x1c58b0, 0x00006000, 0x00006000},
+ { 0x1c58b0, 0x00001000, 0x00001000},
+ { 0x1c58b0, 0x00004000, 0x00004000},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00087c00, 0x00087c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00005400, 0x00005400},
+ { 0x1c58b0, 0x00000c00, 0x00000c00},
+ { 0x1c58b0, 0x00001800, 0x00001800},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00006c00, 0x00006c00},
+ { 0x1c58b0, 0x00006c00, 0x00006c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00002c00, 0x00002c00},
+ { 0x1c58b0, 0x00003c00, 0x00003c00},
+ { 0x1c58b0, 0x00003800, 0x00003800},
+ { 0x1c58b0, 0x00001c00, 0x00001c00},
+ { 0x1c58b0, 0x00000800, 0x00000800},
+ { 0x1c58b0, 0x00000408, 0x00000408},
+ { 0x1c58b0, 0x00004c15, 0x00004c15},
+ { 0x1c58b0, 0x00004188, 0x00004188},
+ { 0x1c58b0, 0x0000201e, 0x0000201e},
+ { 0x1c58b0, 0x00010408, 0x00010408},
+ { 0x1c58b0, 0x00000801, 0x00000801},
+ { 0x1c58b0, 0x00000c08, 0x00000c08},
+ { 0x1c58b0, 0x0000181e, 0x0000181e},
+ { 0x1c58b0, 0x00001016, 0x00001016},
+ { 0x1c58b0, 0x00002800, 0x00002800},
+ { 0x1c58b0, 0x00004010, 0x00004010},
+ { 0x1c58b0, 0x0000081c, 0x0000081c},
+ { 0x1c58b0, 0x00000115, 0x00000115},
+ { 0x1c58b0, 0x00000015, 0x00000015},
+ { 0x1c58b0, 0x00000066, 0x00000066},
+ { 0x1c58b0, 0x0000001c, 0x0000001c},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000004, 0x00000004},
+ { 0x1c58b0, 0x00000015, 0x00000015},
+ { 0x1c58b0, 0x0000001f, 0x0000001f},
+ { 0x1c58e0, 0x00000000, 0x00000400},
+ /* bank 7 */
+ { 0x1c58b0, 0x000000a0, 0x000000a0},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000040, 0x00000040},
+ { 0x1c58f0, 0x0000001c, 0x0000001c},
+};
+
+static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
+{
+ int err, i;
+
+ ar9170_regwrite_begin(ar);
+
+ for (i = 0; i < ARRAY_SIZE(ar9170_rf_init); i++)
+ ar9170_regwrite(ar9170_rf_init[i].reg,
+ band5ghz ? ar9170_rf_init[i]._5ghz
+ : ar9170_rf_init[i]._2ghz);
+
+ ar9170_regwrite_finish();
+ err = ar9170_regwrite_result();
+ if (err)
+ printk(KERN_ERR "%s: rf init failed\n",
+ wiphy_name(ar->hw->wiphy));
+ return err;
+}
+
+static int ar9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
+ u32 freq, enum ar9170_bw bw)
+{
+ int err;
+ u32 d0, d1, td0, td1, fd0, fd1;
+ u8 chansel;
+ u8 refsel0 = 1, refsel1 = 0;
+ u8 lf_synth = 0;
+
+ switch (bw) {
+ case AR9170_BW_40_ABOVE:
+ freq += 10;
+ break;
+ case AR9170_BW_40_BELOW:
+ freq -= 10;
+ break;
+ case AR9170_BW_20:
+ break;
+ case __AR9170_NUM_BW:
+ BUG();
+ }
+
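+	/*
+	 * Derive the synthesizer channel select word from the centre
+	 * frequency.  The result is bit-reversed, presumably because
+	 * bank 4 shifts the value out LSB first.
+	 */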
+ if (band5ghz) {
+ if (freq % 10) {
+ chansel = (freq - 4800) / 5;
+ } else {
+ chansel = ((freq - 4800) / 10) * 2;
+ refsel0 = 0;
+ refsel1 = 1;
+ }
+ chansel = byte_rev_table[chansel];
+ } else {
+ if (freq == 2484) {
+ chansel = 10 + (freq - 2274) / 5;
+ lf_synth = 1;
+ } else
+ chansel = 16 + (freq - 2272) / 5;
+ chansel *= 4;
+ chansel = byte_rev_table[chansel];
+ }
+
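+	/*
+	 * Pack the two 8-bit programming values: d0 carries the fixed
+	 * bits plus the reference/LF-synth selects, d1 the (reversed)
+	 * channel select.  Each is split into its low five and high
+	 * three bits and interleaved into the two register writes below.
+	 */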
+ d1 = chansel;
+ d0 = 0x21 |
+ refsel0 << 3 |
+ refsel1 << 2 |
+ lf_synth << 1;
+ td0 = d0 & 0x1f;
+ td1 = d1 & 0x1f;
+ fd0 = td1 << 5 | td0;
+
+ td0 = (d0 >> 5) & 0x7;
+ td1 = (d1 >> 5) & 0x7;
+ fd1 = td1 << 5 | td0;
+
+ ar9170_regwrite_begin(ar);
+
+ ar9170_regwrite(0x1c58b0, fd0);
+ ar9170_regwrite(0x1c58e8, fd1);
+
+ ar9170_regwrite_finish();
+ err = ar9170_regwrite_result();
+ if (err)
+ return err;
+
+ msleep(10);
+
+ return 0;
+}
+
+struct ar9170_phy_freq_params {
+ u8 coeff_exp;
+ u16 coeff_man;
+ u8 coeff_exp_shgi;
+ u16 coeff_man_shgi;
+};
+
+struct ar9170_phy_freq_entry {
+ u16 freq;
+ struct ar9170_phy_freq_params params[__AR9170_NUM_BW];
+};
+
+/* NB: must be in sync with channel tables in main! */
+static const struct ar9170_phy_freq_entry ar9170_phy_freq_params[] = {
+/*
+ * freq,
+ * 20MHz,
+ * 40MHz (below),
+ * 40MHz (above),
+ */
+ { 2412, {
+ { 3, 21737, 3, 19563, },
+ { 3, 21827, 3, 19644, },
+ { 3, 21647, 3, 19482, },
+ } },
+ { 2417, {
+ { 3, 21692, 3, 19523, },
+ { 3, 21782, 3, 19604, },
+ { 3, 21602, 3, 19442, },
+ } },
+ { 2422, {
+ { 3, 21647, 3, 19482, },
+ { 3, 21737, 3, 19563, },
+ { 3, 21558, 3, 19402, },
+ } },
+ { 2427, {
+ { 3, 21602, 3, 19442, },
+ { 3, 21692, 3, 19523, },
+ { 3, 21514, 3, 19362, },
+ } },
+ { 2432, {
+ { 3, 21558, 3, 19402, },
+ { 3, 21647, 3, 19482, },
+ { 3, 21470, 3, 19323, },
+ } },
+ { 2437, {
+ { 3, 21514, 3, 19362, },
+ { 3, 21602, 3, 19442, },
+ { 3, 21426, 3, 19283, },
+ } },
+ { 2442, {
+ { 3, 21470, 3, 19323, },
+ { 3, 21558, 3, 19402, },
+ { 3, 21382, 3, 19244, },
+ } },
+ { 2447, {
+ { 3, 21426, 3, 19283, },
+ { 3, 21514, 3, 19362, },
+ { 3, 21339, 3, 19205, },
+ } },
+ { 2452, {
+ { 3, 21382, 3, 19244, },
+ { 3, 21470, 3, 19323, },
+ { 3, 21295, 3, 19166, },
+ } },
+ { 2457, {
+ { 3, 21339, 3, 19205, },
+ { 3, 21426, 3, 19283, },
+ { 3, 21252, 3, 19127, },
+ } },
+ { 2462, {
+ { 3, 21295, 3, 19166, },
+ { 3, 21382, 3, 19244, },
+ { 3, 21209, 3, 19088, },
+ } },
+ { 2467, {
+ { 3, 21252, 3, 19127, },
+ { 3, 21339, 3, 19205, },
+ { 3, 21166, 3, 19050, },
+ } },
+ { 2472, {
+ { 3, 21209, 3, 19088, },
+ { 3, 21295, 3, 19166, },
+ { 3, 21124, 3, 19011, },
+ } },
+ { 2484, {
+ { 3, 21107, 3, 18996, },
+ { 3, 21192, 3, 19073, },
+ { 3, 21022, 3, 18920, },
+ } },
+ { 4920, {
+ { 4, 21313, 4, 19181, },
+ { 4, 21356, 4, 19220, },
+ { 4, 21269, 4, 19142, },
+ } },
+ { 4940, {
+ { 4, 21226, 4, 19104, },
+ { 4, 21269, 4, 19142, },
+ { 4, 21183, 4, 19065, },
+ } },
+ { 4960, {
+ { 4, 21141, 4, 19027, },
+ { 4, 21183, 4, 19065, },
+ { 4, 21098, 4, 18988, },
+ } },
+ { 4980, {
+ { 4, 21056, 4, 18950, },
+ { 4, 21098, 4, 18988, },
+ { 4, 21014, 4, 18912, },
+ } },
+ { 5040, {
+ { 4, 20805, 4, 18725, },
+ { 4, 20846, 4, 18762, },
+ { 4, 20764, 4, 18687, },
+ } },
+ { 5060, {
+ { 4, 20723, 4, 18651, },
+ { 4, 20764, 4, 18687, },
+ { 4, 20682, 4, 18614, },
+ } },
+ { 5080, {
+ { 4, 20641, 4, 18577, },
+ { 4, 20682, 4, 18614, },
+ { 4, 20601, 4, 18541, },
+ } },
+ { 5180, {
+ { 4, 20243, 4, 18219, },
+ { 4, 20282, 4, 18254, },
+ { 4, 20204, 4, 18183, },
+ } },
+ { 5200, {
+ { 4, 20165, 4, 18148, },
+ { 4, 20204, 4, 18183, },
+ { 4, 20126, 4, 18114, },
+ } },
+ { 5220, {
+ { 4, 20088, 4, 18079, },
+ { 4, 20126, 4, 18114, },
+ { 4, 20049, 4, 18044, },
+ } },
+ { 5240, {
+ { 4, 20011, 4, 18010, },
+ { 4, 20049, 4, 18044, },
+ { 4, 19973, 4, 17976, },
+ } },
+ { 5260, {
+ { 4, 19935, 4, 17941, },
+ { 4, 19973, 4, 17976, },
+ { 4, 19897, 4, 17907, },
+ } },
+ { 5280, {
+ { 4, 19859, 4, 17873, },
+ { 4, 19897, 4, 17907, },
+ { 4, 19822, 4, 17840, },
+ } },
+ { 5300, {
+ { 4, 19784, 4, 17806, },
+ { 4, 19822, 4, 17840, },
+ { 4, 19747, 4, 17772, },
+ } },
+ { 5320, {
+ { 4, 19710, 4, 17739, },
+ { 4, 19747, 4, 17772, },
+ { 4, 19673, 4, 17706, },
+ } },
+ { 5500, {
+ { 4, 19065, 4, 17159, },
+ { 4, 19100, 4, 17190, },
+ { 4, 19030, 4, 17127, },
+ } },
+ { 5520, {
+ { 4, 18996, 4, 17096, },
+ { 4, 19030, 4, 17127, },
+ { 4, 18962, 4, 17065, },
+ } },
+ { 5540, {
+ { 4, 18927, 4, 17035, },
+ { 4, 18962, 4, 17065, },
+ { 4, 18893, 4, 17004, },
+ } },
+ { 5560, {
+ { 4, 18859, 4, 16973, },
+ { 4, 18893, 4, 17004, },
+ { 4, 18825, 4, 16943, },
+ } },
+ { 5580, {
+ { 4, 18792, 4, 16913, },
+ { 4, 18825, 4, 16943, },
+ { 4, 18758, 4, 16882, },
+ } },
+ { 5600, {
+ { 4, 18725, 4, 16852, },
+ { 4, 18758, 4, 16882, },
+ { 4, 18691, 4, 16822, },
+ } },
+ { 5620, {
+ { 4, 18658, 4, 16792, },
+ { 4, 18691, 4, 16822, },
+ { 4, 18625, 4, 16762, },
+ } },
+ { 5640, {
+ { 4, 18592, 4, 16733, },
+ { 4, 18625, 4, 16762, },
+ { 4, 18559, 4, 16703, },
+ } },
+ { 5660, {
+ { 4, 18526, 4, 16673, },
+ { 4, 18559, 4, 16703, },
+ { 4, 18493, 4, 16644, },
+ } },
+ { 5680, {
+ { 4, 18461, 4, 16615, },
+ { 4, 18493, 4, 16644, },
+ { 4, 18428, 4, 16586, },
+ } },
+ { 5700, {
+ { 4, 18396, 4, 16556, },
+ { 4, 18428, 4, 16586, },
+ { 4, 18364, 4, 16527, },
+ } },
+ { 5745, {
+ { 4, 18252, 4, 16427, },
+ { 4, 18284, 4, 16455, },
+ { 4, 18220, 4, 16398, },
+ } },
+ { 5765, {
+ { 4, 18189, 5, 32740, },
+ { 4, 18220, 4, 16398, },
+ { 4, 18157, 5, 32683, },
+ } },
+ { 5785, {
+ { 4, 18126, 5, 32626, },
+ { 4, 18157, 5, 32683, },
+ { 4, 18094, 5, 32570, },
+ } },
+ { 5805, {
+ { 4, 18063, 5, 32514, },
+ { 4, 18094, 5, 32570, },
+ { 4, 18032, 5, 32458, },
+ } },
+ { 5825, {
+ { 4, 18001, 5, 32402, },
+ { 4, 18032, 5, 32458, },
+ { 4, 17970, 5, 32347, },
+ } },
+ { 5170, {
+ { 4, 20282, 4, 18254, },
+ { 4, 20321, 4, 18289, },
+ { 4, 20243, 4, 18219, },
+ } },
+ { 5190, {
+ { 4, 20204, 4, 18183, },
+ { 4, 20243, 4, 18219, },
+ { 4, 20165, 4, 18148, },
+ } },
+ { 5210, {
+ { 4, 20126, 4, 18114, },
+ { 4, 20165, 4, 18148, },
+ { 4, 20088, 4, 18079, },
+ } },
+ { 5230, {
+ { 4, 20049, 4, 18044, },
+ { 4, 20088, 4, 18079, },
+ { 4, 20011, 4, 18010, },
+ } },
+};
+
+static const struct ar9170_phy_freq_params *
+ar9170_get_hw_dyn_params(struct ieee80211_channel *channel,
+ enum ar9170_bw bw)
+{
+ unsigned int chanidx = 0;
+ u16 freq = 2412;
+
+ if (channel) {
+ chanidx = channel->hw_value;
+ freq = channel->center_freq;
+ }
+
+ BUG_ON(chanidx >= ARRAY_SIZE(ar9170_phy_freq_params));
+
+ BUILD_BUG_ON(__AR9170_NUM_BW != 3);
+
+ WARN_ON(ar9170_phy_freq_params[chanidx].freq != freq);
+
+ return &ar9170_phy_freq_params[chanidx].params[bw];
+}
+
+
+int ar9170_init_rf(struct ar9170 *ar)
+{
+ const struct ar9170_phy_freq_params *freqpar;
+ __le32 cmd[7];
+ int err;
+
+ err = ar9170_init_rf_banks_0_7(ar, false);
+ if (err)
+ return err;
+
+ err = ar9170_init_rf_bank4_pwr(ar, false, 2412, AR9170_BW_20);
+ if (err)
+ return err;
+
+ freqpar = ar9170_get_hw_dyn_params(NULL, AR9170_BW_20);
+
+ cmd[0] = cpu_to_le32(2412 * 1000);
+ cmd[1] = cpu_to_le32(0);
+ cmd[2] = cpu_to_le32(1);
+ cmd[3] = cpu_to_le32(freqpar->coeff_exp);
+ cmd[4] = cpu_to_le32(freqpar->coeff_man);
+ cmd[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
+ cmd[6] = cpu_to_le32(freqpar->coeff_man_shgi);
+
+ /* RF_INIT echoes the command back to us */
+ err = ar->exec_cmd(ar, AR9170_CMD_RF_INIT,
+ sizeof(cmd), (u8 *)cmd,
+ sizeof(cmd), (u8 *)cmd);
+ if (err)
+ return err;
+
+ msleep(1000);
+
+ return ar9170_echo_test(ar, 0xaabbccdd);
+}
+
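+/*
+ * Return the index of the lower bracketing entry in @freqs for
+ * frequency @f, so the caller can interpolate between freqs[idx]
+ * and freqs[idx + 1].
+ */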
+static int ar9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
+{
+ int idx = nfreqs - 2;
+
+ while (idx >= 0) {
+ if (f >= freqs[idx])
+ return idx;
+ idx--;
+ }
+
+ return 0;
+}
+
+static s32 ar9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+ /* nothing to interpolate, it's horizontal */
+ if (y2 == y1)
+ return y1;
+
+ /* check if we hit one of the edges */
+ if (x == x1)
+ return y1;
+ if (x == x2)
+ return y2;
+
+ /* x1 == x2 would mean dividing by zero; hopefully x matches, return y1 regardless */
+ if (x2 == x1)
+ return y1;
+
+ return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1));
+}
+
+static u8 ar9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
+{
+#define SHIFT 8
+ s32 y;
+
+ y = ar9170_interpolate_s32(x << SHIFT,
+ x1 << SHIFT, y1 << SHIFT,
+ x2 << SHIFT, y2 << SHIFT);
+
+ /*
+ * XXX: unwrap this expression
+ * Note: this rounds to the nearest integer, i.e. it behaves like
+ * DIV_ROUND_CLOSEST(y, 1 << SHIFT) for non-negative y, not like
+ * DIV_ROUND_UP.
+ * Can we rely on the compiler to optimise away the div?
+ */
+ return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
+#undef SHIFT
+}
+
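+/*
+ * Interpolate the EEPROM calibration target powers to the current
+ * frequency and program the resulting ACK/CTS TX power settings.
+ */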
+static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
+{
+ struct ar9170_calibration_target_power_legacy *ctpl;
+ struct ar9170_calibration_target_power_ht *ctph;
+ u8 *ctpres;
+ int ntargets;
+ int idx, i, n;
+ u8 ackpower, ackchains, f;
+ u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];
+
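+ /*
+  * Convert the frequency into the compressed single-byte form used
+  * by the calibration tables: (freq - 2300) for 2.4 GHz channels,
+  * (freq - 4800) / 5 for 5 GHz channels.
+  */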
+ if (freq < 3000)
+ f = freq - 2300;
+ else
+ f = (freq - 4800)/5;
+
+ /*
+ * cycle through the various modes
+ *
+ * legacy modes first: 5G, 2G CCK, 2G OFDM
+ */
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0: /* 5 GHz legacy */
+ ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
+ ntargets = AR5416_NUM_5G_TARGET_PWRS;
+ ctpres = ar->power_5G_leg;
+ break;
+ case 1: /* 2.4 GHz CCK */
+ ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
+ ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
+ ctpres = ar->power_2G_cck;
+ break;
+ case 2: /* 2.4 GHz OFDM */
+ ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
+ ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
+ ctpres = ar->power_2G_ofdm;
+ break;
+ default:
+ BUG();
+ }
+
+ for (n = 0; n < ntargets; n++) {
+ if (ctpl[n].freq == 0xff)
+ break;
+ pwr_freqs[n] = ctpl[n].freq;
+ }
+ ntargets = n;
+ idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
+ for (n = 0; n < 4; n++)
+ ctpres[n] = ar9170_interpolate_u8(
+ f,
+ ctpl[idx + 0].freq,
+ ctpl[idx + 0].power[n],
+ ctpl[idx + 1].freq,
+ ctpl[idx + 1].power[n]);
+ }
+
+ /*
+ * HT modes now: 5G HT20, 5G HT40, 2G HT20, 2G HT40
+ */
+ for (i = 0; i < 4; i++) {
+ switch (i) {
+ case 0: /* 5 GHz HT 20 */
+ ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
+ ntargets = AR5416_NUM_5G_TARGET_PWRS;
+ ctpres = ar->power_5G_ht20;
+ break;
+ case 1: /* 5 GHz HT 40 */
+ ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
+ ntargets = AR5416_NUM_5G_TARGET_PWRS;
+ ctpres = ar->power_5G_ht40;
+ break;
+ case 2: /* 2.4 GHz HT 20 */
+ ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
+ ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
+ ctpres = ar->power_2G_ht20;
+ break;
+ case 3: /* 2.4 GHz HT 40 */
+ ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
+ ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
+ ctpres = ar->power_2G_ht40;
+ break;
+ default:
+ BUG();
+ }
+
+ for (n = 0; n < ntargets; n++) {
+ if (ctph[n].freq == 0xff)
+ break;
+ pwr_freqs[n] = ctph[n].freq;
+ }
+ ntargets = n;
+ idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
+ for (n = 0; n < 8; n++)
+ ctpres[n] = ar9170_interpolate_u8(
+ f,
+ ctph[idx + 0].freq,
+ ctph[idx + 0].power[n],
+ ctph[idx + 1].freq,
+ ctph[idx + 1].power[n]);
+ }
+
+ /* set ACK/CTS TX power */
+ ar9170_regwrite_begin(ar);
+
+ if (ar->eeprom.tx_mask != 1)
+ ackchains = AR9170_TX_PHY_TXCHAIN_2;
+ else
+ ackchains = AR9170_TX_PHY_TXCHAIN_1;
+
+ if (freq < 3000)
+ ackpower = ar->power_2G_ofdm[0] & 0x3f;
+ else
+ ackpower = ar->power_5G_leg[0] & 0x3f;
+
+ ar9170_regwrite(0x1c3694, ackpower << 20 | ackchains << 26);
+ ar9170_regwrite(0x1c3bb4, ackpower << 5 | ackchains << 11 |
+ ackpower << 21 | ackchains << 27);
+
+ ar9170_regwrite_finish();
+ return ar9170_regwrite_result();
+}
+
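+/*
+ * Convert the 9-bit raw noise reading into a signed dBm value:
+ * bit 8 selects the negative range, the remaining bits are halved.
+ */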
+static int ar9170_calc_noise_dbm(u32 raw_noise)
+{
+ if (raw_noise & 0x100)
+ return ~((raw_noise & 0x0ff) >> 1);
+ else
+ return (raw_noise & 0xff) >> 1;
+}
+
+int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
+ enum ar9170_rf_init_mode rfi, enum ar9170_bw bw)
+{
+ const struct ar9170_phy_freq_params *freqpar;
+ u32 cmd, tmp, offs;
+ __le32 vals[8];
+ int i, err;
+ bool bandswitch;
+
+ /* clear BB heavy clip enable */
+ err = ar9170_write_reg(ar, 0x1c59e0, 0x200);
+ if (err)
+ return err;
+
+ /* may be NULL at first setup */
+ if (ar->channel)
+ bandswitch = ar->channel->band != channel->band;
+ else
+ bandswitch = true;
+
+ /* HW workaround */
+ if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] &&
+ channel->center_freq <= 2417)
+ bandswitch = true;
+
+ err = ar->exec_cmd(ar, AR9170_CMD_FREQ_START, 0, NULL, 0, NULL);
+ if (err)
+ return err;
+
+ if (rfi != AR9170_RFI_NONE || bandswitch) {
+ u32 val = 0x400;
+
+ if (rfi == AR9170_RFI_COLD)
+ val = 0x800;
+
+ /* warm/cold reset BB/ADDA */
+ err = ar9170_write_reg(ar, 0x1d4004, val);
+ if (err)
+ return err;
+
+ err = ar9170_write_reg(ar, 0x1d4004, 0x0);
+ if (err)
+ return err;
+
+ err = ar9170_init_phy(ar, channel->band);
+ if (err)
+ return err;
+
+ err = ar9170_init_rf_banks_0_7(ar,
+ channel->band == IEEE80211_BAND_5GHZ);
+ if (err)
+ return err;
+
+ cmd = AR9170_CMD_RF_INIT;
+ } else {
+ cmd = AR9170_CMD_FREQUENCY;
+ }
+
+ err = ar9170_init_rf_bank4_pwr(ar,
+ channel->band == IEEE80211_BAND_5GHZ,
+ channel->center_freq, bw);
+ if (err)
+ return err;
+
+ switch (bw) {
+ case AR9170_BW_20:
+ tmp = 0x240;
+ offs = 0;
+ break;
+ case AR9170_BW_40_BELOW:
+ tmp = 0x2c4;
+ offs = 3;
+ break;
+ case AR9170_BW_40_ABOVE:
+ tmp = 0x2d4;
+ offs = 1;
+ break;
+ default:
+ BUG();
+ return -ENOSYS;
+ }
+
+ if (0 /* 2 streams capable */)
+ tmp |= 0x100;
+
+ err = ar9170_write_reg(ar, 0x1c5804, tmp);
+ if (err)
+ return err;
+
+ err = ar9170_set_power_cal(ar, channel->center_freq, bw);
+ if (err)
+ return err;
+
+ freqpar = ar9170_get_hw_dyn_params(channel, bw);
+
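+	/*
+	 * Command payload: frequency in kHz, a 20/40 MHz flag, the
+	 * encoded channel offset, the four per-channel coefficients
+	 * and a trailing constant.
+	 */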
+ vals[0] = cpu_to_le32(channel->center_freq * 1000);
+ vals[1] = cpu_to_le32(bw == AR9170_BW_20 ? 0 : 1);
+ vals[2] = cpu_to_le32(offs << 2 | 1);
+ vals[3] = cpu_to_le32(freqpar->coeff_exp);
+ vals[4] = cpu_to_le32(freqpar->coeff_man);
+ vals[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
+ vals[6] = cpu_to_le32(freqpar->coeff_man_shgi);
+ vals[7] = cpu_to_le32(1000);
+
+ err = ar->exec_cmd(ar, cmd, sizeof(vals), (u8 *)vals,
+ sizeof(vals), (u8 *)vals);
+ if (err)
+ return err;
+
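+	/*
+	 * The response carries the raw noise floor readings; store one
+	 * value per chain for (presumably) the main and extension
+	 * channels.
+	 */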
+ for (i = 0; i < 2; i++) {
+ ar->noise[i] = ar9170_calc_noise_dbm(
+ (le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff);
+
+ ar->noise[i + 2] = ar9170_calc_noise_dbm(
+ (le32_to_cpu(vals[5 + i]) >> 23) & 0x1ff);
+ }
+
+ ar->channel = channel;
+ return 0;
+}
diff --git a/drivers/net/wireless/ar9170/usb.c b/drivers/net/wireless/ar9170/usb.c
new file mode 100644
index 000000000000..ad296840893e
--- /dev/null
+++ b/drivers/net/wireless/ar9170/usb.c
@@ -0,0 +1,748 @@
+/*
+ * Atheros AR9170 driver
+ *
+ * USB - frontend
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, Christian Lamparter <chunkeey@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "ar9170.h"
+#include "cmd.h"
+#include "hw.h"
+#include "usb.h"
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
+MODULE_FIRMWARE("ar9170-1.fw");
+MODULE_FIRMWARE("ar9170-2.fw");
+
+static struct usb_device_id ar9170_usb_ids[] = {
+ /* Atheros 9170 */
+ { USB_DEVICE(0x0cf3, 0x9170) },
+ /* Atheros TG121N */
+ { USB_DEVICE(0x0cf3, 0x1001) },
+ /* D-Link DWA 160A */
+ { USB_DEVICE(0x07d1, 0x3c10) },
+ /* Netgear WNDA3100 */
+ { USB_DEVICE(0x0846, 0x9010) },
+ /* Netgear WN111 v2 */
+ { USB_DEVICE(0x0846, 0x9001) },
+ /* Zydas ZD1221 */
+ { USB_DEVICE(0x0ace, 0x1221) },
+ /* Z-Com UB81 BG */
+ { USB_DEVICE(0x0cde, 0x0023) },
+ /* Z-Com UB82 ABG */
+ { USB_DEVICE(0x0cde, 0x0026) },
+ /* Arcadyan WN7512 */
+ { USB_DEVICE(0x083a, 0xf522) },
+ /* Planex GWUS300 */
+ { USB_DEVICE(0x2019, 0x5304) },
+ /* IO-Data WNGDNUS2 */
+ { USB_DEVICE(0x04bb, 0x093f) },
+
+ /* terminate */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
+
+static void ar9170_usb_tx_urb_complete_free(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct ar9170_usb *aru = (struct ar9170_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+
+ if (!aru) {
+ dev_kfree_skb_irq(skb);
+ return;
+ }
+
+ ar9170_handle_tx_status(&aru->common, skb, false,
+ AR9170_TX_STATUS_COMPLETE);
+}
+
+static void ar9170_usb_tx_urb_complete(struct urb *urb)
+{
+}
+
+static void ar9170_usb_irq_completed(struct urb *urb)
+{
+ struct ar9170_usb *aru = urb->context;
+
+ switch (urb->status) {
+ /* everything is fine */
+ case 0:
+ break;
+
+ /* disconnect */
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+
+ default:
+ goto resubmit;
+ }
+
+ print_hex_dump_bytes("ar9170 irq: ", DUMP_PREFIX_OFFSET,
+ urb->transfer_buffer, urb->actual_length);
+
+resubmit:
+ usb_anchor_urb(urb, &aru->rx_submitted);
+ if (usb_submit_urb(urb, GFP_ATOMIC)) {
+ usb_unanchor_urb(urb);
+ goto free;
+ }
+
+ return;
+
+free:
+ usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
+}
+
+static void ar9170_usb_rx_completed(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct ar9170_usb *aru = (struct ar9170_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int err;
+
+ if (!aru)
+ goto free;
+
+ switch (urb->status) {
+ /* everything is fine */
+ case 0:
+ break;
+
+ /* disconnect */
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+
+ default:
+ goto resubmit;
+ }
+
+ skb_put(skb, urb->actual_length);
+ ar9170_rx(&aru->common, skb);
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(urb, &aru->rx_submitted);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ usb_unanchor_urb(urb);
+ dev_kfree_skb_irq(skb);
+ }
+
+ return;
+
+free:
+ dev_kfree_skb_irq(skb);
+ return;
+}
+
+static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru,
+ struct urb *urb, gfp_t gfp)
+{
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE + 32, gfp);
+ if (!skb)
+ return -ENOMEM;
+
+ /* reserve some space for mac80211's radiotap */
+ skb_reserve(skb, 32);
+
+ usb_fill_bulk_urb(urb, aru->udev,
+ usb_rcvbulkpipe(aru->udev, AR9170_EP_RX),
+ skb->data, min(skb_tailroom(skb),
+ AR9170_MAX_RX_BUFFER_SIZE),
+ ar9170_usb_rx_completed, skb);
+
+ return 0;
+}
+
+static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
+{
+ struct urb *urb = NULL;
+ void *ibuf;
+ int err = -ENOMEM;
+
+ /* initialize interrupt endpoint */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ goto out;
+
+ ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
+ if (!ibuf)
+ goto out;
+
+ usb_fill_int_urb(urb, aru->udev,
+ usb_rcvintpipe(aru->udev, AR9170_EP_IRQ), ibuf,
+ 64, ar9170_usb_irq_completed, aru, 1);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urb, &aru->rx_submitted);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_buffer_free(aru->udev, 64, urb->transfer_buffer,
+ urb->transfer_dma);
+ }
+
+out:
+ usb_free_urb(urb);
+ return err;
+}
+
+static int ar9170_usb_alloc_rx_bulk_urbs(struct ar9170_usb *aru)
+{
+ struct urb *urb;
+ int i;
+ int err = -EINVAL;
+
+ for (i = 0; i < AR9170_NUM_RX_URBS; i++) {
+ err = -ENOMEM;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ goto err_out;
+
+ err = ar9170_usb_prep_rx_urb(aru, urb, GFP_KERNEL);
+ if (err) {
+ usb_free_urb(urb);
+ goto err_out;
+ }
+
+ usb_anchor_urb(urb, &aru->rx_submitted);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ dev_kfree_skb_any((void *) urb->transfer_buffer);
+ usb_free_urb(urb);
+ goto err_out;
+ }
+ usb_free_urb(urb);
+ }
+
+ /* the device is now waiting for firmware. */
+ aru->common.state = AR9170_IDLE;
+ return 0;
+
+err_out:
+
+ usb_kill_anchored_urbs(&aru->rx_submitted);
+ return err;
+}
+
+static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
+{
+ int ret;
+
+ aru->common.state = AR9170_UNKNOWN_STATE;
+
+ usb_unlink_anchored_urbs(&aru->tx_submitted);
+
+ /* give the LED OFF command and the deauth frame a chance to go out on the air. */
+ ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
+ msecs_to_jiffies(100));
+ if (ret == 0)
+ dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
+ usb_poison_anchored_urbs(&aru->tx_submitted);
+
+ usb_poison_anchored_urbs(&aru->rx_submitted);
+}
+
+static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
+ unsigned int plen, void *payload,
+ unsigned int outlen, void *out)
+{
+ struct ar9170_usb *aru = (void *) ar;
+ struct urb *urb = NULL;
+ unsigned long flags;
+ int err = -ENOMEM;
+
+ if (unlikely(!IS_ACCEPTING_CMD(ar)))
+ return -EPERM;
+
+ if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4))
+ return -EINVAL;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (unlikely(!urb))
+ goto err_free;
+
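+	/* first command word: payload length in the low byte, command id above it */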
+ ar->cmdbuf[0] = cpu_to_le32(plen);
+ ar->cmdbuf[0] |= cpu_to_le32(cmd << 8);
+ /* writing multiple regs fills this buffer already */
+ if (plen && payload != (u8 *)(&ar->cmdbuf[1]))
+ memcpy(&ar->cmdbuf[1], payload, plen);
+
+ spin_lock_irqsave(&aru->common.cmdlock, flags);
+ aru->readbuf = (u8 *)out;
+ aru->readlen = outlen;
+ spin_unlock_irqrestore(&aru->common.cmdlock, flags);
+
+ usb_fill_int_urb(urb, aru->udev,
+ usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
+ aru->common.cmdbuf, plen + 4,
+ ar9170_usb_tx_urb_complete, NULL, 1);
+
+ usb_anchor_urb(urb, &aru->tx_submitted);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ goto err_unbuf;
+ }
+ usb_free_urb(urb);
+
+ err = wait_for_completion_timeout(&aru->cmd_wait, HZ);
+ if (err == 0) {
+ err = -ETIMEDOUT;
+ goto err_unbuf;
+ }
+
+ if (outlen >= 0 && aru->readlen != outlen) {
+ err = -EMSGSIZE;
+ goto err_unbuf;
+ }
+
+ return 0;
+
+err_unbuf:
+ /* Maybe the device was removed while we were waiting? */
+ if (IS_STARTED(ar)) {
+ dev_err(&aru->udev->dev, "no command feedback "
+ "received (%d).\n", err);
+
+ /* provide some maybe useful debug information */
+ print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE,
+ aru->common.cmdbuf, plen + 4);
+ dump_stack();
+ }
+
+ /* invalidate to avoid completing the next command prematurely */
+ spin_lock_irqsave(&aru->common.cmdlock, flags);
+ aru->readbuf = NULL;
+ aru->readlen = 0;
+ spin_unlock_irqrestore(&aru->common.cmdlock, flags);
+
+err_free:
+
+ return err;
+}
+
+static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
+ bool txstatus_needed, unsigned int extra_len)
+{
+ struct ar9170_usb *aru = (struct ar9170_usb *) ar;
+ struct urb *urb;
+ int err;
+
+ if (unlikely(!IS_STARTED(ar))) {
+ /* Seriously, what were you drink... err... thinking!? */
+ return -EPERM;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (unlikely(!urb))
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(urb, aru->udev,
+ usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
+ skb->data, skb->len + extra_len, (txstatus_needed ?
+ ar9170_usb_tx_urb_complete :
+ ar9170_usb_tx_urb_complete_free), skb);
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
+ usb_anchor_urb(urb, &aru->tx_submitted);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err))
+ usb_unanchor_urb(urb);
+
+ usb_free_urb(urb);
+ return err;
+}
+
+static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len, void *buffer)
+{
+ struct ar9170_usb *aru = (void *) ar;
+ unsigned long flags;
+ u32 in, out;
+
+ if (!buffer)
+ return;
+
+ in = le32_to_cpup((__le32 *)buffer);
+ out = le32_to_cpu(ar->cmdbuf[0]);
+
+ /* mask off length byte */
+ out &= ~0xFF;
+
+ if (aru->readlen >= 0) {
+ /* add expected length */
+ out |= aru->readlen;
+ } else {
+ /* add obtained length */
+ out |= in & 0xFF;
+ }
+
+ /*
+ * Some commands (e.g. AR9170_CMD_FREQUENCY) have a variable response
+ * length and we cannot predict the correct length in advance.
+ * So we only check if we provided enough space for the data.
+ */
+ if (unlikely(out < in)) {
+ dev_warn(&aru->udev->dev, "received invalid command response: "
+ "got %d bytes, expected %d bytes "
+ "(resp length: %d bytes)\n",
+ in, out, len);
+ print_hex_dump_bytes("ar9170 invalid resp: ",
+ DUMP_PREFIX_OFFSET, buffer, len);
+ /*
+ * Do not complete, then the command times out,
+ * and we get a stack trace from there.
+ */
+ return;
+ }
+
+ spin_lock_irqsave(&aru->common.cmdlock, flags);
+ if (aru->readbuf && len > 0) {
+ memcpy(aru->readbuf, buffer + 4, len - 4);
+ aru->readbuf = NULL;
+ }
+ complete(&aru->cmd_wait);
+ spin_unlock_irqrestore(&aru->common.cmdlock, flags);
+}
+
+static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data,
+ size_t len, u32 addr, bool complete)
+{
+ int transfer, err;
+ u8 *buf = kmalloc(4096, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
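+	/*
+	 * Upload in 4 KiB chunks via vendor control requests; the target
+	 * address is passed (shifted right by 8) in the wValue field.
+	 */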
+ while (len) {
+ transfer = min_t(int, len, 4096);
+ memcpy(buf, data, transfer);
+
+ err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
+ 0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
+ addr >> 8, 0, buf, transfer, 1000);
+
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ }
+
+ len -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+
+ if (complete) {
+ err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
+ 0x31 /* FW DL COMPLETE */,
+ 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 5000);
+ }
+
+ return 0;
+}
+
+static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
+{
+ int err = 0;
+
+ err = request_firmware(&aru->init_values, "ar9170-1.fw",
+ &aru->udev->dev);
+ if (err) {
+ dev_err(&aru->udev->dev, "file with init values not found.\n");
+ return err;
+ }
+
+ err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
+ if (err) {
+ release_firmware(aru->init_values);
+ dev_err(&aru->udev->dev, "firmware file not found.\n");
+ return err;
+ }
+
+ return err;
+}
+
+static int ar9170_usb_reset(struct ar9170_usb *aru)
+{
+ int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING);
+
+ if (lock) {
+ ret = usb_lock_device_for_reset(aru->udev, aru->intf);
+ if (ret < 0) {
+ dev_err(&aru->udev->dev, "unable to lock device "
+ "for reset (%d).\n", ret);
+ return ret;
+ }
+ }
+
+ ret = usb_reset_device(aru->udev);
+ if (lock)
+ usb_unlock_device(aru->udev);
+
+ /* let it rest - for a second - */
+ msleep(1000);
+
+ return ret;
+}
+
+static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
+{
+ int err;
+
+ /* First, upload initial values to device RAM */
+ err = ar9170_usb_upload(aru, aru->init_values->data,
+ aru->init_values->size, 0x102800, false);
+ if (err) {
+ dev_err(&aru->udev->dev, "firmware part 1 "
+ "upload failed (%d).\n", err);
+ return err;
+ }
+
+ /* Then, upload the firmware itself and start it */
+ return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size,
+ 0x200000, true);
+}
+
+static int ar9170_usb_init_transport(struct ar9170_usb *aru)
+{
+ struct ar9170 *ar = (void *) &aru->common;
+ int err;
+
+ ar9170_regwrite_begin(ar);
+
+ /* Set USB Rx stream mode MAX packet number to 2 */
+ ar9170_regwrite(AR9170_USB_REG_MAX_AGG_UPLOAD, 0x4);
+
+ /* Set USB Rx stream mode timeout to 10us */
+ ar9170_regwrite(AR9170_USB_REG_UPLOAD_TIME_CTL, 0x80);
+
+ ar9170_regwrite_finish();
+
+ err = ar9170_regwrite_result();
+ if (err)
+ dev_err(&aru->udev->dev, "USB setup failed (%d).\n", err);
+
+ return err;
+}
+
+static void ar9170_usb_stop(struct ar9170 *ar)
+{
+ struct ar9170_usb *aru = (void *) ar;
+ int ret;
+
+ if (IS_ACCEPTING_CMD(ar))
+ aru->common.state = AR9170_STOPPED;
+
+ /* let's wait a while until the tx queues are drained */
+ ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
+
+ usb_poison_anchored_urbs(&aru->tx_submitted);
+
+ /*
+ * Note:
+ * So far we freed all tx urbs, but we won't dare to touch any rx urbs.
+ * Otherwise we would end up with an unresponsive device...
+ */
+}
+
+static int ar9170_usb_open(struct ar9170 *ar)
+{
+ struct ar9170_usb *aru = (void *) ar;
+ int err;
+
+ usb_unpoison_anchored_urbs(&aru->tx_submitted);
+ err = ar9170_usb_init_transport(aru);
+ if (err) {
+ usb_poison_anchored_urbs(&aru->tx_submitted);
+ return err;
+ }
+
+ aru->common.state = AR9170_IDLE;
+ return 0;
+}
+
+static int ar9170_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct ar9170_usb *aru;
+ struct ar9170 *ar;
+ struct usb_device *udev;
+ int err;
+
+ aru = ar9170_alloc(sizeof(*aru));
+ if (IS_ERR(aru)) {
+ err = PTR_ERR(aru);
+ goto out;
+ }
+
+ udev = interface_to_usbdev(intf);
+ usb_get_dev(udev);
+ aru->udev = udev;
+ aru->intf = intf;
+ ar = &aru->common;
+
+ usb_set_intfdata(intf, aru);
+ SET_IEEE80211_DEV(ar->hw, &udev->dev);
+
+ init_usb_anchor(&aru->rx_submitted);
+ init_usb_anchor(&aru->tx_submitted);
+ init_completion(&aru->cmd_wait);
+
+ aru->common.stop = ar9170_usb_stop;
+ aru->common.open = ar9170_usb_open;
+ aru->common.tx = ar9170_usb_tx;
+ aru->common.exec_cmd = ar9170_usb_exec_cmd;
+ aru->common.callback_cmd = ar9170_usb_callback_cmd;
+
+ err = ar9170_usb_reset(aru);
+ if (err)
+ goto err_unlock;
+
+ err = ar9170_usb_request_firmware(aru);
+ if (err)
+ goto err_unlock;
+
+ err = ar9170_usb_alloc_rx_irq_urb(aru);
+ if (err)
+ goto err_freefw;
+
+ err = ar9170_usb_alloc_rx_bulk_urbs(aru);
+ if (err)
+ goto err_unrx;
+
+ err = ar9170_usb_upload_firmware(aru);
+ if (err) {
+ err = ar9170_echo_test(&aru->common, 0x60d43110);
+ if (err) {
+ /* force user intervention by disabling the device */
+ err = usb_driver_set_configuration(aru->udev, -1);
+ dev_err(&aru->udev->dev, "device is in a bad state. "
+ "please reconnect it!\n");
+ goto err_unrx;
+ }
+ }
+
+ err = ar9170_usb_open(ar);
+ if (err)
+ goto err_unrx;
+
+ err = ar9170_register(ar, &udev->dev);
+
+ ar9170_usb_stop(ar);
+ if (err)
+ goto err_unrx;
+
+ return 0;
+
+err_unrx:
+ ar9170_usb_cancel_urbs(aru);
+
+err_freefw:
+ release_firmware(aru->init_values);
+ release_firmware(aru->firmware);
+
+err_unlock:
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+ ieee80211_free_hw(ar->hw);
+out:
+ return err;
+}
+
+static void ar9170_usb_disconnect(struct usb_interface *intf)
+{
+ struct ar9170_usb *aru = usb_get_intfdata(intf);
+
+ if (!aru)
+ return;
+
+ aru->common.state = AR9170_IDLE;
+ ar9170_unregister(&aru->common);
+ ar9170_usb_cancel_urbs(aru);
+
+ release_firmware(aru->init_values);
+ release_firmware(aru->firmware);
+
+ usb_put_dev(aru->udev);
+ usb_set_intfdata(intf, NULL);
+ ieee80211_free_hw(aru->common.hw);
+}
+
+static struct usb_driver ar9170_driver = {
+ .name = "ar9170usb",
+ .probe = ar9170_usb_probe,
+ .disconnect = ar9170_usb_disconnect,
+ .id_table = ar9170_usb_ids,
+ .soft_unbind = 1,
+};
+
+static int __init ar9170_init(void)
+{
+ return usb_register(&ar9170_driver);
+}
+
+static void __exit ar9170_exit(void)
+{
+ usb_deregister(&ar9170_driver);
+}
+
+module_init(ar9170_init);
+module_exit(ar9170_exit);
diff --git a/drivers/net/wireless/ar9170/usb.h b/drivers/net/wireless/ar9170/usb.h
new file mode 100644
index 000000000000..f5852924cd64
--- /dev/null
+++ b/drivers/net/wireless/ar9170/usb.h
@@ -0,0 +1,74 @@
+/*
+ * Atheros AR9170 USB driver
+ *
+ * Driver specific definitions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, Christian Lamparter <chunkeey@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __USB_H
+#define __USB_H
+
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include <net/wireless.h>
+#include <net/mac80211.h>
+#include <linux/firmware.h>
+#include "eeprom.h"
+#include "hw.h"
+#include "ar9170.h"
+
+#define AR9170_NUM_RX_URBS 16
+
+struct firmware;
+
+struct ar9170_usb {
+ struct ar9170 common;
+ struct usb_device *udev;
+ struct usb_interface *intf;
+
+ struct usb_anchor rx_submitted;
+ struct usb_anchor tx_submitted;
+
+ spinlock_t cmdlock;
+ struct completion cmd_wait;
+ int readlen;
+ u8 *readbuf;
+
+ const struct firmware *init_values;
+ const struct firmware *firmware;
+};
+
+#endif /* __USB_H */
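
The rx_submitted and tx_submitted anchors above are what let ar9170_usb_cancel_urbs(), called from the probe error path and from disconnect, tear down every outstanding URB in one go. A minimal sketch of the usual anchored-URB pattern follows; the buffer size, endpoint number and helper names are illustrative assumptions, not the driver's actual implementation (it assumes the includes already pulled in by usb.h above):

static void example_rx_complete(struct urb *urb)
{
	/* hand urb->transfer_buffer to the RX path here, then resubmit
	 * or free the buffer as appropriate */
}

static int example_submit_rx_urb(struct ar9170_usb *aru, gfp_t gfp)
{
	struct urb *urb;
	void *buf;
	int err;

	urb = usb_alloc_urb(0, gfp);
	if (!urb)
		return -ENOMEM;

	buf = kmalloc(8192, gfp);	/* buffer size is an assumption */
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* endpoint number is an assumption */
	usb_fill_bulk_urb(urb, aru->udev, usb_rcvbulkpipe(aru->udev, 2),
			  buf, 8192, example_rx_complete, aru);

	/* anchor before submission so one call can cancel everything */
	usb_anchor_urb(urb, &aru->rx_submitted);
	err = usb_submit_urb(urb, gfp);
	if (err) {
		usb_unanchor_urb(urb);
		kfree(buf);
	}

	/* the anchor keeps its own reference while the URB is in flight */
	usb_free_urb(urb);
	return err;
}

static void example_cancel_rx_urbs(struct ar9170_usb *aru)
{
	usb_kill_anchored_urbs(&aru->rx_submitted);
}

usb_kill_anchored_urbs() walks the anchor and synchronously kills whatever is still in flight, which is why the probe error path above can unwind with a single call.
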
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index 14c11656e82c..a54a67c425c8 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1030,7 +1030,17 @@ static int arlan_mac_addr(struct net_device *dev, void *p)
return 0;
}
-
+static const struct net_device_ops arlan_netdev_ops = {
+ .ndo_open = arlan_open,
+ .ndo_stop = arlan_close,
+ .ndo_start_xmit = arlan_tx,
+ .ndo_get_stats = arlan_statistics,
+ .ndo_set_multicast_list = arlan_set_multicast,
+ .ndo_change_mtu = arlan_change_mtu,
+ .ndo_set_mac_address = arlan_mac_addr,
+ .ndo_tx_timeout = arlan_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
static int __init arlan_setup_device(struct net_device *dev, int num)
{
@@ -1042,14 +1052,7 @@ static int __init arlan_setup_device(struct net_device *dev, int num)
ap->conf = (struct arlan_shmem *)(ap+1);
dev->tx_queue_len = tx_queue_len;
- dev->open = arlan_open;
- dev->stop = arlan_close;
- dev->hard_start_xmit = arlan_tx;
- dev->get_stats = arlan_statistics;
- dev->set_multicast_list = arlan_set_multicast;
- dev->change_mtu = arlan_change_mtu;
- dev->set_mac_address = arlan_mac_addr;
- dev->tx_timeout = arlan_tx_timeout;
+ dev->netdev_ops = &arlan_netdev_ops;
dev->watchdog_timeo = 3*HZ;
ap->irq_test_done = 0;
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 0dc2c7321c8b..0b616e72fe05 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -204,9 +204,9 @@
#define AR5K_TUNE_CWMAX_11B 1023
#define AR5K_TUNE_CWMAX_XR 7
#define AR5K_TUNE_NOISE_FLOOR -72
-#define AR5K_TUNE_MAX_TXPOWER 60
-#define AR5K_TUNE_DEFAULT_TXPOWER 30
-#define AR5K_TUNE_TPC_TXPOWER true
+#define AR5K_TUNE_MAX_TXPOWER 63
+#define AR5K_TUNE_DEFAULT_TXPOWER 25
+#define AR5K_TUNE_TPC_TXPOWER false
#define AR5K_TUNE_ANT_DIVERSITY true
#define AR5K_TUNE_HWTXTRIES 4
@@ -551,11 +551,11 @@ enum ath5k_pkt_type {
*/
#define AR5K_TXPOWER_OFDM(_r, _v) ( \
((0 & 1) << ((_v) + 6)) | \
- (((ah->ah_txpower.txp_rates[(_r)]) & 0x3f) << (_v)) \
+ (((ah->ah_txpower.txp_rates_power_table[(_r)]) & 0x3f) << (_v)) \
)
#define AR5K_TXPOWER_CCK(_r, _v) ( \
- (ah->ah_txpower.txp_rates[(_r)] & 0x3f) << (_v) \
+ (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
)
/*
@@ -1085,13 +1085,25 @@ struct ath5k_hw {
struct ath5k_gain ah_gain;
u8 ah_offset[AR5K_MAX_RF_BANKS];
+
struct {
- u16 txp_pcdac[AR5K_EEPROM_POWER_TABLE_SIZE];
- u16 txp_rates[AR5K_MAX_RATES];
- s16 txp_min;
- s16 txp_max;
+ /* Temporary tables used for interpolation */
+ u8 tmpL[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_POWER_TABLE_SIZE];
+ u8 tmpR[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_POWER_TABLE_SIZE];
+ u8 txp_pd_table[AR5K_EEPROM_POWER_TABLE_SIZE * 2];
+ u16 txp_rates_power_table[AR5K_MAX_RATES];
+ u8 txp_min_idx;
bool txp_tpc;
+ /* Values in 0.25dB units */
+ s16 txp_min_pwr;
+ s16 txp_max_pwr;
+ s16 txp_offset;
s16 txp_ofdm;
+ /* Values in dB units */
+ s16 txp_cck_ofdm_pwr_delta;
+ s16 txp_cck_ofdm_gainf_delta;
} ah_txpower;
struct {
@@ -1161,6 +1173,7 @@ extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_l
/* EEPROM access functions */
extern int ath5k_eeprom_init(struct ath5k_hw *ah);
+extern void ath5k_eeprom_detach(struct ath5k_hw *ah);
extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
@@ -1256,8 +1269,8 @@ extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant);
extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* TX power setup */
-extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower);
-extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power);
+extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
+extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 ee_mode, u8 txpower);
/*
* Functions used internaly
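
The reworked ah_txpower state above mixes three power scales: whole dB for the CCK/OFDM deltas, 0.5dB steps as stored in the EEPROM curves, and the 0.25dB units used for txp_min_pwr, txp_max_pwr and txp_offset. A tiny sketch of the implied conversions; the helper names are made up for illustration and are not part of the driver:

/* illustration only -- these helpers do not exist in the driver */
static inline s16 dbm_to_quarter_db(s16 dbm)
{
	return dbm * 4;		/* e.g. txp_max_pwr / 4 recovers dBm */
}

static inline s16 half_db_to_quarter_db(s16 half_db)
{
	return half_db * 2;	/* EEPROM curve points come in 0.5dB steps */
}
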
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
index 656cb9dc833b..70d376c63aac 100644
--- a/drivers/net/wireless/ath5k/attach.c
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -341,6 +341,8 @@ void ath5k_hw_detach(struct ath5k_hw *ah)
if (ah->ah_rf_banks != NULL)
kfree(ah->ah_rf_banks);
+ ath5k_eeprom_detach(ah);
+
/* assume interrupts are down */
kfree(ah);
}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index cad3ccf61b00..5d57d774e466 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -685,13 +685,6 @@ ath5k_pci_resume(struct pci_dev *pdev)
if (err)
return err;
- /*
- * Suspend/Resume resets the PCI configuration space, so we have to
- * re-disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state
- */
- pci_write_config_byte(pdev, 0x41, 0);
-
err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
if (err) {
ATH5K_ERR(sc, "request_irq failed\n");
@@ -1095,9 +1088,18 @@ ath5k_mode_setup(struct ath5k_softc *sc)
static inline int
ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
- WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
- "hw_rix out of bounds: %x\n", hw_rix);
- return sc->rate_idx[sc->curband->band][hw_rix];
+ int rix;
+
+ /* return base rate on errors */
+ if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
+ "hw_rix out of bounds: %x\n", hw_rix))
+ return 0;
+
+ rix = sc->rate_idx[sc->curband->band][hw_rix];
+ if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
+ rix = 0;
+
+ return rix;
}
/***************\
@@ -1216,6 +1218,9 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
pktlen = skb->len;
+ /* FIXME: If we are in g mode and rate is a CCK rate
+ * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
+ * from tx power (value is in dB units already) */
if (info->control.hw_key) {
keyidx = info->control.hw_key->hw_key_idx;
pktlen += info->control.hw_key->icv_len;
@@ -2044,6 +2049,9 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
antenna = sc->bsent & 4 ? 2 : 1;
}
+ /* FIXME: If we are in g mode and rate is a CCK rate
+ * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
+ * from tx power (value is in dB units already) */
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
ieee80211_get_hdrlen_from_skb(skb),
@@ -2305,7 +2313,7 @@ ath5k_init(struct ath5k_softc *sc)
sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+ AR5K_INT_FATAL | AR5K_INT_GLOBAL;
ret = ath5k_reset(sc, false, false);
if (ret)
goto done;
@@ -2554,7 +2562,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (skb_headroom(skb) < padsize) {
ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
" headroom to pad %d\n", hdrlen, padsize);
- return NETDEV_TX_BUSY;
+ goto drop_packet;
}
skb_push(skb, padsize);
memmove(skb->data, skb->data+padsize, hdrlen);
@@ -2565,7 +2573,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
spin_unlock_irqrestore(&sc->txbuflock, flags);
ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
- return NETDEV_TX_BUSY;
+ goto drop_packet;
}
bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
list_del(&bf->list);
@@ -2582,10 +2590,12 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
list_add_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock_irqrestore(&sc->txbuflock, flags);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ goto drop_packet;
}
+ return NETDEV_TX_OK;
+drop_packet:
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2608,12 +2618,6 @@ ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel)
goto err;
}
- /*
- * This is needed only to setup initial state
- * but it's best done after a reset.
- */
- ath5k_hw_set_txpower_limit(sc->ah, 0);
-
ret = ath5k_rx_start(sc);
if (ret) {
ATH5K_ERR(sc, "can't start recv logic\n");
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 20e0d14b41ec..822956114cd7 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -112,7 +112,7 @@ struct ath5k_softc {
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
- u8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+ s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
enum nl80211_iftype opmode;
struct ath5k_hw *ah; /* Atheros HW */
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index b40a9287a39a..dc30a2b70a6b 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -194,6 +194,10 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
return -EINVAL;
}
+ tx_power += ah->ah_txpower.txp_offset;
+ if (tx_power > AR5K_TUNE_MAX_TXPOWER)
+ tx_power = AR5K_TUNE_MAX_TXPOWER;
+
/* Clear descriptor */
memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
index ac45ca47ca87..c0fb3b09ba45 100644
--- a/drivers/net/wireless/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
- * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
- * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -98,11 +98,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
int ret;
u16 val;
- /* Initial TX thermal adjustment values */
- ee->ee_tx_clip = 4;
- ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
- ee->ee_gain_select = 1;
-
/*
* Read values from EEPROM and store them in the capability structure
*/
@@ -241,22 +236,22 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
switch(mode) {
case AR5K_EEPROM_MODE_11A:
- ee->ee_ob[mode][3] = (val >> 5) & 0x7;
- ee->ee_db[mode][3] = (val >> 2) & 0x7;
- ee->ee_ob[mode][2] = (val << 1) & 0x7;
+ ee->ee_ob[mode][3] = (val >> 5) & 0x7;
+ ee->ee_db[mode][3] = (val >> 2) & 0x7;
+ ee->ee_ob[mode][2] = (val << 1) & 0x7;
AR5K_EEPROM_READ(o++, val);
- ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
- ee->ee_db[mode][2] = (val >> 12) & 0x7;
- ee->ee_ob[mode][1] = (val >> 9) & 0x7;
- ee->ee_db[mode][1] = (val >> 6) & 0x7;
- ee->ee_ob[mode][0] = (val >> 3) & 0x7;
- ee->ee_db[mode][0] = val & 0x7;
+ ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
+ ee->ee_db[mode][2] = (val >> 12) & 0x7;
+ ee->ee_ob[mode][1] = (val >> 9) & 0x7;
+ ee->ee_db[mode][1] = (val >> 6) & 0x7;
+ ee->ee_ob[mode][0] = (val >> 3) & 0x7;
+ ee->ee_db[mode][0] = val & 0x7;
break;
case AR5K_EEPROM_MODE_11G:
case AR5K_EEPROM_MODE_11B:
- ee->ee_ob[mode][1] = (val >> 4) & 0x7;
- ee->ee_db[mode][1] = val & 0x7;
+ ee->ee_ob[mode][1] = (val >> 4) & 0x7;
+ ee->ee_db[mode][1] = val & 0x7;
break;
}
@@ -504,35 +499,6 @@ ath5k_eeprom_init_modes(struct ath5k_hw *ah)
return 0;
}
-/* Used to match PCDAC steps with power values on RF5111 chips
- * (eeprom versions < 4). For RF5111 we have 10 pre-defined PCDAC
- * steps that match with the power values we read from eeprom. On
- * older eeprom versions (< 3.2) these steps are equaly spaced at
- * 10% of the pcdac curve -until the curve reaches it's maximum-
- * (10 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
- * these 10 steps are spaced in a different way. This function returns
- * the pcdac steps based on eeprom version and curve min/max so that we
- * can have pcdac/pwr points.
- */
-static inline void
-ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
-{
- static const u16 intercepts3[] =
- { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
- static const u16 intercepts3_2[] =
- { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
- const u16 *ip;
- int i;
-
- if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
- ip = intercepts3_2;
- else
- ip = intercepts3;
-
- for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
- *vp++ = (ip[i] * max + (100 - ip[i]) * min) / 100;
-}
-
/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff
* frequency mask) */
static inline int
@@ -546,26 +512,25 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
int ret;
u16 val;
+ ee->ee_n_piers[mode] = 0;
while(i < max) {
AR5K_EEPROM_READ(o++, val);
- freq1 = (val >> 8) & 0xff;
- freq2 = val & 0xff;
-
- if (freq1) {
- pc[i++].freq = ath5k_eeprom_bin2freq(ee,
- freq1, mode);
- ee->ee_n_piers[mode]++;
- }
+ freq1 = val & 0xff;
+ if (!freq1)
+ break;
- if (freq2) {
- pc[i++].freq = ath5k_eeprom_bin2freq(ee,
- freq2, mode);
- ee->ee_n_piers[mode]++;
- }
+ pc[i++].freq = ath5k_eeprom_bin2freq(ee,
+ freq1, mode);
+ ee->ee_n_piers[mode]++;
- if (!freq1 || !freq2)
+ freq2 = (val >> 8) & 0xff;
+ if (!freq2)
break;
+
+ pc[i++].freq = ath5k_eeprom_bin2freq(ee,
+ freq2, mode);
+ ee->ee_n_piers[mode]++;
}
/* return new offset */
@@ -652,13 +617,122 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
return 0;
}
-/* Read power calibration for RF5111 chips
+/*
+ * Read power calibration for RF5111 chips
+ *
* For RF5111 we have an XPD -eXternal Power Detector- curve
- * for each calibrated channel. Each curve has PCDAC steps on
- * x axis and power on y axis and looks like a logarithmic
- * function. To recreate the curve and pass the power values
- * on the pcdac table, we read 10 points here and interpolate later.
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * here and interpolate later.
*/
+
+/* Used to match PCDAC steps with power values on RF5111 chips
+ * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
+ * steps that match with the power values we read from eeprom. On
+ * older eeprom versions (< 3.2) these steps are equally spaced at
+ * 10% of the pcdac curve -until the curve reaches its maximum-
+ * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
+ * these 11 steps are spaced in a different way. This function returns
+ * the pcdac steps based on eeprom version and curve min/max so that we
+ * can have pcdac/pwr points.
+ */
+static inline void
+ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
+{
+ static const u16 intercepts3[] =
+ { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
+ static const u16 intercepts3_2[] =
+ { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
+ const u16 *ip;
+ int i;
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
+ ip = intercepts3_2;
+ else
+ ip = intercepts3;
+
+ for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
+ vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
+}
+
+/* Convert RF5111 specific data to generic raw data
+ * used by interpolation code */
+static int
+ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
+ struct ath5k_chan_pcal_info *chinfo)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf5111 *pcinfo;
+ struct ath5k_pdgain_info *pd;
+ u8 pier, point, idx;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+
+ /* Fill raw data for each calibration pier */
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+
+ pcinfo = &chinfo[pier].rf5111_info;
+
+ /* Allocate pd_curves for this cal pier */
+ chinfo[pier].pd_curves =
+ kcalloc(AR5K_EEPROM_N_PD_CURVES,
+ sizeof(struct ath5k_pdgain_info),
+ GFP_KERNEL);
+
+ if (!chinfo[pier].pd_curves)
+ return -ENOMEM;
+
+ /* Only one curve for RF5111
+ * find out which one and place
+ * it in pd_curves.
+ * Note: ee_x_gain is reversed here */
+ for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
+
+ if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) {
+ pdgain_idx[0] = idx;
+ break;
+ }
+ }
+
+ ee->ee_pd_gains[mode] = 1;
+
+ pd = &chinfo[pier].pd_curves[idx];
+
+ pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
+ sizeof(u8), GFP_KERNEL);
+ if (!pd->pd_step)
+ return -ENOMEM;
+
+ pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
+ sizeof(s16), GFP_KERNEL);
+ if (!pd->pd_pwr)
+ return -ENOMEM;
+
+ /* Fill raw dataset
+ * (convert power to 0.25dB units
+ * for RF5112 compatibility) */
+ for (point = 0; point < pd->pd_points; point++) {
+
+ /* Absolute values */
+ pd->pd_pwr[point] = 2 * pcinfo->pwr[point];
+
+ /* Already sorted */
+ pd->pd_step[point] = pcinfo->pcdac[point];
+ }
+
+ /* Set min/max pwr */
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+ chinfo[pier].max_pwr = pd->pd_pwr[10];
+
+ }
+
+ return 0;
+}
+
+/* Parse EEPROM data */
static int
ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
{
@@ -747,30 +821,165 @@ ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
cdata->pcdac_max, cdata->pcdac);
}
- return 0;
+ return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal);
}
-/* Read power calibration for RF5112 chips
+
+/*
+ * Read power calibration for RF5112 chips
+ *
* For RF5112 we have 4 XPD -eXternal Power Detector- curves
* for each calibrated channel on 0, -6, -12 and -18dbm but we only
- * use the higher (3) and the lower (0) curves. Each curve has PCDAC
- * steps on x axis and power on y axis and looks like a linear
- * function. To recreate the curve and pass the power values
- * on the pcdac table, we read 4 points for xpd 0 and 3 points
- * for xpd 3 here and interpolate later.
+ * use the higher (3) and the lower (0) curves. Each curve has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * to the hw, we read 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) here and
+ * interpolate later.
*
* Note: Many vendors just use xpd 0 so xpd 3 is zeroed.
*/
+
+/* Convert RF5112 specific data to generic raw data
+ * used by interpolation code */
+static int
+ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
+ struct ath5k_chan_pcal_info *chinfo)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf5112 *pcinfo;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+ unsigned int pier, pdg, point;
+
+ /* Fill raw data for each calibration pier */
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+
+ pcinfo = &chinfo[pier].rf5112_info;
+
+ /* Allocate pd_curves for this cal pier */
+ chinfo[pier].pd_curves =
+ kcalloc(AR5K_EEPROM_N_PD_CURVES,
+ sizeof(struct ath5k_pdgain_info),
+ GFP_KERNEL);
+
+ if (!chinfo[pier].pd_curves)
+ return -ENOMEM;
+
+ /* Fill pd_curves */
+ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+
+ u8 idx = pdgain_idx[pdg];
+ struct ath5k_pdgain_info *pd =
+ &chinfo[pier].pd_curves[idx];
+
+ /* Lowest gain curve (max power) */
+ if (pdg == 0) {
+ /* One more point for better accuracy */
+ pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(pd->pd_points,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!pd->pd_step)
+ return -ENOMEM;
+
+ pd->pd_pwr = kcalloc(pd->pd_points,
+ sizeof(s16), GFP_KERNEL);
+
+ if (!pd->pd_pwr)
+ return -ENOMEM;
+
+
+ /* Fill raw dataset
+ * (all power levels are in 0.25dB units) */
+ pd->pd_step[0] = pcinfo->pcdac_x0[0];
+ pd->pd_pwr[0] = pcinfo->pwr_x0[0];
+
+ for (point = 1; point < pd->pd_points;
+ point++) {
+ /* Absolute values */
+ pd->pd_pwr[point] =
+ pcinfo->pwr_x0[point];
+
+ /* Deltas */
+ pd->pd_step[point] =
+ pd->pd_step[point - 1] +
+ pcinfo->pcdac_x0[point];
+ }
+
+ /* Set min power for this frequency */
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+
+ /* Highest gain curve (min power) */
+ } else if (pdg == 1) {
+
+ pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(pd->pd_points,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!pd->pd_step)
+ return -ENOMEM;
+
+ pd->pd_pwr = kcalloc(pd->pd_points,
+ sizeof(s16), GFP_KERNEL);
+
+ if (!pd->pd_pwr)
+ return -ENOMEM;
+
+ /* Fill raw dataset
+ * (all power levels are in 0.25dB units) */
+ for (point = 0; point < pd->pd_points;
+ point++) {
+ /* Absolute values */
+ pd->pd_pwr[point] =
+ pcinfo->pwr_x3[point];
+
+ /* Fixed points */
+ pd->pd_step[point] =
+ pcinfo->pcdac_x3[point];
+ }
+
+ /* Since we have a higher gain curve
+ * override min power */
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Parse EEPROM data */
static int
ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info;
struct ath5k_chan_pcal_info *gen_chan_info;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
u32 offset;
- unsigned int i, c;
+ u8 i, c;
u16 val;
int ret;
+ u8 pd_gains = 0;
+
+ /* Count how many curves we have and
+ * identify them (which one of the 4
+ * available curves we have on each count).
+ * Curves are stored from lower (x0) to
+ * higher (x3) gain */
+ for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) {
+ /* ee_x_gain[mode] is x gain mask */
+ if ((ee->ee_x_gain[mode] >> i) & 0x1)
+ pdgain_idx[pd_gains++] = i;
+ }
+ ee->ee_pd_gains[mode] = pd_gains;
+
+ if (pd_gains == 0 || pd_gains > 2)
+ return -EINVAL;
switch (mode) {
case AR5K_EEPROM_MODE_11A:
@@ -808,13 +1017,13 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
for (i = 0; i < ee->ee_n_piers[mode]; i++) {
chan_pcal_info = &gen_chan_info[i].rf5112_info;
- /* Power values in dBm * 4
+ /* Power values in quarter dB
* for the lower xpd gain curve
* (0 dBm -> higher output power) */
for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) {
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr_x0[c] = (val & 0xff);
- chan_pcal_info->pwr_x0[++c] = ((val >> 8) & 0xff);
+ chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff);
+ chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff);
}
/* PCDAC steps
@@ -825,12 +1034,12 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f);
chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f);
- /* Power values in dBm * 4
+ /* Power values in quarter dB
* for the higher xpd gain curve
* (18 dBm -> lower output power) */
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr_x3[0] = (val & 0xff);
- chan_pcal_info->pwr_x3[1] = ((val >> 8) & 0xff);
+ chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff);
+ chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) & 0xff);
AR5K_EEPROM_READ(offset++, val);
chan_pcal_info->pwr_x3[2] = (val & 0xff);
@@ -843,24 +1052,36 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
chan_pcal_info->pcdac_x3[2] = 63;
if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) {
- chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0xff);
+ chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f);
/* Last xpd0 power level is also channel maximum */
gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3];
} else {
chan_pcal_info->pcdac_x0[0] = 1;
- gen_chan_info[i].max_pwr = ((val >> 8) & 0xff);
+ gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff);
}
- /* Recreate pcdac_x0 table for this channel using pcdac steps */
- chan_pcal_info->pcdac_x0[1] += chan_pcal_info->pcdac_x0[0];
- chan_pcal_info->pcdac_x0[2] += chan_pcal_info->pcdac_x0[1];
- chan_pcal_info->pcdac_x0[3] += chan_pcal_info->pcdac_x0[2];
}
- return 0;
+ return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info);
}
+
+/*
+ * Read power calibration for RF2413 chips
+ *
+ * For RF2413 we have a Power to PDDAC table (Power Detector)
+ * instead of a PCDAC and 4 pd gain curves for each calibrated channel.
+ * Each curve has power on x axis in 0.5dB steps and PDADC steps on y
+ * axis and looks like an exponential function like the RF5111 curve.
+ *
+ * To recreate the curves we read the points here and interpolate
+ * later. Note that in most cases only 2 (higher and lower) curves are
+ * used (like RF5112) but vendors have the opportunity to include all
+ * 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
+ */
+
/* For RF2413 power calibration data doesn't start on a fixed location and
* if a mode is not supported, it's section is missing -not zeroed-.
* So we need to calculate the starting offset for each section by using
@@ -890,13 +1111,15 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
switch(mode) {
case AR5K_EEPROM_MODE_11G:
if (AR5K_EEPROM_HDR_11B(ee->ee_header))
- offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) +
- AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
+ offset += ath5k_pdgains_size_2413(ee,
+ AR5K_EEPROM_MODE_11B) +
+ AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
/* fall through */
case AR5K_EEPROM_MODE_11B:
if (AR5K_EEPROM_HDR_11A(ee->ee_header))
- offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) +
- AR5K_EEPROM_N_5GHZ_CHAN / 2;
+ offset += ath5k_pdgains_size_2413(ee,
+ AR5K_EEPROM_MODE_11A) +
+ AR5K_EEPROM_N_5GHZ_CHAN / 2;
/* fall through */
case AR5K_EEPROM_MODE_11A:
break;
@@ -907,37 +1130,118 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
return offset;
}
-/* Read power calibration for RF2413 chips
- * For RF2413 we have a PDDAC table (Power Detector) instead
- * of a PCDAC and 4 pd gain curves for each calibrated channel.
- * Each curve has PDDAC steps on x axis and power on y axis and
- * looks like an exponential function. To recreate the curves
- * we read here the points and interpolate later. Note that
- * in most cases only higher and lower curves are used (like
- * RF5112) but vendors have the oportunity to include all 4
- * curves on eeprom. The final curve (higher power) has an extra
- * point for better accuracy like RF5112.
- */
+/* Convert RF2413 specific data to generic raw data
+ * used by interpolation code */
+static int
+ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
+ struct ath5k_chan_pcal_info *chinfo)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf2413 *pcinfo;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+ unsigned int pier, pdg, point;
+
+ /* Fill raw data for each calibration pier */
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+
+ pcinfo = &chinfo[pier].rf2413_info;
+
+ /* Allocate pd_curves for this cal pier */
+ chinfo[pier].pd_curves =
+ kcalloc(AR5K_EEPROM_N_PD_CURVES,
+ sizeof(struct ath5k_pdgain_info),
+ GFP_KERNEL);
+
+ if (!chinfo[pier].pd_curves)
+ return -ENOMEM;
+
+ /* Fill pd_curves */
+ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+
+ u8 idx = pdgain_idx[pdg];
+ struct ath5k_pdgain_info *pd =
+ &chinfo[pier].pd_curves[idx];
+
+ /* One more point for the highest power
+ * curve (lowest gain) */
+ if (pdg == ee->ee_pd_gains[mode] - 1)
+ pd->pd_points = AR5K_EEPROM_N_PD_POINTS;
+ else
+ pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(pd->pd_points,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!pd->pd_step)
+ return -ENOMEM;
+
+ pd->pd_pwr = kcalloc(pd->pd_points,
+ sizeof(s16), GFP_KERNEL);
+
+ if (!pd->pd_pwr)
+ return -ENOMEM;
+
+ /* Fill raw dataset
+ * convert all pwr levels to
+ * quarter dB for RF5112 compatibility */
+ pd->pd_step[0] = pcinfo->pddac_i[pdg];
+ pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg];
+
+ for (point = 1; point < pd->pd_points; point++) {
+
+ pd->pd_pwr[point] = pd->pd_pwr[point - 1] +
+ 2 * pcinfo->pwr[pdg][point - 1];
+
+ pd->pd_step[point] = pd->pd_step[point - 1] +
+ pcinfo->pddac[pdg][point - 1];
+
+ }
+
+ /* Highest gain curve -> min power */
+ if (pdg == 0)
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+
+ /* Lowest gain curve -> max power */
+ if (pdg == ee->ee_pd_gains[mode] - 1)
+ chinfo[pier].max_pwr =
+ pd->pd_pwr[pd->pd_points - 1];
+ }
+ }
+
+ return 0;
+}
+
+/* Parse EEPROM data */
static int
ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- struct ath5k_chan_pcal_info_rf2413 *chan_pcal_info;
- struct ath5k_chan_pcal_info *gen_chan_info;
- unsigned int i, c;
+ struct ath5k_chan_pcal_info_rf2413 *pcinfo;
+ struct ath5k_chan_pcal_info *chinfo;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
u32 offset;
- int ret;
+ int idx, i, ret;
u16 val;
u8 pd_gains = 0;
- if (ee->ee_x_gain[mode] & 0x1) pd_gains++;
- if ((ee->ee_x_gain[mode] >> 1) & 0x1) pd_gains++;
- if ((ee->ee_x_gain[mode] >> 2) & 0x1) pd_gains++;
- if ((ee->ee_x_gain[mode] >> 3) & 0x1) pd_gains++;
+ /* Count how many curves we have and
+ * identify them (which one of the 4
+ * available curves we have on each count).
+ * Curves are stored from higher to
+ * lower gain so we go backwards */
+ for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) {
+ /* ee_x_gain[mode] is x gain mask */
+ if ((ee->ee_x_gain[mode] >> idx) & 0x1)
+ pdgain_idx[pd_gains++] = idx;
+
+ }
ee->ee_pd_gains[mode] = pd_gains;
+ if (pd_gains == 0)
+ return -EINVAL;
+
offset = ath5k_cal_data_offset_2413(ee, mode);
- ee->ee_n_piers[mode] = 0;
switch (mode) {
case AR5K_EEPROM_MODE_11A:
if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
@@ -945,7 +1249,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
ath5k_eeprom_init_11a_pcal_freq(ah, offset);
offset += AR5K_EEPROM_N_5GHZ_CHAN / 2;
- gen_chan_info = ee->ee_pwr_cal_a;
+ chinfo = ee->ee_pwr_cal_a;
break;
case AR5K_EEPROM_MODE_11B:
if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
@@ -953,7 +1257,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
ath5k_eeprom_init_11bg_2413(ah, mode, offset);
offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
- gen_chan_info = ee->ee_pwr_cal_b;
+ chinfo = ee->ee_pwr_cal_b;
break;
case AR5K_EEPROM_MODE_11G:
if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
@@ -961,41 +1265,35 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
ath5k_eeprom_init_11bg_2413(ah, mode, offset);
offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
- gen_chan_info = ee->ee_pwr_cal_g;
+ chinfo = ee->ee_pwr_cal_g;
break;
default:
return -EINVAL;
}
- if (pd_gains == 0)
- return 0;
-
for (i = 0; i < ee->ee_n_piers[mode]; i++) {
- chan_pcal_info = &gen_chan_info[i].rf2413_info;
+ pcinfo = &chinfo[i].rf2413_info;
/*
* Read pwr_i, pddac_i and the first
* 2 pd points (pwr, pddac)
*/
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr_i[0] = val & 0x1f;
- chan_pcal_info->pddac_i[0] = (val >> 5) & 0x7f;
- chan_pcal_info->pwr[0][0] =
- (val >> 12) & 0xf;
+ pcinfo->pwr_i[0] = val & 0x1f;
+ pcinfo->pddac_i[0] = (val >> 5) & 0x7f;
+ pcinfo->pwr[0][0] = (val >> 12) & 0xf;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pddac[0][0] = val & 0x3f;
- chan_pcal_info->pwr[0][1] = (val >> 6) & 0xf;
- chan_pcal_info->pddac[0][1] =
- (val >> 10) & 0x3f;
+ pcinfo->pddac[0][0] = val & 0x3f;
+ pcinfo->pwr[0][1] = (val >> 6) & 0xf;
+ pcinfo->pddac[0][1] = (val >> 10) & 0x3f;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr[0][2] = val & 0xf;
- chan_pcal_info->pddac[0][2] =
- (val >> 4) & 0x3f;
+ pcinfo->pwr[0][2] = val & 0xf;
+ pcinfo->pddac[0][2] = (val >> 4) & 0x3f;
- chan_pcal_info->pwr[0][3] = 0;
- chan_pcal_info->pddac[0][3] = 0;
+ pcinfo->pwr[0][3] = 0;
+ pcinfo->pddac[0][3] = 0;
if (pd_gains > 1) {
/*
@@ -1003,44 +1301,36 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
* so it only has 2 pd points.
* Continue wih pd gain 1.
*/
- chan_pcal_info->pwr_i[1] = (val >> 10) & 0x1f;
+ pcinfo->pwr_i[1] = (val >> 10) & 0x1f;
- chan_pcal_info->pddac_i[1] = (val >> 15) & 0x1;
+ pcinfo->pddac_i[1] = (val >> 15) & 0x1;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pddac_i[1] |= (val & 0x3F) << 1;
+ pcinfo->pddac_i[1] |= (val & 0x3F) << 1;
- chan_pcal_info->pwr[1][0] = (val >> 6) & 0xf;
- chan_pcal_info->pddac[1][0] =
- (val >> 10) & 0x3f;
+ pcinfo->pwr[1][0] = (val >> 6) & 0xf;
+ pcinfo->pddac[1][0] = (val >> 10) & 0x3f;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr[1][1] = val & 0xf;
- chan_pcal_info->pddac[1][1] =
- (val >> 4) & 0x3f;
- chan_pcal_info->pwr[1][2] =
- (val >> 10) & 0xf;
-
- chan_pcal_info->pddac[1][2] =
- (val >> 14) & 0x3;
+ pcinfo->pwr[1][1] = val & 0xf;
+ pcinfo->pddac[1][1] = (val >> 4) & 0x3f;
+ pcinfo->pwr[1][2] = (val >> 10) & 0xf;
+
+ pcinfo->pddac[1][2] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pddac[1][2] |=
- (val & 0xF) << 2;
+ pcinfo->pddac[1][2] |= (val & 0xF) << 2;
- chan_pcal_info->pwr[1][3] = 0;
- chan_pcal_info->pddac[1][3] = 0;
+ pcinfo->pwr[1][3] = 0;
+ pcinfo->pddac[1][3] = 0;
} else if (pd_gains == 1) {
/*
* Pd gain 0 is the last one so
* read the extra point.
*/
- chan_pcal_info->pwr[0][3] =
- (val >> 10) & 0xf;
+ pcinfo->pwr[0][3] = (val >> 10) & 0xf;
- chan_pcal_info->pddac[0][3] =
- (val >> 14) & 0x3;
+ pcinfo->pddac[0][3] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pddac[0][3] |=
- (val & 0xF) << 2;
+ pcinfo->pddac[0][3] |= (val & 0xF) << 2;
}
/*
@@ -1048,105 +1338,65 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
* as above.
*/
if (pd_gains > 2) {
- chan_pcal_info->pwr_i[2] = (val >> 4) & 0x1f;
- chan_pcal_info->pddac_i[2] = (val >> 9) & 0x7f;
+ pcinfo->pwr_i[2] = (val >> 4) & 0x1f;
+ pcinfo->pddac_i[2] = (val >> 9) & 0x7f;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr[2][0] =
- (val >> 0) & 0xf;
- chan_pcal_info->pddac[2][0] =
- (val >> 4) & 0x3f;
- chan_pcal_info->pwr[2][1] =
- (val >> 10) & 0xf;
-
- chan_pcal_info->pddac[2][1] =
- (val >> 14) & 0x3;
+ pcinfo->pwr[2][0] = (val >> 0) & 0xf;
+ pcinfo->pddac[2][0] = (val >> 4) & 0x3f;
+ pcinfo->pwr[2][1] = (val >> 10) & 0xf;
+
+ pcinfo->pddac[2][1] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pddac[2][1] |=
- (val & 0xF) << 2;
+ pcinfo->pddac[2][1] |= (val & 0xF) << 2;
- chan_pcal_info->pwr[2][2] =
- (val >> 4) & 0xf;
- chan_pcal_info->pddac[2][2] =
- (val >> 8) & 0x3f;
+ pcinfo->pwr[2][2] = (val >> 4) & 0xf;
+ pcinfo->pddac[2][2] = (val >> 8) & 0x3f;
- chan_pcal_info->pwr[2][3] = 0;
- chan_pcal_info->pddac[2][3] = 0;
+ pcinfo->pwr[2][3] = 0;
+ pcinfo->pddac[2][3] = 0;
} else if (pd_gains == 2) {
- chan_pcal_info->pwr[1][3] =
- (val >> 4) & 0xf;
- chan_pcal_info->pddac[1][3] =
- (val >> 8) & 0x3f;
+ pcinfo->pwr[1][3] = (val >> 4) & 0xf;
+ pcinfo->pddac[1][3] = (val >> 8) & 0x3f;
}
if (pd_gains > 3) {
- chan_pcal_info->pwr_i[3] = (val >> 14) & 0x3;
+ pcinfo->pwr_i[3] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr_i[3] |= ((val >> 0) & 0x7) << 2;
+ pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2;
- chan_pcal_info->pddac_i[3] = (val >> 3) & 0x7f;
- chan_pcal_info->pwr[3][0] =
- (val >> 10) & 0xf;
- chan_pcal_info->pddac[3][0] =
- (val >> 14) & 0x3;
+ pcinfo->pddac_i[3] = (val >> 3) & 0x7f;
+ pcinfo->pwr[3][0] = (val >> 10) & 0xf;
+ pcinfo->pddac[3][0] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pddac[3][0] |=
- (val & 0xF) << 2;
- chan_pcal_info->pwr[3][1] =
- (val >> 4) & 0xf;
- chan_pcal_info->pddac[3][1] =
- (val >> 8) & 0x3f;
-
- chan_pcal_info->pwr[3][2] =
- (val >> 14) & 0x3;
+ pcinfo->pddac[3][0] |= (val & 0xF) << 2;
+ pcinfo->pwr[3][1] = (val >> 4) & 0xf;
+ pcinfo->pddac[3][1] = (val >> 8) & 0x3f;
+
+ pcinfo->pwr[3][2] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr[3][2] |=
- ((val >> 0) & 0x3) << 2;
+ pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2;
- chan_pcal_info->pddac[3][2] =
- (val >> 2) & 0x3f;
- chan_pcal_info->pwr[3][3] =
- (val >> 8) & 0xf;
+ pcinfo->pddac[3][2] = (val >> 2) & 0x3f;
+ pcinfo->pwr[3][3] = (val >> 8) & 0xf;
- chan_pcal_info->pddac[3][3] =
- (val >> 12) & 0xF;
+ pcinfo->pddac[3][3] = (val >> 12) & 0xF;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pddac[3][3] |=
- ((val >> 0) & 0x3) << 4;
+ pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4;
} else if (pd_gains == 3) {
- chan_pcal_info->pwr[2][3] =
- (val >> 14) & 0x3;
+ pcinfo->pwr[2][3] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
- chan_pcal_info->pwr[2][3] |=
- ((val >> 0) & 0x3) << 2;
-
- chan_pcal_info->pddac[2][3] =
- (val >> 2) & 0x3f;
- }
+ pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2;
- for (c = 0; c < pd_gains; c++) {
- /* Recreate pwr table for this channel using pwr steps */
- chan_pcal_info->pwr[c][0] += chan_pcal_info->pwr_i[c] * 2;
- chan_pcal_info->pwr[c][1] += chan_pcal_info->pwr[c][0];
- chan_pcal_info->pwr[c][2] += chan_pcal_info->pwr[c][1];
- chan_pcal_info->pwr[c][3] += chan_pcal_info->pwr[c][2];
- if (chan_pcal_info->pwr[c][3] == chan_pcal_info->pwr[c][2])
- chan_pcal_info->pwr[c][3] = 0;
-
- /* Recreate pddac table for this channel using pddac steps */
- chan_pcal_info->pddac[c][0] += chan_pcal_info->pddac_i[c];
- chan_pcal_info->pddac[c][1] += chan_pcal_info->pddac[c][0];
- chan_pcal_info->pddac[c][2] += chan_pcal_info->pddac[c][1];
- chan_pcal_info->pddac[c][3] += chan_pcal_info->pddac[c][2];
- if (chan_pcal_info->pddac[c][3] == chan_pcal_info->pddac[c][2])
- chan_pcal_info->pddac[c][3] = 0;
+ pcinfo->pddac[2][3] = (val >> 2) & 0x3f;
}
}
- return 0;
+ return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo);
}
+
/*
* Read per rate target power (this is the maximum tx power
* supported by the card). This info is used when setting
@@ -1154,11 +1404,12 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
*
* This also works for v5 EEPROMs.
*/
-static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
+static int
+ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_rate_pcal_info *rate_pcal_info;
- u16 *rate_target_pwr_num;
+ u8 *rate_target_pwr_num;
u32 offset;
u16 val;
int ret, i;
@@ -1264,7 +1515,9 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
else
read_pcal = ath5k_eeprom_read_pcal_info_5111;
- for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) {
+
+ for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G;
+ mode++) {
err = read_pcal(ah, mode);
if (err)
return err;
@@ -1277,6 +1530,62 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
return 0;
}
+static int
+ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info *chinfo;
+ u8 pier, pdg;
+
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
+ return 0;
+ chinfo = ee->ee_pwr_cal_a;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
+ return 0;
+ chinfo = ee->ee_pwr_cal_b;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
+ return 0;
+ chinfo = ee->ee_pwr_cal_g;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+ if (!chinfo[pier].pd_curves)
+ continue;
+
+ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+ struct ath5k_pdgain_info *pd =
+ &chinfo[pier].pd_curves[pdg];
+
+ if (pd != NULL) {
+ kfree(pd->pd_step);
+ kfree(pd->pd_pwr);
+ }
+ }
+
+ kfree(chinfo[pier].pd_curves);
+ }
+
+ return 0;
+}
+
+void
+ath5k_eeprom_detach(struct ath5k_hw *ah)
+{
+ u8 mode;
+
+ for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
+ ath5k_eeprom_free_pcal_info(ah, mode);
+}
+
/* Read conformance test limits used for regulatory control */
static int
ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
@@ -1457,3 +1766,4 @@ bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah)
else
return false;
}
+
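
The ath5k_get_pcdac_intercepts() helper added above is a plain percentage blend between the curve's minimum and maximum PCDAC values at fixed percent positions. A standalone sketch with made-up boundaries (min = 1, max = 63) shows the table it would produce for a >= 3.2 EEPROM:

#include <stdio.h>

int main(void)
{
	/* percent positions, as in intercepts3_2 above */
	static const unsigned short ip[] =
		{ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
	unsigned int min = 1, max = 63, i;

	/* same formula as ath5k_get_pcdac_intercepts() */
	for (i = 0; i < sizeof(ip) / sizeof(ip[0]); i++)
		printf("vp[%u] = %u\n", i,
		       (ip[i] * max + (100 - ip[i]) * min) / 100);

	return 0;
}

With these inputs the table comes out as 1, 7, 13, 19, 25, 32, 38, 44, 50, 56, 63: eleven pcdac steps spread between the calibrated minimum and maximum.
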
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
index 1deebc0257d4..b0c0606dea0b 100644
--- a/drivers/net/wireless/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -173,6 +173,7 @@
#define AR5K_EEPROM_N_5GHZ_CHAN 10
#define AR5K_EEPROM_N_2GHZ_CHAN 3
#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
+#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
#define AR5K_EEPROM_MAX_CHAN 10
#define AR5K_EEPROM_N_PWR_POINTS_5111 11
#define AR5K_EEPROM_N_PCDAC 11
@@ -193,7 +194,7 @@
#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
#define AR5K_EEPROM_MAX_CTLS 32
-#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4
+#define AR5K_EEPROM_N_PD_CURVES 4
#define AR5K_EEPROM_N_XPD0_POINTS 4
#define AR5K_EEPROM_N_XPD3_POINTS 3
#define AR5K_EEPROM_N_PD_GAINS 4
@@ -232,7 +233,7 @@ enum ath5k_ctl_mode {
AR5K_CTL_11B = 1,
AR5K_CTL_11G = 2,
AR5K_CTL_TURBO = 3,
- AR5K_CTL_108G = 4,
+ AR5K_CTL_TURBOG = 4,
AR5K_CTL_2GHT20 = 5,
AR5K_CTL_5GHT20 = 6,
AR5K_CTL_2GHT40 = 7,
@@ -240,65 +241,114 @@ enum ath5k_ctl_mode {
AR5K_CTL_MODE_M = 15,
};
+/* Default CTL ids for the 3 main reg domains.
+ * Atheros only uses these by default but vendors
+ * can have up to 32 different CTLs for different
+ * scenarios. Note that these values are ORed with
+ * the mode id (above) so we can have up to 24 CTL
+ * datasets out of these 3 main regdomains. That leaves
+ * 8 ids that can be used by vendors and since 0x20 is
+ * missing from HAL sources, I guess this is the set of
+ * custom CTLs vendors can use. */
+#define AR5K_CTL_FCC 0x10
+#define AR5K_CTL_CUSTOM 0x20
+#define AR5K_CTL_ETSI 0x30
+#define AR5K_CTL_MKK 0x40
+
+/* Indicates a CTL with only mode set and
+ * no reg domain mapping, such CTLs are used
+ * for world roaming domains or simply when
+ * a reg domain is not set */
+#define AR5K_CTL_NO_REGDOMAIN 0xf0
+
+/* Indicates an empty (invalid) CTL */
+#define AR5K_CTL_NO_CTL 0xff
+
/* Per channel calibration data, used for power table setup */
struct ath5k_chan_pcal_info_rf5111 {
/* Power levels in half dbm units
* for one power curve. */
- u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
+ u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
/* PCDAC table steps
* for the above values */
- u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111];
+ u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111];
/* Starting PCDAC step */
- u8 pcdac_min;
+ u8 pcdac_min;
/* Final PCDAC step */
- u8 pcdac_max;
+ u8 pcdac_max;
};
struct ath5k_chan_pcal_info_rf5112 {
/* Power levels in quarter dBm units
* for lower (0) and higher (3)
- * level curves */
- s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS];
- s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS];
+ * level curves in 0.25dB units */
+ s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS];
+ s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS];
/* PCDAC table steps
* for the above values */
- u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS];
- u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS];
+ u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS];
+ u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS];
};
struct ath5k_chan_pcal_info_rf2413 {
/* Starting pwr/pddac values */
- s8 pwr_i[AR5K_EEPROM_N_PD_GAINS];
- u8 pddac_i[AR5K_EEPROM_N_PD_GAINS];
- /* (pwr,pddac) points */
- s8 pwr[AR5K_EEPROM_N_PD_GAINS]
- [AR5K_EEPROM_N_PD_POINTS];
- u8 pddac[AR5K_EEPROM_N_PD_GAINS]
- [AR5K_EEPROM_N_PD_POINTS];
+ s8 pwr_i[AR5K_EEPROM_N_PD_GAINS];
+ u8 pddac_i[AR5K_EEPROM_N_PD_GAINS];
+ /* (pwr,pddac) points
+ * power levels in 0.5dB units */
+ s8 pwr[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_N_PD_POINTS];
+ u8 pddac[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_N_PD_POINTS];
+};
+
+enum ath5k_powertable_type {
+ AR5K_PWRTABLE_PWR_TO_PCDAC = 0,
+ AR5K_PWRTABLE_LINEAR_PCDAC = 1,
+ AR5K_PWRTABLE_PWR_TO_PDADC = 2,
+};
+
+struct ath5k_pdgain_info {
+ u8 pd_points;
+ u8 *pd_step;
+ /* Power values are in
+ * 0.25dB units */
+ s16 *pd_pwr;
};
struct ath5k_chan_pcal_info {
/* Frequency */
u16 freq;
- /* Max available power */
- s8 max_pwr;
+ /* Tx power boundaries */
+ s16 max_pwr;
+ s16 min_pwr;
union {
struct ath5k_chan_pcal_info_rf5111 rf5111_info;
struct ath5k_chan_pcal_info_rf5112 rf5112_info;
struct ath5k_chan_pcal_info_rf2413 rf2413_info;
};
+ /* Raw values used by phy code
+ * Curves are stored in order from lower
+ * gain to higher gain (max txpower -> min txpower) */
+ struct ath5k_pdgain_info *pd_curves;
};
-/* Per rate calibration data for each mode, used for power table setup */
+/* Per rate calibration data for each mode,
+ * used for rate power table setup.
+ * Note: Values in 0.5dB units */
struct ath5k_rate_pcal_info {
u16 freq; /* Frequency */
- /* Power level for 6-24Mbit/s rates */
+ /* Power level for 6-24Mbit/s rates or
+ * 1Mbit rate */
u16 target_power_6to24;
- /* Power level for 36Mbit rate */
+ /* Power level for 36Mbit rate or
+ * 2Mbit rate */
u16 target_power_36;
- /* Power level for 48Mbit rate */
+ /* Power level for 48Mbit rate or
+ * 5.5Mbit rate */
u16 target_power_48;
- /* Power level for 54Mbit rate */
+ /* Power level for 54Mbit rate or
+ * 11Mbit rate */
u16 target_power_54;
};
@@ -330,12 +380,6 @@ struct ath5k_eeprom_info {
u16 ee_cck_ofdm_power_delta;
u16 ee_scaled_cck_delta;
- /* Used for tx thermal adjustment (eeprom_init, rfregs) */
- u16 ee_tx_clip;
- u16 ee_pwd_84;
- u16 ee_pwd_90;
- u16 ee_gain_select;
-
/* RF Calibration settings (reset, rfregs) */
u16 ee_i_cal[AR5K_EEPROM_N_MODES];
u16 ee_q_cal[AR5K_EEPROM_N_MODES];
@@ -363,23 +407,25 @@ struct ath5k_eeprom_info {
/* Power calibration data */
u16 ee_false_detect[AR5K_EEPROM_N_MODES];
- /* Number of pd gain curves per mode (RF2413) */
- u8 ee_pd_gains[AR5K_EEPROM_N_MODES];
+ /* Number of pd gain curves per mode */
+ u8 ee_pd_gains[AR5K_EEPROM_N_MODES];
+ /* Back mapping pdcurve number -> pdcurve index in pd->pd_curves */
+ u8 ee_pdc_to_idx[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PD_GAINS];
- u8 ee_n_piers[AR5K_EEPROM_N_MODES];
+ u8 ee_n_piers[AR5K_EEPROM_N_MODES];
struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN];
- struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN];
- struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN];
+ struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
+ struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
/* Per rate target power levels */
- u16 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES];
+ u8 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES];
struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN];
- struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN];
- struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN];
+ struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
+ struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
/* Conformance test limits (Unused) */
- u16 ee_ctls;
- u16 ee_ctl[AR5K_EEPROM_MAX_CTLS];
+ u8 ee_ctls;
+ u8 ee_ctl[AR5K_EEPROM_MAX_CTLS];
struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS];
/* Noise Floor Calibration settings */
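
Since the CTL ids above are just the reg-domain nibble ORed with a mode id from enum ath5k_ctl_mode, a stored CTL byte can be split back into its two halves with AR5K_CTL_MODE_M. A short illustration, not driver code:

	u8 ctl  = AR5K_CTL_FCC | AR5K_CTL_11G;	/* 0x10 | 2 == 0x12 */
	u8 mode = ctl & AR5K_CTL_MODE_M;	/* 0x02 -> AR5K_CTL_11G */
	u8 regd = ctl & ~AR5K_CTL_MODE_M;	/* 0x10 -> AR5K_CTL_FCC */
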
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 44886434187b..61fb621ed20d 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -1510,8 +1510,8 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
rf2425_ini_mode_end, mode);
ath5k_hw_ini_registers(ah,
- ARRAY_SIZE(rf2413_ini_common_end),
- rf2413_ini_common_end, change_channel);
+ ARRAY_SIZE(rf2425_ini_common_end),
+ rf2425_ini_common_end, change_channel);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
diff --git a/drivers/net/wireless/ath5k/led.c b/drivers/net/wireless/ath5k/led.c
index 0686e12738b3..19555fb79c9b 100644
--- a/drivers/net/wireless/ath5k/led.c
+++ b/drivers/net/wireless/ath5k/led.c
@@ -65,6 +65,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
/* E-machines E510 (tuliom@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
+ /* Acer Extensa 5620z (nekoreeve@gmail.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
{ }
};
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index 81f5bebc48b1..9e2faae5ae94 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -4,6 +4,7 @@
* Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -183,7 +184,9 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE)
return;
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max,
+ /* Send the packet at 2dB below max power, as
+ * the patent doc suggests */
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max_pwr - 4,
AR5K_PHY_PAPD_PROBE_TXPOWER) |
AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
@@ -1433,93 +1436,1120 @@ unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
return false; /*XXX: What do we return for 5210 ?*/
}
+
+/****************\
+* TX power setup *
+\****************/
+
+/*
+ * Helper functions
+ */
+
+/*
+ * Do linear interpolation between two given (x, y) points
+ */
+static s16
+ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
+ s16 y_left, s16 y_right)
+{
+ s16 ratio, result;
+
+ /* Avoid divide by zero and skip interpolation
+ * if we have the same point */
+ if ((x_left == x_right) || (y_left == y_right))
+ return y_left;
+
+ /*
+ * Since we use ints and not floats, we need to scale up in
+ * order to get a sane ratio value (or else we'll e.g. always
+ * get 1 instead of 1.25, 1.75 etc). We scale up by 100
+ * to have some accuracy both for 0.5 and 0.25 steps.
+ */
+ ratio = ((100 * y_right - 100 * y_left)/(x_right - x_left));
+
+ /* Now scale down to be in range */
+ result = y_left + (ratio * (target - x_left) / 100);
+
+ return result;
+}
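
A quick illustration of the helper above with made-up points; the x100 scaling is what keeps the fractional slope alive in integer arithmetic:

	/* interpolating between (0, 0) and (4, 2), i.e. a slope of 0.5 */
	s16 y = ath5k_get_interpolated_value(3, 0, 4, 0, 2);
	/* ratio == 50 ("0.50" scaled by 100), so y == 0 + 50 * 3 / 100 == 1;
	 * an unscaled integer ratio would have been (2 - 0) / (4 - 0) == 0
	 * and the result would have collapsed to 0 */
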
+
+/*
+ * Find vertical boundary (min pwr) for the linear PCDAC curve.
+ *
+ * Since we have the top of the curve and we draw the line downwards
+ * until we reach 1 (1 pcdac step), we need to know at which point
+ * (x value) that happens, so that we don't go below the y axis and get
+ * negative pcdac values when creating the curve, or fill the table with zeroes.
+ */
+static s16
+ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
+ const s16 *pwrL, const s16 *pwrR)
+{
+ s8 tmp;
+ s16 min_pwrL, min_pwrR;
+ s16 pwr_i = pwrL[0];
+
+ do {
+ pwr_i--;
+ tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+ pwrL[0], pwrL[1],
+ stepL[0], stepL[1]);
+
+ } while (tmp > 1);
+
+ min_pwrL = pwr_i;
+
+ pwr_i = pwrR[0];
+ do {
+ pwr_i--;
+ tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+ pwrR[0], pwrR[1],
+ stepR[0], stepR[1]);
+
+ } while (tmp > 1);
+
+ min_pwrR = pwr_i;
+
+ /* Keep the right boundary so that it works for both curves */
+ return max(min_pwrL, min_pwrR);
+}
+
+/*
+ * Interpolate (pwr,vpd) points to create a Power to PDADC or a
+ * Power to PCDAC curve.
+ *
+ * Each curve has power on x axis (in 0.5dB units) and PCDAC/PDADC
+ * steps (offsets) on y axis. Power can go up to 31.5dB and max
+ * PCDAC/PDADC step for each curve is 64 but we can write more than
+ * one curve on hw so we can go up to 128 (which is the max step we
+ * can write on the final table).
+ *
+ * We write y values (PCDAC/PDADC steps) on hw.
+ */
+static void
+ath5k_create_power_curve(s16 pmin, s16 pmax,
+ const s16 *pwr, const u8 *vpd,
+ u8 num_points,
+ u8 *vpd_table, u8 type)
+{
+ u8 idx[2] = { 0, 1 };
+ s16 pwr_i = 2*pmin;
+ int i;
+
+ if (num_points < 2)
+ return;
+
+ /* We want the whole line, so adjust boundaries
+ * to cover the entire power range. Note that
+ * power values are already 0.25dB so no need
+ * to multiply pwr_i by 2 */
+ if (type == AR5K_PWRTABLE_LINEAR_PCDAC) {
+ pwr_i = pmin;
+ pmin = 0;
+ pmax = 63;
+ }
+
+ /* Find surrounding turning points (TPs)
+ * and interpolate between them */
+ for (i = 0; (i <= (u16) (pmax - pmin)) &&
+ (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
+
+ /* We passed the right TP; move to the next set of TPs.
+ * If we pass the last TP, extrapolate above using the last
+ * two TPs for the ratio */
+ if ((pwr_i > pwr[idx[1]]) && (idx[1] < num_points - 1)) {
+ idx[0]++;
+ idx[1]++;
+ }
+
+ vpd_table[i] = (u8) ath5k_get_interpolated_value(pwr_i,
+ pwr[idx[0]], pwr[idx[1]],
+ vpd[idx[0]], vpd[idx[1]]);
+
+ /* Increase by 0.5dB
+ * (0.25 dB units) */
+ pwr_i += 2;
+ }
+}
+
+/*
+ * Get the surrounding per-channel power calibration piers
+ * for a given frequency so that we can interpolate between
+ * them and come up with an appropriate dataset for our current
+ * channel.
+ */
+static void
+ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ struct ath5k_chan_pcal_info **pcinfo_l,
+ struct ath5k_chan_pcal_info **pcinfo_r)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info *pcinfo;
+ u8 idx_l, idx_r;
+ u8 mode, max, i;
+ u32 target = channel->center_freq;
+
+ idx_l = 0;
+ idx_r = 0;
+
+ if (!(channel->hw_value & CHANNEL_OFDM)) {
+ pcinfo = ee->ee_pwr_cal_b;
+ mode = AR5K_EEPROM_MODE_11B;
+ } else if (channel->hw_value & CHANNEL_2GHZ) {
+ pcinfo = ee->ee_pwr_cal_g;
+ mode = AR5K_EEPROM_MODE_11G;
+ } else {
+ pcinfo = ee->ee_pwr_cal_a;
+ mode = AR5K_EEPROM_MODE_11A;
+ }
+ max = ee->ee_n_piers[mode] - 1;
+
+ /* Frequency is below our calibrated
+ * range. Use the lowest power curve
+ * we have */
+ if (target < pcinfo[0].freq) {
+ idx_l = idx_r = 0;
+ goto done;
+ }
+
+ /* Frequency is above our calibrated
+ * range. Use the highest power curve
+ * we have */
+ if (target > pcinfo[max].freq) {
+ idx_l = idx_r = max;
+ goto done;
+ }
+
+ /* Frequency is inside our calibrated
+ * channel range. Pick the surrounding
+ * calibration piers so that we can
+ * interpolate */
+ for (i = 0; i <= max; i++) {
+
+ /* Frequency matches one of our calibration
+ * piers, no need to interpolate, just use
+ * that calibration pier */
+ if (pcinfo[i].freq == target) {
+ idx_l = idx_r = i;
+ goto done;
+ }
+
+ /* We found a calibration pier that's above
+ * frequency, use this pier and the previous
+ * one to interpolate */
+ if (target < pcinfo[i].freq) {
+ idx_r = i;
+ idx_l = idx_r - 1;
+ goto done;
+ }
+ }
+
+done:
+ *pcinfo_l = &pcinfo[idx_l];
+ *pcinfo_r = &pcinfo[idx_r];
+
+ return;
+}
+
+/*
+ * Get the surrounding per-rate power calibration data
+ * for a given frequency and interpolate between power
+ * values to set max target power supported by hw for
+ * each rate.
+ */
+static void
+ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ struct ath5k_rate_pcal_info *rates)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_rate_pcal_info *rpinfo;
+ u8 idx_l, idx_r;
+ u8 mode, max, i;
+ u32 target = channel->center_freq;
+
+ idx_l = 0;
+ idx_r = 0;
+
+ if (!(channel->hw_value & CHANNEL_OFDM)) {
+ rpinfo = ee->ee_rate_tpwr_b;
+ mode = AR5K_EEPROM_MODE_11B;
+ } else if (channel->hw_value & CHANNEL_2GHZ) {
+ rpinfo = ee->ee_rate_tpwr_g;
+ mode = AR5K_EEPROM_MODE_11G;
+ } else {
+ rpinfo = ee->ee_rate_tpwr_a;
+ mode = AR5K_EEPROM_MODE_11A;
+ }
+ max = ee->ee_rate_target_pwr_num[mode] - 1;
+
+ /* Get the surrounding calibration
+ * piers - same as above */
+ if (target < rpinfo[0].freq) {
+ idx_l = idx_r = 0;
+ goto done;
+ }
+
+ if (target > rpinfo[max].freq) {
+ idx_l = idx_r = max;
+ goto done;
+ }
+
+ for (i = 0; i <= max; i++) {
+
+ if (rpinfo[i].freq == target) {
+ idx_l = idx_r = i;
+ goto done;
+ }
+
+ if (target < rpinfo[i].freq) {
+ idx_r = i;
+ idx_l = idx_r - 1;
+ goto done;
+ }
+ }
+
+done:
+ /* Now interpolate power value, based on the frequency */
+ rates->freq = target;
+
+ rates->target_power_6to24 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_6to24,
+ rpinfo[idx_r].target_power_6to24);
+
+ rates->target_power_36 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_36,
+ rpinfo[idx_r].target_power_36);
+
+ rates->target_power_48 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_48,
+ rpinfo[idx_r].target_power_48);
+
+ rates->target_power_54 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_54,
+ rpinfo[idx_r].target_power_54);
+}
+
+/*
+ * Get the max edge power for this channel if
+ * we have such data from EEPROM's Conformance Test
+ * Limits (CTL), and limit max power if needed.
+ *
+ * FIXME: Only works for world regulatory domains
+ */
+static void
+ath5k_get_max_ctl_power(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_edge_power *rep = ee->ee_ctl_pwr;
+ u8 *ctl_val = ee->ee_ctl;
+ s16 max_chan_pwr = ah->ah_txpower.txp_max_pwr / 4;
+ s16 edge_pwr = 0;
+ u8 rep_idx;
+ u8 i, ctl_mode;
+ u8 ctl_idx = 0xFF;
+ u32 target = channel->center_freq;
+
+ /* Find a CTL for our mode that's not mapped
+ * to a specific reg domain.
+ *
+ * TODO: Map our current reg domain to one of the 3 available
+ * reg domain ids so that we can support more CTLs. */
+ switch (channel->hw_value & CHANNEL_MODES) {
+ case CHANNEL_A:
+ ctl_mode = AR5K_CTL_11A | AR5K_CTL_NO_REGDOMAIN;
+ break;
+ case CHANNEL_G:
+ ctl_mode = AR5K_CTL_11G | AR5K_CTL_NO_REGDOMAIN;
+ break;
+ case CHANNEL_B:
+ ctl_mode = AR5K_CTL_11B | AR5K_CTL_NO_REGDOMAIN;
+ break;
+ case CHANNEL_T:
+ ctl_mode = AR5K_CTL_TURBO | AR5K_CTL_NO_REGDOMAIN;
+ break;
+ case CHANNEL_TG:
+ ctl_mode = AR5K_CTL_TURBOG | AR5K_CTL_NO_REGDOMAIN;
+ break;
+ case CHANNEL_XR:
+ /* Fall through */
+ default:
+ return;
+ }
+
+ for (i = 0; i < ee->ee_ctls; i++) {
+ if (ctl_val[i] == ctl_mode) {
+ ctl_idx = i;
+ break;
+ }
+ }
+
+ /* If we don't have a CTL dataset available for this
+ * mode, leave the max power limit untouched */
+ if (ctl_idx == 0xFF)
+ return;
+
+ /* Edge powers are sorted by frequency from lower
+ * to higher. Each CTL corresponds to 8 edge power
+ * measurements. */
+ rep_idx = ctl_idx * AR5K_EEPROM_N_EDGES;
+
+ /* Don't do a boundary check because we
+ * might have more than one band defined
+ * for this mode */
+
+ /* Get the edge power that's closer to our
+ * frequency */
+ for (i = 0; i < AR5K_EEPROM_N_EDGES; i++) {
+ rep_idx += i;
+ if (target <= rep[rep_idx].freq)
+ edge_pwr = (s16) rep[rep_idx].edge;
+ }
+
+ if (edge_pwr)
+ ah->ah_txpower.txp_max_pwr = 4*min(edge_pwr, max_chan_pwr);
+}
+
+
+/*
+ * Power to PCDAC table functions
+ */
+
/*
- * TX power setup
+ * Fill Power to PCDAC table on RF5111
+ *
+ * No further processing is needed for RF5111, the only thing we have to
+ * do is fill the values below and above the calibration range since eeprom data
+ * may not cover the entire PCDAC table.
*/
+static void
+ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
+ s16 *table_max)
+{
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ u8 *pcdac_tmp = ah->ah_txpower.tmpL[0];
+ u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i;
+ s16 min_pwr, max_pwr;
+
+ /* Get table boundaries */
+ min_pwr = table_min[0];
+ pcdac_0 = pcdac_tmp[0];
+
+ max_pwr = table_max[0];
+ pcdac_n = pcdac_tmp[table_max[0] - table_min[0]];
+
+ /* Extrapolate below minimum using pcdac_0 */
+ pcdac_i = 0;
+ for (i = 0; i < min_pwr; i++)
+ pcdac_out[pcdac_i++] = pcdac_0;
+
+ /* Copy values from pcdac_tmp */
+ pwr_idx = min_pwr;
+ for (i = 0 ; pwr_idx <= max_pwr &&
+ pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
+ pcdac_out[pcdac_i++] = pcdac_tmp[i];
+ pwr_idx++;
+ }
+
+ /* Extrapolate above maximum */
+ while (pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE)
+ pcdac_out[pcdac_i++] = pcdac_n;
+
+}
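
To make the boundary handling above concrete, here is a tiny self-contained sketch of the same fill pattern with made-up numbers; the table size, power indices and values are arbitrary, not EEPROM data:

	/* Illustrative sketch: fill an 8-entry PCDAC table from a 3-entry
	 * calibrated curve that only covers power indices 2..4. */
	static void fill_pcdac_sketch(u8 *out /* [8] */)
	{
		static const u8 curve[3] = { 10, 20, 30 };
		int i;

		for (i = 0; i < 2; i++)		/* below range: repeat first value */
			out[i] = curve[0];
		for (; i <= 4; i++)		/* calibrated range: copy the curve */
			out[i] = curve[i - 2];
		for (; i < 8; i++)		/* above range: repeat last value */
			out[i] = curve[2];
	}
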
/*
- * Initialize the tx power table (not fully implemented)
+ * Combine available XPD Curves and fill Linear Power to PCDAC table
+ * on RF5112
+ *
+ * RF5112 can have up to 2 curves (one for low txpower range and one for
+ * higher txpower range). We need to put them both on pcdac_out and place
+ * them in the correct location. In case we only have one curve available
+ * just fit it on pcdac_out (it's supposed to cover the entire range of
+ * available pwr levels since it's always the higher power curve). Extrapolate
+ * below and above final table if needed.
*/
-static void ath5k_txpower_table(struct ath5k_hw *ah,
- struct ieee80211_channel *channel, s16 max_power)
+static void
+ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
+ s16 *table_max, u8 pdcurves)
{
- unsigned int i, min, max, n;
- u16 txpower, *rates;
-
- rates = ah->ah_txpower.txp_rates;
-
- txpower = AR5K_TUNE_DEFAULT_TXPOWER * 2;
- if (max_power > txpower)
- txpower = max_power > AR5K_TUNE_MAX_TXPOWER ?
- AR5K_TUNE_MAX_TXPOWER : max_power;
-
- for (i = 0; i < AR5K_MAX_RATES; i++)
- rates[i] = txpower;
-
- /* XXX setup target powers by rate */
-
- ah->ah_txpower.txp_min = rates[7];
- ah->ah_txpower.txp_max = rates[0];
- ah->ah_txpower.txp_ofdm = rates[0];
-
- /* Calculate the power table */
- n = ARRAY_SIZE(ah->ah_txpower.txp_pcdac);
- min = AR5K_EEPROM_PCDAC_START;
- max = AR5K_EEPROM_PCDAC_STOP;
- for (i = 0; i < n; i += AR5K_EEPROM_PCDAC_STEP)
- ah->ah_txpower.txp_pcdac[i] =
-#ifdef notyet
- min + ((i * (max - min)) / n);
-#else
- min;
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ u8 *pcdac_low_pwr;
+ u8 *pcdac_high_pwr;
+ u8 *pcdac_tmp;
+ u8 pwr;
+ s16 max_pwr_idx;
+ s16 min_pwr_idx;
+ s16 mid_pwr_idx = 0;
+ /* Edge flag turns on the 7th bit on the PCDAC
+ * to declare the higher power curve (force values
+ * to be greater than 64). If we only have one curve
+ * we don't need to set this; if we have 2 curves and
+ * fill the table backwards this can also be used to
+ * switch from the higher power curve to the lower power curve */
+ u8 edge_flag;
+ int i;
+
+ /* When we have only one curve available
+ * that's the higher power curve. If we have
+ * two curves the first is the high power curve
+ * and the next is the low power curve. */
+ if (pdcurves > 1) {
+ pcdac_low_pwr = ah->ah_txpower.tmpL[1];
+ pcdac_high_pwr = ah->ah_txpower.tmpL[0];
+ mid_pwr_idx = table_max[1] - table_min[1] - 1;
+ max_pwr_idx = (table_max[0] - table_min[0]) / 2;
+
+ /* If table size goes beyond 31.5dB, keep the
+ * upper 31.5dB range when setting tx power.
+ * Note: 126 = 31.5 dB in quarter dB steps */
+ if (table_max[0] - table_min[1] > 126)
+ min_pwr_idx = table_max[0] - 126;
+ else
+ min_pwr_idx = table_min[1];
+
+ /* Since we fill table backwards
+ * start from high power curve */
+ pcdac_tmp = pcdac_high_pwr;
+
+ edge_flag = 0x40;
+#if 0
+ /* If both min and max power limits are in lower
+ * power curve's range, only use the low power curve.
+ * TODO: min/max levels are related to target
+ * power values requested from driver/user
+ * XXX: Is this really needed ? */
+ if (min_pwr < table_max[1] &&
+ max_pwr < table_max[1]) {
+ edge_flag = 0;
+ pcdac_tmp = pcdac_low_pwr;
+ max_pwr_idx = (table_max[1] - table_min[1])/2;
+ }
#endif
+ } else {
+ pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
+ pcdac_high_pwr = ah->ah_txpower.tmpL[0];
+ min_pwr_idx = table_min[0];
+ max_pwr_idx = (table_max[0] - table_min[0]) / 2;
+ pcdac_tmp = pcdac_high_pwr;
+ edge_flag = 0;
+ }
+
+ /* This is used when setting tx power */
+ ah->ah_txpower.txp_min_idx = min_pwr_idx/2;
+
+ /* Fill Power to PCDAC table backwards */
+ pwr = max_pwr_idx;
+ for (i = 63; i >= 0; i--) {
+ /* Entering lower power range, reset
+ * edge flag and set pcdac_tmp to lower
+ * power curve. */
+ if (edge_flag == 0x40 &&
+ (2*pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
+ edge_flag = 0x00;
+ pcdac_tmp = pcdac_low_pwr;
+ pwr = mid_pwr_idx/2;
+ }
+
+ /* Don't go below 1, extrapolate below if we have
+ * already switched to the lower power curve - or
+ * we only have one curve and edge_flag is zero
+ * anyway */
+ if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) {
+ while (i >= 0) {
+ pcdac_out[i] = pcdac_out[i + 1];
+ i--;
+ }
+ break;
+ }
+
+ pcdac_out[i] = pcdac_tmp[pwr] | edge_flag;
+
+ /* Clamp if pcdac is greater than 126 - this can
+ * happen because we OR the pcdac_out value with
+ * edge_flag on the high power curve */
+ if (pcdac_out[i] > 126)
+ pcdac_out[i] = 126;
+
+ /* Decrease by a 0.5dB step */
+ pwr--;
+ }
}
+/* Write PCDAC values on hw */
+static void
+ath5k_setup_pcdac_table(struct ath5k_hw *ah)
+{
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ int i;
+
+ /*
+ * Write TX power values
+ */
+ for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
+ ath5k_hw_reg_write(ah,
+ (((pcdac_out[2*i + 0] << 8 | 0xff) & 0xffff) << 0) |
+ (((pcdac_out[2*i + 1] << 8 | 0xff) & 0xffff) << 16),
+ AR5K_PHY_PCDAC_TXPOWER(i));
+ }
+}
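
The register packing above is easy to misread; as a sketch, the per-register value is built as follows (the helper name is made up, the bit layout simply mirrors the loop above):

	/* Illustrative sketch: two PCDAC entries per 32-bit register, each in
	 * bits 8..15 of its half-word, low byte forced to 0xff. */
	static u32 pack_pcdac_pair_sketch(u8 even_entry, u8 odd_entry)
	{
		u32 lo = ((even_entry << 8) | 0xff) & 0xffff;
		u32 hi = ((odd_entry << 8) | 0xff) & 0xffff;

		return (lo << 0) | (hi << 16);
	}
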
+
+
/*
- * Set transmition power
+ * Power to PDADC table functions
*/
-int /*O.K. - txpower_table is unimplemented so this doesn't work*/
-ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
- unsigned int txpower)
+
+/*
+ * Set the gain boundaries and create final Power to PDADC table
+ *
+ * We can have up to 4 pd curves; we need to do a similar process
+ * as for RF5112. This time we don't have an edge_flag but we
+ * set the gain boundaries on a separate register.
+ */
+static void
+ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
+ s16 *pwr_min, s16 *pwr_max, u8 pdcurves)
{
- bool tpc = ah->ah_txpower.txp_tpc;
- unsigned int i;
+ u8 gain_boundaries[AR5K_EEPROM_N_PD_GAINS];
+ u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
+ u8 *pdadc_tmp;
+ s16 pdadc_0;
+ u8 pdadc_i, pdadc_n, pwr_step, pdg, max_idx, table_size;
+ u8 pd_gain_overlap;
+
+ /* Note: Register value is initialized from initvals,
+ * there is no feedback from hw.
+ * XXX: What about pd_gain_overlap from EEPROM ? */
+ pd_gain_overlap = (u8) ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG5) &
+ AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP;
+
+ /* Create final PDADC table */
+ for (pdg = 0, pdadc_i = 0; pdg < pdcurves; pdg++) {
+ pdadc_tmp = ah->ah_txpower.tmpL[pdg];
+
+ if (pdg == pdcurves - 1)
+ /* 2 dB boundary stretch for last
+ * (higher power) curve */
+ gain_boundaries[pdg] = pwr_max[pdg] + 4;
+ else
+ /* Set gain boundary in the middle
+ * between this curve and the next one */
+ gain_boundaries[pdg] =
+ (pwr_max[pdg] + pwr_min[pdg + 1]) / 2;
+
+ /* Sanity check in case our 2 dB stretch got out of
+ * range. */
+ if (gain_boundaries[pdg] > AR5K_TUNE_MAX_TXPOWER)
+ gain_boundaries[pdg] = AR5K_TUNE_MAX_TXPOWER;
+
+ /* For the first curve (lower power)
+ * start from 0 dB */
+ if (pdg == 0)
+ pdadc_0 = 0;
+ else
+ /* For the other curves use the gain overlap */
+ pdadc_0 = (gain_boundaries[pdg - 1] - pwr_min[pdg]) -
+ pd_gain_overlap;
- ATH5K_TRACE(ah->ah_sc);
- if (txpower > AR5K_TUNE_MAX_TXPOWER) {
- ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
- return -EINVAL;
+ /* Force each power step to be at least 0.5 dB */
+ if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1)
+ pwr_step = pdadc_tmp[1] - pdadc_tmp[0];
+ else
+ pwr_step = 1;
+
+ /* If pdadc_0 is negative, we need to extrapolate
+ * below this pdgain by a number of pwr_steps */
+ while ((pdadc_0 < 0) && (pdadc_i < 128)) {
+ s16 tmp = pdadc_tmp[0] + pdadc_0 * pwr_step;
+ pdadc_out[pdadc_i++] = (tmp < 0) ? 0 : (u8) tmp;
+ pdadc_0++;
+ }
+
+ /* Set last pwr level, using gain boundaries */
+ pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg];
+ /* Limit it to be inside pwr range */
+ table_size = pwr_max[pdg] - pwr_min[pdg];
+ max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
+
+ /* Fill pdadc_out table */
+ while (pdadc_0 < max_idx)
+ pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
+
+ /* Need to extrapolate above this pdgain? */
+ if (pdadc_n <= max_idx)
+ continue;
+
+ /* Force each power step to be at least 0.5 dB */
+ if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1)
+ pwr_step = pdadc_tmp[table_size - 1] -
+ pdadc_tmp[table_size - 2];
+ else
+ pwr_step = 1;
+
+ /* Extrapolate above */
+ while ((pdadc_0 < (s16) pdadc_n) &&
+ (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2)) {
+ s16 tmp = pdadc_tmp[table_size - 1] +
+ (pdadc_0 - max_idx) * pwr_step;
+ pdadc_out[pdadc_i++] = (tmp > 127) ? 127 : (u8) tmp;
+ pdadc_0++;
+ }
}
+ while (pdg < AR5K_EEPROM_N_PD_GAINS) {
+ gain_boundaries[pdg] = gain_boundaries[pdg - 1];
+ pdg++;
+ }
+
+ while (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2) {
+ pdadc_out[pdadc_i] = pdadc_out[pdadc_i - 1];
+ pdadc_i++;
+ }
+
+ /* Set gain boundaries */
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(pd_gain_overlap,
+ AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP) |
+ AR5K_REG_SM(gain_boundaries[0],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1) |
+ AR5K_REG_SM(gain_boundaries[1],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2) |
+ AR5K_REG_SM(gain_boundaries[2],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3) |
+ AR5K_REG_SM(gain_boundaries[3],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4),
+ AR5K_PHY_TPC_RG5);
+
+ /* Used for setting rate power table */
+ ah->ah_txpower.txp_min_idx = pwr_min[0];
+
+}
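
The gain boundary rule above reduces to a simple per-curve formula; this is an illustrative sketch of it in 0.5 dB units (the helper name is hypothetical, AR5K_TUNE_MAX_TXPOWER is the driver's existing cap):

	/* Illustrative sketch: gain boundary for curve 'pdg' out of 'n' curves,
	 * given each curve's min/max power in 0.5 dB units. */
	static u8 gain_boundary_sketch(const s16 *pwr_min, const s16 *pwr_max,
				       u8 pdg, u8 n)
	{
		s16 b;

		if (pdg == n - 1)
			b = pwr_max[pdg] + 4;	/* stretch last curve by 2 dB */
		else
			b = (pwr_max[pdg] + pwr_min[pdg + 1]) / 2;

		if (b > AR5K_TUNE_MAX_TXPOWER)
			b = AR5K_TUNE_MAX_TXPOWER;

		return (u8)b;
	}
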
+
+/* Write PDADC values on hw */
+static void
+ath5k_setup_pwr_to_pdadc_table(struct ath5k_hw *ah,
+ u8 pdcurves, u8 *pdg_to_idx)
+{
+ u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
+ u32 reg;
+ u8 i;
+
+ /* Select the right pdgain curves */
+
+ /* Clear current settings */
+ reg = ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG1);
+ reg &= ~(AR5K_PHY_TPC_RG1_PDGAIN_1 |
+ AR5K_PHY_TPC_RG1_PDGAIN_2 |
+ AR5K_PHY_TPC_RG1_PDGAIN_3 |
+ AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
+
/*
- * RF2413 for some reason can't
- * transmit anything if we call
- * this funtion, so we skip it
- * until we fix txpower.
+ * Use pd_gains curve from eeprom
*
- * XXX: Assume same for RF2425
- * to be safe.
+ * This overrides the default setting from initvals
+ * in case some vendors (e.g. Zcomax) don't use the default
+ * curves. If we don't honor their settings we'll get a
+ * 5dB (1 * gain overlap ?) drop.
*/
- if ((ah->ah_radio == AR5K_RF2413) || (ah->ah_radio == AR5K_RF2425))
- return 0;
+ reg |= AR5K_REG_SM(pdcurves, AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
- /* Reset TX power values */
- memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
- ah->ah_txpower.txp_tpc = tpc;
-
- /* Initialize TX power table */
- ath5k_txpower_table(ah, channel, txpower);
+ switch (pdcurves) {
+ case 3:
+ reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3);
+ /* Fall through */
+ case 2:
+ reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2);
+ /* Fall through */
+ case 1:
+ reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1);
+ break;
+ }
+ ath5k_hw_reg_write(ah, reg, AR5K_PHY_TPC_RG1);
/*
* Write TX power values
*/
for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
ath5k_hw_reg_write(ah,
- ((((ah->ah_txpower.txp_pcdac[(i << 1) + 1] << 8) | 0xff) & 0xffff) << 16) |
- (((ah->ah_txpower.txp_pcdac[(i << 1) ] << 8) | 0xff) & 0xffff),
- AR5K_PHY_PCDAC_TXPOWER(i));
+ ((pdadc_out[4*i + 0] & 0xff) << 0) |
+ ((pdadc_out[4*i + 1] & 0xff) << 8) |
+ ((pdadc_out[4*i + 2] & 0xff) << 16) |
+ ((pdadc_out[4*i + 3] & 0xff) << 24),
+ AR5K_PHY_PDADC_TXPOWER(i));
+ }
+}
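
For comparison with the PCDAC case, the PDADC write packs four table entries per register; a sketch of that layout with a hypothetical helper name:

	/* Illustrative sketch: four PDADC entries per 32-bit register,
	 * least-significant byte first. */
	static u32 pack_pdadc_quad_sketch(const u8 *p)
	{
		return ((u32)p[0] << 0)  |
		       ((u32)p[1] << 8)  |
		       ((u32)p[2] << 16) |
		       ((u32)p[3] << 24);
	}
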
+
+
+/*
+ * Common code for PCDAC/PDADC tables
+ */
+
+/*
+ * This is the main function that uses all of the above
+ * to set PCDAC/PDADC table on hw for the current channel.
+ * This table is used for tx power calibration on the baseband;
+ * without it we get weird tx power levels and in some cases
+ * a distorted spectral mask.
+ */
+static int
+ath5k_setup_channel_powertable(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ u8 ee_mode, u8 type)
+{
+ struct ath5k_pdgain_info *pdg_L, *pdg_R;
+ struct ath5k_chan_pcal_info *pcinfo_L;
+ struct ath5k_chan_pcal_info *pcinfo_R;
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode];
+ s16 table_min[AR5K_EEPROM_N_PD_GAINS];
+ s16 table_max[AR5K_EEPROM_N_PD_GAINS];
+ u8 *tmpL;
+ u8 *tmpR;
+ u32 target = channel->center_freq;
+ int pdg, i;
+
+ /* Get surrounding freq piers for this channel */
+ ath5k_get_chan_pcal_surrounding_piers(ah, channel,
+ &pcinfo_L,
+ &pcinfo_R);
+
+ /* Loop over pd gain curves on
+ * surrounding freq piers by index */
+ for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) {
+
+ /* Fill curves in reverse order
+ * from lower power (max gain)
+ * to higher power. Use curve -> idx
+ * backmapping we did on eeprom init */
+ u8 idx = pdg_curve_to_idx[pdg];
+
+ /* Grab the needed curves by index */
+ pdg_L = &pcinfo_L->pd_curves[idx];
+ pdg_R = &pcinfo_R->pd_curves[idx];
+
+ /* Initialize the temp tables */
+ tmpL = ah->ah_txpower.tmpL[pdg];
+ tmpR = ah->ah_txpower.tmpR[pdg];
+
+ /* Set curve's x boundaries and create
+ * curves so that they cover the same
+ * range (if we don't do that one table
+ * will have values on some range and the
+ * other one won't have any so interpolation
+ * will fail) */
+ table_min[pdg] = min(pdg_L->pd_pwr[0],
+ pdg_R->pd_pwr[0]) / 2;
+
+ table_max[pdg] = max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
+ pdg_R->pd_pwr[pdg_R->pd_points - 1]) / 2;
+
+ /* Now create the curves on surrounding channels
+ * and interpolate if needed to get the final
+ * curve for this gain on this channel */
+ switch (type) {
+ case AR5K_PWRTABLE_LINEAR_PCDAC:
+ /* Override min/max so that we don't lose
+ * accuracy (don't divide by 2) */
+ table_min[pdg] = min(pdg_L->pd_pwr[0],
+ pdg_R->pd_pwr[0]);
+
+ table_max[pdg] =
+ max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
+ pdg_R->pd_pwr[pdg_R->pd_points - 1]);
+
+ /* Override minimum so that we don't get
+ * out of bounds while extrapolating
+ * below. Don't do this when we have 2
+ * curves and we are on the high power curve
+ * because table_min is ok in this case */
+ if (!(ee->ee_pd_gains[ee_mode] > 1 && pdg == 0)) {
+
+ table_min[pdg] =
+ ath5k_get_linear_pcdac_min(pdg_L->pd_step,
+ pdg_R->pd_step,
+ pdg_L->pd_pwr,
+ pdg_R->pd_pwr);
+
+ /* Don't go too low because we will
+ * miss the upper part of the curve.
+ * Note: 126 = 31.5dB (max power supported)
+ * in 0.25dB units */
+ if (table_max[pdg] - table_min[pdg] > 126)
+ table_min[pdg] = table_max[pdg] - 126;
+ }
+
+ /* Fall through */
+ case AR5K_PWRTABLE_PWR_TO_PCDAC:
+ case AR5K_PWRTABLE_PWR_TO_PDADC:
+
+ ath5k_create_power_curve(table_min[pdg],
+ table_max[pdg],
+ pdg_L->pd_pwr,
+ pdg_L->pd_step,
+ pdg_L->pd_points, tmpL, type);
+
+ /* The channel sits on a calibration
+ * pier, no need to interpolate
+ * between freq piers */
+ if (pcinfo_L == pcinfo_R)
+ continue;
+
+ ath5k_create_power_curve(table_min[pdg],
+ table_max[pdg],
+ pdg_R->pd_pwr,
+ pdg_R->pd_step,
+ pdg_R->pd_points, tmpR, type);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Interpolate between curves
+ * of surrounding freq piers to
+ * get the final curve for this
+ * pd gain. Re-use tmpL for interpolation
+ * output */
+ for (i = 0; (i < (u16) (table_max[pdg] - table_min[pdg])) &&
+ (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
+ tmpL[i] = (u8) ath5k_get_interpolated_value(target,
+ (s16) pcinfo_L->freq,
+ (s16) pcinfo_R->freq,
+ (s16) tmpL[i],
+ (s16) tmpR[i]);
+ }
}
+ /* Now we have a set of curves for this
+ * channel on tmpL (x range is table_max - table_min
+ * and y values are tmpL[pdg][]) sorted in the same
+ * order as EEPROM (because we've used the backmapping).
+ * So for RF5112 it's from higher power to lower power
+ * and for RF2413 it's from lower power to higher power.
+ * For RF5111 we only have one curve. */
+
+ /* Fill min and max power levels for this
+ * channel by interpolating the values on
+ * surrounding channels to complete the dataset */
+ ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target,
+ (s16) pcinfo_L->freq,
+ (s16) pcinfo_R->freq,
+ pcinfo_L->min_pwr, pcinfo_R->min_pwr);
+
+ ah->ah_txpower.txp_max_pwr = ath5k_get_interpolated_value(target,
+ (s16) pcinfo_L->freq,
+ (s16) pcinfo_R->freq,
+ pcinfo_L->max_pwr, pcinfo_R->max_pwr);
+
+ /* We are ready to go, fill PCDAC/PDADC
+ * table and write settings on hardware */
+ switch (type) {
+ case AR5K_PWRTABLE_LINEAR_PCDAC:
+ /* For RF5112 we can have one or two curves
+ * and each curve covers a certain power level
+ * range so we need to do some more processing */
+ ath5k_combine_linear_pcdac_curves(ah, table_min, table_max,
+ ee->ee_pd_gains[ee_mode]);
+
+ /* Set txp.offset so that we can
+ * match max power value with max
+ * table index */
+ ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2);
+
+ /* Write settings on hw */
+ ath5k_setup_pcdac_table(ah);
+ break;
+ case AR5K_PWRTABLE_PWR_TO_PCDAC:
+ /* We are done for RF5111 since it has only
+ * one curve, just fit the curve on the table */
+ ath5k_fill_pwr_to_pcdac_table(ah, table_min, table_max);
+
+ /* No rate powertable adjustment for RF5111 */
+ ah->ah_txpower.txp_min_idx = 0;
+ ah->ah_txpower.txp_offset = 0;
+
+ /* Write settings on hw */
+ ath5k_setup_pcdac_table(ah);
+ break;
+ case AR5K_PWRTABLE_PWR_TO_PDADC:
+ /* Set PDADC boundaries and fill
+ * final PDADC table */
+ ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max,
+ ee->ee_pd_gains[ee_mode]);
+
+ /* Write settings on hw */
+ ath5k_setup_pwr_to_pdadc_table(ah, pdg, pdg_curve_to_idx);
+
+ /* Set txp.offset, note that table_min
+ * can be negative */
+ ah->ah_txpower.txp_offset = table_min[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Per-rate tx power setting
+ *
+ * This is the code that sets the desired tx power (below
+ * maximum) on hw for each rate (we also have TPC that sets
+ * power per packet). We do that by providing an index on the
+ * PCDAC/PDADC table we set up.
+ */
+
+/*
+ * Set rate power table
+ *
+ * For now we only limit txpower based on maximum tx power
+ * supported by hw (what's inside rate_info). We need to limit
+ * this even more, based on regulatory domain etc.
+ *
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
+ * and is indexed as follows:
+ * rates[0] - rates[7] -> OFDM rates
+ * rates[8] - rates[14] -> CCK rates
+ * rates[15] -> XR rates (they all have the same power)
+ */
+static void
+ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
+ struct ath5k_rate_pcal_info *rate_info,
+ u8 ee_mode)
+{
+ unsigned int i;
+ u16 *rates;
+
+ /* max_pwr is power level we got from driver/user in 0.5dB
+ * units, switch to 0.25dB units so we can compare */
+ max_pwr *= 2;
+ max_pwr = min(max_pwr, (u16) ah->ah_txpower.txp_max_pwr) / 2;
+
+ /* apply rate limits */
+ rates = ah->ah_txpower.txp_rates_power_table;
+
+ /* OFDM rates 6 to 24Mb/s */
+ for (i = 0; i < 5; i++)
+ rates[i] = min(max_pwr, rate_info->target_power_6to24);
+
+ /* Remaining OFDM rates */
+ rates[5] = min(rates[0], rate_info->target_power_36);
+ rates[6] = min(rates[0], rate_info->target_power_48);
+ rates[7] = min(rates[0], rate_info->target_power_54);
+
+ /* CCK rates */
+ /* 1L */
+ rates[8] = min(rates[0], rate_info->target_power_6to24);
+ /* 2L */
+ rates[9] = min(rates[0], rate_info->target_power_36);
+ /* 2S */
+ rates[10] = min(rates[0], rate_info->target_power_36);
+ /* 5L */
+ rates[11] = min(rates[0], rate_info->target_power_48);
+ /* 5S */
+ rates[12] = min(rates[0], rate_info->target_power_48);
+ /* 11L */
+ rates[13] = min(rates[0], rate_info->target_power_54);
+ /* 11S */
+ rates[14] = min(rates[0], rate_info->target_power_54);
+
+ /* XR rates */
+ rates[15] = min(rates[0], rate_info->target_power_6to24);
+
+ /* CCK rates have a different peak-to-average ratio
+ * so we have to tweak their power so that gainf
+ * correction works correctly. For this we use the OFDM to
+ * CCK delta from eeprom */
+ if ((ee_mode == AR5K_EEPROM_MODE_11G) &&
+ (ah->ah_phy_revision < AR5K_SREV_PHY_5212A))
+ for (i = 8; i <= 15; i++)
+ rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
+
+ ah->ah_txpower.txp_min_pwr = rates[7];
+ ah->ah_txpower.txp_max_pwr = rates[0];
+ ah->ah_txpower.txp_ofdm = rates[7];
+}
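
The unit juggling at the top of this function (a 0.5 dB user cap, a 0.25 dB internal maximum, 0.5 dB per-rate targets) is subtle; here is a compact sketch of the same clamping for a single rate, with hypothetical names:

	/* Illustrative sketch: clamp one per-rate target (0.5 dB steps) to the
	 * smaller of the user cap (0.5 dB steps) and the hw maximum
	 * (0.25 dB steps). */
	static u16 clamp_rate_pwr_sketch(u16 user_cap, u16 txp_max_pwr, u16 target)
	{
		u16 cap = user_cap * 2;			/* 0.5 dB -> 0.25 dB units */

		if (cap > txp_max_pwr)
			cap = txp_max_pwr;
		cap /= 2;				/* back to 0.5 dB units */

		return (target < cap) ? target : cap;
	}
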
+
+
+/*
+ * Set transmission power
+ */
+int
+ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 ee_mode, u8 txpower)
+{
+ struct ath5k_rate_pcal_info rate_info;
+ u8 type;
+ int ret;
+
+ ATH5K_TRACE(ah->ah_sc);
+ if (txpower > AR5K_TUNE_MAX_TXPOWER) {
+ ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
+ return -EINVAL;
+ }
+ if (txpower == 0)
+ txpower = AR5K_TUNE_DEFAULT_TXPOWER;
+
+ /* Reset TX power values */
+ memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+ ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
+ ah->ah_txpower.txp_min_pwr = 0;
+ ah->ah_txpower.txp_max_pwr = AR5K_TUNE_MAX_TXPOWER;
+
+ /* Initialize TX power table */
+ switch (ah->ah_radio) {
+ case AR5K_RF5111:
+ type = AR5K_PWRTABLE_PWR_TO_PCDAC;
+ break;
+ case AR5K_RF5112:
+ type = AR5K_PWRTABLE_LINEAR_PCDAC;
+ break;
+ case AR5K_RF2413:
+ case AR5K_RF5413:
+ case AR5K_RF2316:
+ case AR5K_RF2317:
+ case AR5K_RF2425:
+ type = AR5K_PWRTABLE_PWR_TO_PDADC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* FIXME: Only on channel/mode change */
+ ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type);
+ if (ret)
+ return ret;
+
+ /* Limit max power if we have a CTL available */
+ ath5k_get_max_ctl_power(ah, channel);
+
+ /* FIXME: Tx power limit for this regdomain
+ * XXX: Mac80211/CRDA will do that anyway ? */
+
+ /* FIXME: Antenna reduction stuff */
+
+ /* FIXME: Limit power on turbo modes */
+
+ /* FIXME: TPC scale reduction */
+
+ /* Get surrounding channels for per-rate power table
+ * calibration */
+ ath5k_get_rate_pcal_data(ah, channel, &rate_info);
+
+ /* Setup rate power table */
+ ath5k_setup_rate_powertable(ah, txpower, &rate_info, ee_mode);
+
+ /* Write rate power table on hw */
ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) |
AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) |
AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1);
@@ -1536,26 +2566,34 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) |
AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4);
- if (ah->ah_txpower.txp_tpc)
+ /* FIXME: TPC support */
+ if (ah->ah_txpower.txp_tpc) {
ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE |
AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
- else
+
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_ACK) |
+ AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CTS) |
+ AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
+ AR5K_TPC);
+ } else {
ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
+ }
return 0;
}
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power)
+int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 mode, u8 txpower)
{
/*Just a try M.F.*/
struct ieee80211_channel *channel = &ah->ah_current_channel;
ATH5K_TRACE(ah->ah_sc);
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
- "changing txpower to %d\n", power);
+ "changing txpower to %d\n", txpower);
- return ath5k_hw_txpower(ah, channel, power);
+ return ath5k_hw_txpower(ah, channel, mode, txpower);
}
#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 2dc008e10226..7070d1543cdc 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -1554,6 +1554,19 @@
/*===5212 Specific PCU registers===*/
/*
+ * Transmit power control register
+ */
+#define AR5K_TPC 0x80e8
+#define AR5K_TPC_ACK 0x0000003f /* ack frames */
+#define AR5K_TPC_ACK_S 0
+#define AR5K_TPC_CTS 0x00003f00 /* cts frames */
+#define AR5K_TPC_CTS_S 8
+#define AR5K_TPC_CHIRP 0x003f0000 /* chirp frames */
+#define AR5K_TPC_CHIRP_S 16
+#define AR5K_TPC_DOPPLER 0x0f000000 /* doppler chirp span */
+#define AR5K_TPC_DOPPLER_S 24
+
+/*
* XR (eXtended Range) mode register
*/
#define AR5K_XRMODE 0x80c0 /* Register Address */
@@ -2550,6 +2563,12 @@
#define AR5K_PHY_TPC_RG1 0xa258
#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000
#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14
+#define AR5K_PHY_TPC_RG1_PDGAIN_1 0x00030000
+#define AR5K_PHY_TPC_RG1_PDGAIN_1_S 16
+#define AR5K_PHY_TPC_RG1_PDGAIN_2 0x000c0000
+#define AR5K_PHY_TPC_RG1_PDGAIN_2_S 18
+#define AR5K_PHY_TPC_RG1_PDGAIN_3 0x00300000
+#define AR5K_PHY_TPC_RG1_PDGAIN_3_S 20
#define AR5K_PHY_TPC_RG5 0xa26C
#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 685dc213edae..7a17d31b2fd9 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -664,29 +664,35 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel, u8 *ant, u8 ee_mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ s16 cck_ofdm_pwr_delta;
- /* Set CCK to OFDM power delta */
- if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
- int16_t cck_ofdm_pwr_delta;
-
- /* Adjust power delta for channel 14 */
- if (channel->center_freq == 2484)
- cck_ofdm_pwr_delta =
- ((ee->ee_cck_ofdm_power_delta -
- ee->ee_scaled_cck_delta) * 2) / 10;
- else
- cck_ofdm_pwr_delta =
- (ee->ee_cck_ofdm_power_delta * 2) / 10;
+ /* Adjust power delta for channel 14 */
+ if (channel->center_freq == 2484)
+ cck_ofdm_pwr_delta =
+ ((ee->ee_cck_ofdm_power_delta -
+ ee->ee_scaled_cck_delta) * 2) / 10;
+ else
+ cck_ofdm_pwr_delta =
+ (ee->ee_cck_ofdm_power_delta * 2) / 10;
+ /* Set CCK to OFDM power delta on tx power
+ * adjustment register */
+ if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
if (channel->hw_value == CHANNEL_G)
ath5k_hw_reg_write(ah,
- AR5K_REG_SM((ee->ee_cck_ofdm_power_delta * -1),
+ AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1),
AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) |
AR5K_REG_SM((cck_ofdm_pwr_delta * -1),
AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX),
AR5K_PHY_TX_PWR_ADJ);
else
ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ);
+ } else {
+ /* For older revs we scale power in software during tx power
+ * setup */
+ ah->ah_txpower.txp_cck_ofdm_pwr_delta = cck_ofdm_pwr_delta;
+ ah->ah_txpower.txp_cck_ofdm_gainf_delta =
+ ee->ee_cck_ofdm_gain_delta;
}
/* Set antenna idle switch table */
@@ -994,7 +1000,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/*
* Set TX power (FIXME)
*/
- ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER);
+ ret = ath5k_hw_txpower(ah, channel, ee_mode,
+ AR5K_TUNE_DEFAULT_TXPOWER);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath9k/ahb.c b/drivers/net/wireless/ath9k/ahb.c
index 00cc7bb01f2e..0e65c51ba176 100644
--- a/drivers/net/wireless/ath9k/ahb.c
+++ b/drivers/net/wireless/ath9k/ahb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
* Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
* Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
*
diff --git a/drivers/net/wireless/ath9k/ani.c b/drivers/net/wireless/ath9k/ani.c
index a39eb760cbb7..6c5e887d50d7 100644
--- a/drivers/net/wireless/ath9k/ani.c
+++ b/drivers/net/wireless/ath9k/ani.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/ani.h b/drivers/net/wireless/ath9k/ani.h
index 7315761f6d74..08b4e7ed5ff0 100644
--- a/drivers/net/wireless/ath9k/ani.h
+++ b/drivers/net/wireless/ath9k/ani.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index b64be8e9a690..2689a08a2844 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -295,13 +295,9 @@ struct ath_tx_control {
enum ath9k_internal_frame_type frame_type;
};
-struct ath_xmit_status {
- int retries;
- int flags;
#define ATH_TX_ERROR 0x01
#define ATH_TX_XRETRY 0x02
#define ATH_TX_BAR 0x04
-};
/* All RSSI values are noise floor adjusted */
struct ath_tx_stat {
@@ -390,6 +386,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
struct ath_vif {
int av_bslot;
+ __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
enum nl80211_iftype av_opmode;
struct ath_buf *av_bcbuf;
struct ath_tx_control av_btxctl;
@@ -406,7 +403,7 @@ struct ath_vif {
* number of beacon intervals, the game's up.
*/
#define BSTUCK_THRESH (9 * ATH_BCBUF)
-#define ATH_BCBUF 1
+#define ATH_BCBUF 4
#define ATH_DEFAULT_BINTVAL 100 /* TU */
#define ATH_DEFAULT_BMISS_LIMIT 10
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 039c78136c50..ec995730632d 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -70,7 +70,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
ds = bf->bf_desc;
flags = ATH9K_TXDESC_NOACK;
- if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC &&
+ if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
+ (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) &&
(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
ds->ds_link = bf->bf_daddr; /* self-linked */
flags |= ATH9K_TXDESC_VEOL;
@@ -153,6 +154,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
bf->bf_mpdu = skb;
if (skb == NULL)
return NULL;
+ ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
+ avp->tsf_adjust;
info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -253,7 +256,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
{
struct ath_softc *sc = aphy->sc;
struct ath_vif *avp;
- struct ieee80211_hdr *hdr;
struct ath_buf *bf;
struct sk_buff *skb;
__le64 tstamp;
@@ -316,42 +318,33 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
-
- /*
- * Calculate a TSF adjustment factor required for
- * staggered beacons. Note that we assume the format
- * of the beacon frame leaves the tstamp field immediately
- * following the header.
- */
+ /* Calculate a TSF adjustment factor required for staggered beacons. */
if (avp->av_bslot > 0) {
u64 tsfadjust;
- __le64 val;
int intval;
intval = sc->hw->conf.beacon_int ?
sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
/*
- * The beacon interval is in TU's; the TSF in usecs.
- * We figure out how many TU's to add to align the
- * timestamp then convert to TSF units and handle
- * byte swapping before writing it in the frame.
- * The hardware will then add this each time a beacon
- * frame is sent. Note that we align vif's 1..N
- * and leave vif 0 untouched. This means vap 0
- * has a timestamp in one beacon interval while the
- * others get a timestamp aligned to the next interval.
+ * Calculate the TSF offset for this beacon slot, i.e., the
+ * number of usecs that need to be added to the timestamp field
+ * in Beacon and Probe Response frames. Beacon slot 0 is
+ * processed at the correct offset, so it does not require TSF
+ * adjustment. Other slots are adjusted to get the timestamp
+ * close to the TBTT for the BSS.
*/
- tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF;
- val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */
+ tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
DPRINTF(sc, ATH_DBG_BEACON,
"stagger beacons, bslot %d intval %u tsfadjust %llu\n",
avp->av_bslot, intval, (unsigned long long)tsfadjust);
- hdr = (struct ieee80211_hdr *)skb->data;
- memcpy(&hdr[1], &val, sizeof(val));
- }
+ ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
+ avp->tsf_adjust;
+ } else
+ avp->tsf_adjust = cpu_to_le64(0);
bf->bf_mpdu = skb;
bf->bf_buf_addr = bf->bf_dmacontext =
@@ -447,8 +440,16 @@ void ath_beacon_tasklet(unsigned long data)
tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf);
slot = ((tsftu % intval) * ATH_BCBUF) / intval;
- vif = sc->beacon.bslot[(slot + 1) % ATH_BCBUF];
- aphy = sc->beacon.bslot_aphy[(slot + 1) % ATH_BCBUF];
+ /*
+ * Reverse the slot order to get slot 0 on the TBTT offset that does
+ * not require TSF adjustment and other slots adding
+ * slot/ATH_BCBUF * beacon_int to timestamp. For example, with
+ * ATH_BCBUF = 4, we process beacon slots as follows: 3 2 1 0 3 2 1 ..
+ * and slot 0 is at correct offset to TBTT.
+ */
+ slot = ATH_BCBUF - slot - 1;
+ vif = sc->beacon.bslot[slot];
+ aphy = sc->beacon.bslot_aphy[slot];
DPRINTF(sc, ATH_DBG_BEACON,
"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -728,6 +729,7 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
ath_beacon_config_ap(sc, &conf, avp);
break;
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
ath_beacon_config_adhoc(sc, &conf, avp, vif);
break;
case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath9k/calib.c b/drivers/net/wireless/ath9k/calib.c
index c9446fb6b153..e2d62e97131c 100644
--- a/drivers/net/wireless/ath9k/calib.c
+++ b/drivers/net/wireless/ath9k/calib.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/calib.h b/drivers/net/wireless/ath9k/calib.h
index 32589e0c5018..1c74bd50700d 100644
--- a/drivers/net/wireless/ath9k/calib.h
+++ b/drivers/net/wireless/ath9k/calib.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/debug.c b/drivers/net/wireless/ath9k/debug.c
index 82573cadb1ab..fdf9528fa49b 100644
--- a/drivers/net/wireless/ath9k/debug.c
+++ b/drivers/net/wireless/ath9k/debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/debug.h b/drivers/net/wireless/ath9k/debug.h
index 065268b8568f..7b0e5419d2bc 100644
--- a/drivers/net/wireless/ath9k/debug.h
+++ b/drivers/net/wireless/ath9k/debug.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c
index 183c949bcca1..ffc36b0361c7 100644
--- a/drivers/net/wireless/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath9k/eeprom.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -342,8 +342,7 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
{
#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
- struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
- u16 *eep_data;
+ u16 *eep_data = (u16 *)&ah->eeprom.map4k;
int addr, eep_start_loc = 0;
eep_start_loc = 64;
@@ -353,8 +352,6 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
"Reading from EEPROM, not flash\n");
}
- eep_data = (u16 *)eep;
-
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) {
DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
@@ -363,6 +360,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
}
eep_data++;
}
+
return true;
#undef SIZE_EEPROM_4K
}
@@ -379,16 +377,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
if (!ath9k_hw_use_flash(ah)) {
-
if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
&magic)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Reading Magic # failed\n");
return false;
}
DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -401,16 +398,9 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
temp = swab16(*eepdata);
*eepdata = temp;
eepdata++;
-
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "0x%04X ", *eepdata);
-
- if (((addr + 1) % 6) == 0)
- DPRINTF(ah->ah_sc,
- ATH_DBG_EEPROM, "\n");
}
} else {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Invalid EEPROM Magic. "
"endianness mismatch.\n");
return -EINVAL;
@@ -426,7 +416,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
else
el = ah->eeprom.map4k.baseEepHeader.length;
- if (el > sizeof(struct ar5416_eeprom_def))
+ if (el > sizeof(struct ar5416_eeprom_4k))
el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16);
else
el = el / sizeof(u16);
@@ -441,7 +431,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
u16 word;
DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "EEPROM Endianness is not native.. Changing \n");
+ "EEPROM Endianness is not native.. Changing\n");
word = swab16(eep->baseEepHeader.length);
eep->baseEepHeader.length = word;
@@ -483,7 +473,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Bad EEPROM checksum 0x%x or revision 0x%04x\n",
sum, ah->eep_ops->get_eeprom_ver(ah));
return -EINVAL;
@@ -1203,57 +1193,63 @@ static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
}
}
-static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
- struct ath9k_channel *chan)
+static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
+ struct modal_eep_4k_header *pModal,
+ struct ar5416_eeprom_4k *eep,
+ u8 txRxAttenLocal, int regChainOffset)
{
- struct modal_eep_4k_header *pModal;
- struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
- int regChainOffset;
- u8 txRxAttenLocal;
- u8 ob[5], db1[5], db2[5];
- u8 ant_div_control1, ant_div_control2;
- u32 regVal;
-
-
- pModal = &eep->modalHeader;
-
- txRxAttenLocal = 23;
-
- REG_WRITE(ah, AR_PHY_SWITCH_COM,
- ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
-
- regChainOffset = 0;
REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
pModal->antCtrlChain[0]);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
- (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
- ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
- AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
- SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
- SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+ (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
+ ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
+ SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_3) {
+ AR5416_EEP_MINOR_VER_3) {
txRxAttenLocal = pModal->txRxAttenCh[0];
+
REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
- pModal->xatten2Margin[0]);
+ AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
+ pModal->xatten2Margin[0]);
REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
+ AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
}
REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
- AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
+ AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
- AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
+ AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
if (AR_SREV_9285_11(ah))
REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
+}
+
+static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct modal_eep_4k_header *pModal;
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
+ u8 txRxAttenLocal;
+ u8 ob[5], db1[5], db2[5];
+ u8 ant_div_control1, ant_div_control2;
+ u32 regVal;
+
+ pModal = &eep->modalHeader;
+ txRxAttenLocal = 23;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_COM,
+ ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
+
+ /* Single chain for 4K EEPROM */
+ ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal, 0);
/* Initialize Ant Diversity settings from EEPROM */
if (pModal->version == 3) {
@@ -1295,9 +1291,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
db2[4] = ((pModal->db2_234 >> 8) & 0xf);
} else if (pModal->version == 1) {
-
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "EEPROM Model version is set to 1 \n");
ob[0] = (pModal->ob_01 & 0xf);
ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf;
db1[0] = (pModal->db1_01 & 0xf);
@@ -1385,8 +1378,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
AR_PHY_SETTLING_SWITCH,
pModal->swSettleHt40);
}
-
- return true;
}
static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
@@ -1464,16 +1455,13 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
{
#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
- struct ar5416_eeprom_def *eep = &ah->eeprom.def;
- u16 *eep_data;
+ u16 *eep_data = (u16 *)&ah->eeprom.def;
int addr, ar5416_eep_start_loc = 0x100;
- eep_data = (u16 *)eep;
-
for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
eep_data)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Unable to read eeprom region\n");
return false;
}
@@ -1492,17 +1480,14 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
bool need_swap = false;
int i, addr, size;
- if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
- &magic)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "Reading Magic # failed\n");
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n");
return false;
}
if (!ath9k_hw_use_flash(ah)) {
-
DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -1516,18 +1501,11 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
temp = swab16(*eepdata);
*eepdata = temp;
eepdata++;
-
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "0x%04X ", *eepdata);
-
- if (((addr + 1) % 6) == 0)
- DPRINTF(ah->ah_sc,
- ATH_DBG_EEPROM, "\n");
}
} else {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Invalid EEPROM Magic. "
- "endianness mismatch.\n");
+ "Endianness mismatch.\n");
return -EINVAL;
}
}
@@ -1556,7 +1534,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
u16 word;
DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "EEPROM Endianness is not native.. Changing \n");
+ "EEPROM Endianness is not native.. Changing.\n");
word = swab16(eep->baseEepHeader.length);
eep->baseEepHeader.length = word;
@@ -1602,7 +1580,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Bad EEPROM checksum 0x%x or revision 0x%04x\n",
sum, ah->eep_ops->get_eeprom_ver(ah));
return -EINVAL;
@@ -1614,7 +1592,6 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
enum eeprom_param param)
{
-#define AR5416_VER_MASK (pBase->version & AR5416_EEP_VER_MINOR_MASK)
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct modal_eep_header *pModal = eep->modalHeader;
struct base_eep_header *pBase = &eep->baseEepHeader;
@@ -1681,21 +1658,73 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
default:
return 0;
}
-#undef AR5416_VER_MASK
}
-/* XXX: Clean me up, make me more legible */
-static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
+static void ath9k_hw_def_set_gain(struct ath_hw *ah,
+ struct modal_eep_header *pModal,
+ struct ar5416_eeprom_def *eep,
+ u8 txRxAttenLocal, int regChainOffset, int i)
+{
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
+ txRxAttenLocal = pModal->txRxAttenCh[i];
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
+ pModal->bswMargin[i]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB,
+ pModal->bswAtten[i]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
+ pModal->xatten2Margin[i]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_DB,
+ pModal->xatten2Db[i]);
+ } else {
+ REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
+ | SM(pModal-> bswMargin[i],
+ AR_PHY_GAIN_2GHZ_BSW_MARGIN));
+ REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
+ | SM(pModal->bswAtten[i],
+ AR_PHY_GAIN_2GHZ_BSW_ATTEN));
+ }
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
+ REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
+ } else {
+ REG_WRITE(ah,
+ AR_PHY_RXGAIN + regChainOffset,
+ (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) &
+ ~AR_PHY_RXGAIN_TXRX_ATTEN)
+ | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN));
+ REG_WRITE(ah,
+ AR_PHY_GAIN_2GHZ + regChainOffset,
+ (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
+ SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
+ }
+}
+
+static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
-#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
struct modal_eep_header *pModal;
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
int i, regChainOffset;
u8 txRxAttenLocal;
pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
-
txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
REG_WRITE(ah, AR_PHY_SWITCH_COM,
@@ -1708,8 +1737,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
}
if (AR_SREV_5416_20_OR_LATER(ah) &&
- (ah->rxchainmask == 5 || ah->txchainmask == 5)
- && (i != 0))
+ (ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0))
regChainOffset = (i == 1) ? 0x2000 : 0x1000;
else
regChainOffset = i * 0x1000;
@@ -1718,9 +1746,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
pModal->antCtrlChain[i]);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
- (REG_READ(ah,
- AR_PHY_TIMING_CTRL4(0) +
- regChainOffset) &
+ (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
SM(pModal->iqCalICh[i],
@@ -1728,87 +1754,9 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
SM(pModal->iqCalQCh[i],
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
- if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
- txRxAttenLocal = pModal->txRxAttenCh[i];
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- REG_RMW_FIELD(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
- pModal->
- bswMargin[i]);
- REG_RMW_FIELD(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN1_DB,
- pModal->
- bswAtten[i]);
- REG_RMW_FIELD(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
- pModal->
- xatten2Margin[i]);
- REG_RMW_FIELD(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset,
- AR_PHY_GAIN_2GHZ_XATTEN2_DB,
- pModal->
- xatten2Db[i]);
- } else {
- REG_WRITE(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset,
- (REG_READ(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset) &
- ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
- | SM(pModal->
- bswMargin[i],
- AR_PHY_GAIN_2GHZ_BSW_MARGIN));
- REG_WRITE(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset,
- (REG_READ(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset) &
- ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
- | SM(pModal->bswAtten[i],
- AR_PHY_GAIN_2GHZ_BSW_ATTEN));
- }
- }
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- REG_RMW_FIELD(ah,
- AR_PHY_RXGAIN +
- regChainOffset,
- AR9280_PHY_RXGAIN_TXRX_ATTEN,
- txRxAttenLocal);
- REG_RMW_FIELD(ah,
- AR_PHY_RXGAIN +
- regChainOffset,
- AR9280_PHY_RXGAIN_TXRX_MARGIN,
- pModal->rxTxMarginCh[i]);
- } else {
- REG_WRITE(ah,
- AR_PHY_RXGAIN + regChainOffset,
- (REG_READ(ah,
- AR_PHY_RXGAIN +
- regChainOffset) &
- ~AR_PHY_RXGAIN_TXRX_ATTEN) |
- SM(txRxAttenLocal,
- AR_PHY_RXGAIN_TXRX_ATTEN));
- REG_WRITE(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset,
- (REG_READ(ah,
- AR_PHY_GAIN_2GHZ +
- regChainOffset) &
- ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
- SM(pModal->rxTxMarginCh[i],
- AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
- }
- }
+ if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah))
+ ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal,
+ regChainOffset, i);
}
if (AR_SREV_9280_10_OR_LATER(ah)) {
@@ -1855,8 +1803,6 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
AR_AN_TOP2_LOCALBIAS,
AR_AN_TOP2_LOCALBIAS_S,
pModal->local_bias);
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n",
- pModal->force_xpaon);
REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
pModal->force_xpaon);
}
@@ -1882,6 +1828,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
pModal->txEndToRxOn);
+
if (AR_SREV_9280_10_OR_LATER(ah)) {
REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
pModal->thresh62);
@@ -1912,10 +1859,10 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
}
if (AR_SREV_9280_20_OR_LATER(ah) &&
- AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+ AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL,
- AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK,
- pModal->miscBits);
+ AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK,
+ pModal->miscBits);
if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) {
@@ -1926,18 +1873,15 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0);
else
REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
- eep->baseEepHeader.dacLpMode);
+ eep->baseEepHeader.dacLpMode);
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
- pModal->miscBits >> 2);
+ pModal->miscBits >> 2);
REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9,
- AR_PHY_TX_DESIRED_SCALE_CCK,
- eep->baseEepHeader.desiredScaleCCK);
+ AR_PHY_TX_DESIRED_SCALE_CCK,
+ eep->baseEepHeader.desiredScaleCCK);
}
-
- return true;
-#undef AR5416_VER_MASK
}
static void ath9k_hw_def_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath9k/eeprom.h
index d6f6108f63c7..25b68c881ff1 100644
--- a/drivers/net/wireless/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath9k/eeprom.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -95,6 +95,7 @@
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
+#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
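A side note on the two macros in the hunk above: FREQ2FBIN() packs a channel frequency into the one-byte bin format used by the EEPROM tables, and the newly added AR5416_VER_MASK expands to an expression that reads a local variable named eep, so it only compiles inside functions that have such a variable in scope (as ath9k_hw_def_set_board_values above does). Below is a minimal standalone sketch of the frequency-to-bin arithmetic; the example frequencies are illustrative, not taken from the patch.

/* Standalone sketch of FREQ2FBIN(); example frequencies are illustrative. */
#include <stdio.h>

#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))

int main(void)
{
	/* 2.4 GHz (y = 1): bins are an offset from 2300 MHz, e.g. 2412 -> 112 */
	printf("2412 MHz -> %d\n", FREQ2FBIN(2412, 1));
	/* 5 GHz (y = 0): bins count 5 MHz steps above 4800 MHz, e.g. 5180 -> 76 */
	printf("5180 MHz -> %d\n", FREQ2FBIN(5180, 0));
	return 0;
}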
@@ -489,7 +490,7 @@ struct eeprom_ops {
u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band);
u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
struct ath9k_channel *chan);
- bool (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
+ void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
u16 cfgCtl, u8 twiceAntennaReduction,
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index d494e98ba971..b15eaf8417ff 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -588,6 +588,10 @@ static int ath9k_hw_post_attach(struct ath_hw *ah)
ecode = ath9k_hw_eeprom_attach(ah);
if (ecode != 0)
return ecode;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n",
+ ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah));
+
ecode = ath9k_hw_rfattach(ah);
if (ecode != 0)
return ecode;
@@ -1444,6 +1448,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
| AR_STA_ID1_KSRCH_MODE);
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
@@ -2273,11 +2278,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
else
ath9k_hw_spur_mitigate(ah, chan);
- if (!ah->eep_ops->set_board_values(ah, chan)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "error setting board options\n");
- return -EIO;
- }
+ ah->eep_ops->set_board_values(ah, chan);
ath9k_hw_decrease_chain_power(ah, chan);
@@ -3149,6 +3150,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
flags |= AR_TBTT_TIMER_EN;
break;
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
REG_WRITE(ah, AR_NEXT_NDP_TIMER,
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index dc681f011fdf..0b594e0ee260 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
index 1d60c3706f1c..e2f0a34b79a1 100644
--- a/drivers/net/wireless/ath9k/initvals.h
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/mac.c b/drivers/net/wireless/ath9k/mac.c
index f757bc7eec68..e0a6dee45839 100644
--- a/drivers/net/wireless/ath9k/mac.c
+++ b/drivers/net/wireless/ath9k/mac.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/mac.h b/drivers/net/wireless/ath9k/mac.h
index a75f65dae1d7..1176bce8b76c 100644
--- a/drivers/net/wireless/ath9k/mac.h
+++ b/drivers/net/wireless/ath9k/mac.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 8db75f6de53e..13d4e6756c99 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -940,18 +940,25 @@ static void ath_led_blink_work(struct work_struct *work)
if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
return;
- ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
+
+ if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
+ else
+ ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
+ (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
(sc->sc_flags & SC_OP_LED_ON) ?
msecs_to_jiffies(sc->led_off_duration) :
msecs_to_jiffies(sc->led_on_duration));
- sc->led_on_duration =
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25);
- sc->led_off_duration =
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10);
+ sc->led_on_duration = sc->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ sc->led_off_duration = sc->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
sc->led_on_cnt = sc->led_off_cnt = 0;
if (sc->sc_flags & SC_OP_LED_ON)
sc->sc_flags &= ~SC_OP_LED_ON;
@@ -1592,7 +1599,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
hw->wiphy->reg_notifier = ath9k_reg_notifier;
hw->wiphy->strict_regulatory = true;
@@ -2200,18 +2208,13 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ic_opmode = NL80211_IFTYPE_STATION;
break;
case NL80211_IFTYPE_ADHOC:
- if (sc->nbcnvifs >= ATH_BCBUF) {
- ret = -ENOBUFS;
- goto out;
- }
- ic_opmode = NL80211_IFTYPE_ADHOC;
- break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
if (sc->nbcnvifs >= ATH_BCBUF) {
ret = -ENOBUFS;
goto out;
}
- ic_opmode = NL80211_IFTYPE_AP;
+ ic_opmode = conf->type;
break;
default:
DPRINTF(sc, ATH_DBG_FATAL,
@@ -2247,7 +2250,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
* Note we only do this (at the moment) for station mode.
*/
if ((conf->type == NL80211_IFTYPE_STATION) ||
- (conf->type == NL80211_IFTYPE_ADHOC)) {
+ (conf->type == NL80211_IFTYPE_ADHOC) ||
+ (conf->type == NL80211_IFTYPE_MESH_POINT)) {
if (ath9k_hw_phycounters(sc->sc_ah))
sc->imask |= ATH9K_INT_MIB;
sc->imask |= ATH9K_INT_TSFOOR;
@@ -2294,8 +2298,9 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
del_timer_sync(&sc->ani.timer);
/* Reclaim beacon resources */
- if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
- sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) {
+ if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
+ (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
+ (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
ath_beacon_return(sc, avp);
}
@@ -2428,6 +2433,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
/* Set BSSID */
memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
memcpy(avp->bssid, conf->bssid, ETH_ALEN);
@@ -2451,7 +2457,8 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
}
if ((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_AP)) {
+ (vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
if ((conf->changed & IEEE80211_IFCC_BEACON) ||
(conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
conf->enable_beacon)) {
@@ -2723,7 +2730,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_RESUME:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
ath_tx_aggr_resume(sc, sta, tid);
break;
default:
diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c
index 9a58baabb9ca..6dbc58580abb 100644
--- a/drivers/net/wireless/ath9k/pci.c
+++ b/drivers/net/wireless/ath9k/pci.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -87,7 +87,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
- u32 val;
int ret = 0;
struct ath_hw *ah;
@@ -134,14 +133,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- /*
- * Disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state.
- */
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-
ret = pci_request_region(pdev, 0, "ath9k");
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -253,21 +244,12 @@ static int ath_pci_resume(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
- u32 val;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_restore_state(pdev);
- /*
- * Suspend/Resume resets the PCI configuration space, so we have to
- * re-disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state
- */
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* Enable LED */
ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
index e1494bae0f9f..8bcba906929a 100644
--- a/drivers/net/wireless/ath9k/phy.c
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 1eac8c707342..0f7f8e0c9c95 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 832735677a46..824ccbb8b7b8 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2004-2008 Atheros Communications, Inc.
+ * Copyright (c) 2004-2009 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -864,6 +864,8 @@ static void ath_rc_ratefind(struct ath_softc *sc,
rate_table, nrix, 1, 0);
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
try_per_rate, nrix, 0);
+
+ tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
try_per_rate = (ATH_11N_TXMAXTRY/4);
		/* Set the chosen rate. No RTS for first series entry. */
@@ -1468,16 +1470,18 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->ht_cap);
}
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, bool is_ht, bool is_cw40,
- bool is_sgi40)
+static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
+ bool is_cw40, bool is_sgi40)
{
u8 caps = 0;
- if (is_ht) {
+ if (sta->ht_cap.ht_supported) {
caps = WLAN_RC_HT_FLAG;
if (sc->sc_ah->caps.tx_chainmask != 1 &&
- ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL))
- caps |= WLAN_RC_DS_FLAG;
+ ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) {
+ if (sta->ht_cap.mcs.rx_mask[1])
+ caps |= WLAN_RC_DS_FLAG;
+ }
if (is_cw40)
caps |= WLAN_RC_40_FLAG;
if (is_sgi40)
@@ -1615,6 +1619,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
/* Choose rate table first */
if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) ||
+ (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) {
rate_table = ath_choose_rate_table(sc, sband->band,
sta->ht_cap.ht_supported,
@@ -1624,8 +1629,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
rate_table = sc->cur_rate_table;
}
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta->ht_cap.ht_supported,
- is_cw40, is_sgi40);
+ ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
}
@@ -1659,8 +1663,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
rate_table = ath_choose_rate_table(sc, sband->band,
sta->ht_cap.ht_supported,
oper_cw40);
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc,
- sta->ht_cap.ht_supported,
+ ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
oper_cw40, oper_sgi40);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index db9b0b9a3431..199a3ce57d64 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004 Sam Leffler, Errno Consulting
* Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 0bba17662a1f..71cb18d6757d 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -344,8 +344,13 @@ void ath_rx_cleanup(struct ath_softc *sc)
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
skb = bf->bf_mpdu;
- if (skb)
+ if (skb) {
+ dma_unmap_single(sc->dev,
+ bf->bf_buf_addr,
+ sc->rx.bufsize,
+ DMA_FROM_DEVICE);
dev_kfree_skb(skb);
+ }
}
if (sc->rx.rxdma.dd_desc_len != 0)
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index d86e90e38173..52605246679f 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
index b8f9b6d6bec4..4ca625102291 100644
--- a/drivers/net/wireless/ath9k/regd.c
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
index 8f885f3bc8df..9f5fbd4eea7a 100644
--- a/drivers/net/wireless/ath9k/regd.h
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
index b41d0002f3fe..4d0e298cd1c7 100644
--- a/drivers/net/wireless/ath9k/regd_common.h
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index e3f376611f85..689bdbf78808 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -64,6 +64,10 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
+static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
+ int txok);
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+ int nbad, int txok, bool update_rc);
/*********************/
/* Aggregation logic */
@@ -274,9 +278,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct ath_desc *ds = bf_last->bf_desc;
struct list_head bf_head, bf_pending;
- u16 seq_st = 0;
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
- int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
+ int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
+ bool rc_update = true;
skb = (struct sk_buff *)bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -316,6 +321,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_pending);
INIT_LIST_HEAD(&bf_head);
+ nbad = ath_tx_num_badfrms(sc, bf, txok);
while (bf) {
txfail = txpending = 0;
bf_next = bf->bf_next;
@@ -323,8 +329,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
/* transmit completion, subframe is
* acked by block ack */
+ acked_cnt++;
} else if (!isaggr && txok) {
/* transmit completion */
+ acked_cnt++;
} else {
if (!(tid->state & AGGR_CLEANUP) &&
ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
@@ -335,6 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.bf_type |= BUF_XRETRY;
txfail = 1;
sendbar = 1;
+ txfail_cnt++;
}
} else {
/*
@@ -361,6 +370,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, bf->bf_seqno);
spin_unlock_bh(&txq->axq_lock);
+ if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
+ ath_tx_rc_status(bf, ds, nbad, txok, true);
+ rc_update = false;
+ } else {
+ ath_tx_rc_status(bf, ds, nbad, txok, false);
+ }
+
ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
} else {
/* retry the un-acked ones */
@@ -1734,7 +1750,7 @@ exit:
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_xmit_status *tx_status)
+ int tx_flags)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1755,18 +1771,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->rate_driver_data[0] = NULL;
}
- if (tx_status->flags & ATH_TX_BAR) {
+ if (tx_flags & ATH_TX_BAR)
tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
- tx_status->flags &= ~ATH_TX_BAR;
- }
- if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
+ if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
/* Frame was ACKed */
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- tx_info->status.rates[0].count = tx_status->retries + 1;
-
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
padsize = hdrlen & 3;
if (padsize && hdrlen >= 24) {
@@ -1789,29 +1801,22 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
int txok, int sendbar)
{
struct sk_buff *skb = bf->bf_mpdu;
- struct ath_xmit_status tx_status;
unsigned long flags;
+ int tx_flags = 0;
- /*
- * Set retry information.
- * NB: Don't use the information in the descriptor, because the frame
- * could be software retried.
- */
- tx_status.retries = bf->bf_retries;
- tx_status.flags = 0;
if (sendbar)
- tx_status.flags = ATH_TX_BAR;
+ tx_flags = ATH_TX_BAR;
if (!txok) {
- tx_status.flags |= ATH_TX_ERROR;
+ tx_flags |= ATH_TX_ERROR;
if (bf_isxretried(bf))
- tx_status.flags |= ATH_TX_XRETRY;
+ tx_flags |= ATH_TX_XRETRY;
}
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
- ath_tx_complete(sc, skb, &tx_status);
+ ath_tx_complete(sc, skb, tx_flags);
/*
* Return the list of ath_buf of this mpdu to free queue
@@ -1852,27 +1857,40 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
return nbad;
}
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+ int nbad, int txok, bool update_rc)
{
struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
+ struct ieee80211_hw *hw = tx_info_priv->aphy->hw;
+ u8 i, tx_rateindex;
+
+ if (txok)
+ tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
- tx_info_priv->update_rc = false;
+ tx_rateindex = ds->ds_txstat.ts_rateindex;
+ WARN_ON(tx_rateindex >= hw->max_rates);
+
+ tx_info_priv->update_rc = update_rc;
if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
- (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
+ (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
if (ieee80211_is_data(hdr->frame_control)) {
memcpy(&tx_info_priv->tx, &ds->ds_txstat,
sizeof(tx_info_priv->tx));
tx_info_priv->n_frames = bf->bf_nframes;
tx_info_priv->n_bad_frames = nbad;
- tx_info_priv->update_rc = true;
}
}
+
+ for (i = tx_rateindex + 1; i < hw->max_rates; i++)
+ tx_info->status.rates[i].count = 0;
+
+ tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
}
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
@@ -1897,7 +1915,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_buf *bf, *lastbf, *bf_held = NULL;
struct list_head bf_head;
struct ath_desc *ds;
- int txok, nbad = 0;
+ int txok;
int status;
DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
@@ -1991,13 +2009,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
bf->bf_retries = ds->ds_txstat.ts_longretry;
if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- nbad = 0;
- } else {
- nbad = ath_tx_num_badfrms(sc, bf, txok);
+ ath_tx_rc_status(bf, ds, 0, txok, true);
}
- ath_tx_rc_status(bf, ds, nbad);
-
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b72ef3fd315a..4896e0831114 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3993,6 +3993,8 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev)
dev->irq_reason = 0;
memset(dev->dma_reason, 0, sizeof(dev->dma_reason));
dev->irq_savedstate = B43_IRQ_MASKTEMPLATE;
+ if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
+ dev->irq_savedstate &= ~B43_IRQ_PHY_TXERR;
dev->mac_suspended = 1;
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 0f53c7e5e01e..a63d88841df8 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -50,7 +50,7 @@ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
}
/* Extract the bitrate index out of an OFDM PLCP header. */
-static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
+static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
{
int base = aphy ? 0 : 4;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 205603d082aa..73f93a0ff2df 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -233,7 +233,7 @@ struct iwl3945_eeprom {
#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
#define TFD_QUEUE_MIN 0
-#define TFD_QUEUE_MAX 6
+#define TFD_QUEUE_MAX 5 /* 4 DATA + 1 CMD */
#define IWL_NUM_SCAN_RATES (2)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index f65c308a6714..af6b9d444778 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -124,7 +124,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
#define IWL39_RATE_HIGH_TH 11520
#define IWL_SUCCESS_UP_TH 8960
#define IWL_SUCCESS_DOWN_TH 10880
-#define IWL_RATE_MIN_FAILURE_TH 8
+#define IWL_RATE_MIN_FAILURE_TH 6
#define IWL_RATE_MIN_SUCCESS_TH 8
#define IWL_RATE_DECREASE_TH 1920
#define IWL_RATE_RETRY_TH 15
@@ -488,7 +488,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
IWL_DEBUG_RATE(priv, "enter\n");
- retries = info->status.rates[0].count - 1;
+ retries = info->status.rates[0].count;
/* Sanity Check for retries */
if (retries > IWL_RATE_RETRY_TH)
retries = IWL_RATE_RETRY_TH;
@@ -791,16 +791,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
scale_action = -1;
-
/* No throughput measured yet for adjacent rates,
* try increase */
} else if ((low_tpt == IWL_INVALID_VALUE) &&
(high_tpt == IWL_INVALID_VALUE)) {
- if (high != IWL_RATE_INVALID && window->success_counter >= IWL_RATE_INCREASE_TH)
+ if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
scale_action = 1;
else if (low != IWL_RATE_INVALID)
- scale_action = -1;
+ scale_action = 0;
/* Both adjacent throughputs are measured, but neither one has
* better throughput; we're using the best rate, don't change
@@ -826,14 +825,14 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
else {
IWL_DEBUG_RATE(priv,
"decrease rate because of high tpt\n");
- scale_action = -1;
+ scale_action = 0;
}
} else if (low_tpt != IWL_INVALID_VALUE) {
if (low_tpt > current_tpt) {
IWL_DEBUG_RATE(priv,
"decrease rate because of low tpt\n");
scale_action = -1;
- } else if (window->success_counter >= IWL_RATE_INCREASE_TH) {
+ } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
/* Lower rate has better
* throughput,decrease rate */
scale_action = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index ba7e720e73c1..2399328e8de7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -293,7 +293,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
(txq_id != IWL_CMD_QUEUE_NUM) &&
priv->mac80211_registered)
- ieee80211_wake_queue(priv->hw, txq_id);
+ iwl_wake_queue(priv, txq_id);
}
/**
@@ -747,11 +747,6 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
int i;
int counter;
- /* classify bd */
- if (txq->q.id == IWL_CMD_QUEUE_NUM)
- /* nothing to cleanup after for host commands */
- return;
-
/* sanity check */
counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
if (counter > NUM_TFD_CHUNKS) {
@@ -1046,7 +1041,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
goto error;
/* Tx queue(s) */
- for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) {
+ for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) {
slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
@@ -1184,7 +1179,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
- if(rc)
+ if (rc)
return rc;
priv->cfg->ops->lib->apm_ops.config(priv);
@@ -1239,8 +1234,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
int txq_id;
/* Tx queues */
- for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++)
- iwl_tx_queue_free(priv, txq_id);
+ for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ iwl_cmd_queue_free(priv);
+ else
+ iwl_tx_queue_free(priv, txq_id);
+
}
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1259,7 +1258,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
/* reset TFD queues */
- for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) {
+ for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) {
iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
@@ -2488,6 +2487,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
return -ENOMEM;
}
+ /* Assign number of Usable TX queues */
+ priv->hw_params.max_txq_num = TFD_QUEUE_MAX;
+
priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K;
priv->hw_params.max_pkt_size = 2342;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index bd0140be774e..847a6220c5e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2178,10 +2178,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
if (agg->state == IWL_AGG_OFF)
- ieee80211_wake_queue(priv->hw, txq_id);
+ iwl_wake_queue(priv, txq_id);
else
- ieee80211_wake_queue(priv->hw,
- txq->swq_id);
+ iwl_wake_queue(priv, txq->swq_id);
}
}
} else {
@@ -2205,7 +2204,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
- ieee80211_wake_queue(priv->hw, txq_id);
+ iwl_wake_queue(priv, txq_id);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 08c19bea71e3..e5ca2511a81a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1077,7 +1077,7 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
- IWL_WARN(priv,
+ IWL_ERR(priv,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWL50_FIRST_AMPDU_QUEUE,
IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
@@ -1295,10 +1295,9 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
if (agg->state == IWL_AGG_OFF)
- ieee80211_wake_queue(priv->hw, txq_id);
+ iwl_wake_queue(priv, txq_id);
else
- ieee80211_wake_queue(priv->hw,
- txq->swq_id);
+ iwl_wake_queue(priv, txq->swq_id);
}
}
} else {
@@ -1324,7 +1323,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
- ieee80211_wake_queue(priv->hw, txq_id);
+ iwl_wake_queue(priv, txq_id);
}
if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 0db3bc011ac2..663dc83be501 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1567,9 +1567,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
if (iwl_is_associated(priv)) {
struct iwl_rxon_cmd *active_rxon =
(struct iwl_rxon_cmd *)&priv->active_rxon;
-
- memcpy(&priv->staging_rxon, &priv->active_rxon,
- sizeof(priv->staging_rxon));
+ /* apply any changes in staging */
+ priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
@@ -2184,110 +2183,112 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
struct iwl_priv *priv = hw->priv;
const struct iwl_channel_info *ch_info;
struct ieee80211_conf *conf = &hw->conf;
- unsigned long flags;
+ unsigned long flags = 0;
int ret = 0;
- u16 channel;
+ u16 ch;
+ int scan_active = 0;
mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter to channel %d\n", conf->channel->hw_value);
+ IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
+ conf->channel->hw_value, changed);
- priv->current_ht_config.is_ht = conf_is_ht(conf);
-
- if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n");
- goto out;
+ if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
+ test_bit(STATUS_SCANNING, &priv->status))) {
+ scan_active = 1;
+ IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
}
- if (!conf->radio_enabled)
- iwl_radio_kill_sw_disable_radio(priv);
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- ret = -EIO;
- goto out;
- }
+	/* during scanning, mac80211 will delay channel setting until
+	 * the scan finishes (changed = 0)
+	 */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
+ ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
- if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
- test_bit(STATUS_SCANNING, &priv->status))) {
- IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
- mutex_unlock(&priv->mutex);
- return 0;
- }
+ if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+ !is_channel_ibss(ch_info)) {
+ IWL_ERR(priv, "channel %d in band %d not "
+ "IBSS channel\n",
+ conf->channel->hw_value, conf->channel->band);
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
- channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- ch_info = iwl_get_channel_info(priv, conf->channel->band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto out;
- }
+ priv->current_ht_config.is_ht = conf_is_ht(conf);
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d in band %d not IBSS channel\n",
- conf->channel->hw_value, conf->channel->band);
- ret = -EINVAL;
- goto out;
- }
+ spin_lock_irqsave(&priv->lock, flags);
- spin_lock_irqsave(&priv->lock, flags);
+ /* if we are switching from ht to 2.4 clear flags
+ * from any ht related info since 2.4 does not
+ * support ht */
+ if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
+ priv->staging_rxon.flags = 0;
- /* if we are switching from ht to 2.4 clear flags
- * from any ht related info since 2.4 does not
- * support ht */
- if ((le16_to_cpu(priv->staging_rxon.channel) != channel)
-#ifdef IEEE80211_CONF_CHANNEL_SWITCH
- && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
-#endif
- )
- priv->staging_rxon.flags = 0;
+ iwl_set_rxon_channel(priv, conf->channel);
- iwl_set_rxon_channel(priv, conf->channel);
+ iwl_set_flags_for_band(priv, conf->channel->band);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ iwl_set_rate(priv);
+ }
- iwl_set_flags_for_band(priv, conf->channel->band);
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS)
+ ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3);
+ else
+ ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
+ if (ret)
+ IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_set_rate(priv);
+ }
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+ priv->tx_power_user_lmt, conf->power_level);
-#ifdef IEEE80211_CONF_CHANNEL_SWITCH
- if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
- iwl_hw_channel_switch(priv, conf->channel);
- goto out;
+ iwl_set_tx_power(priv, conf->power_level, false);
+ }
+
+ /* call to ensure that 4965 rx_chain is set properly in monitor mode */
+ iwl_set_rxon_chain(priv);
+
+ if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
+ if (conf->radio_enabled &&
+ iwl_radio_kill_sw_enable_radio(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
+ "waiting for uCode\n");
+ goto out;
+ }
+
+ if (!conf->radio_enabled)
+ iwl_radio_kill_sw_disable_radio(priv);
}
-#endif
if (!conf->radio_enabled) {
IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
goto out;
}
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF kill\n");
- ret = -EIO;
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
}
- if (conf->flags & IEEE80211_CONF_PS)
- ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3);
- else
- ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
-
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_set_tx_power(priv, conf->power_level, false);
-
- iwl_set_rate(priv);
-
- /* call to ensure that 4965 rx_chain is set properly in monitor mode */
- iwl_set_rxon_chain(priv);
+ if (scan_active)
+ goto out;
if (memcmp(&priv->active_rxon,
&priv->staging_rxon, sizeof(priv->staging_rxon)))
@@ -2295,9 +2296,9 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
else
IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n");
- IWL_DEBUG_MAC80211(priv, "leave\n");
out:
+ IWL_DEBUG_MAC80211(priv, "leave\n");
mutex_unlock(&priv->mutex);
return ret;
}
@@ -2682,6 +2683,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
struct iwl_priv *priv = hw->priv;
+ int ret;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
@@ -2695,13 +2697,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
- return iwl_sta_rx_agg_stop(priv, sta->addr, tid);
+ ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return 0;
+ else
+ return ret;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
- return iwl_tx_agg_stop(priv, sta->addr, tid);
+ ret = iwl_tx_agg_stop(priv, sta->addr, tid);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return 0;
+ else
+ return ret;
default:
IWL_DEBUG_HT(priv, "unknown\n");
return -EINVAL;
@@ -3083,11 +3093,6 @@ static ssize_t store_power_level(struct device *d,
mutex_lock(&priv->mutex);
- if (!iwl_is_ready(priv)) {
- ret = -EAGAIN;
- goto out;
- }
-
ret = strict_strtoul(buf, 10, &mode);
if (ret)
goto out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 085e9cf1cac9..c54fb93e9d72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1298,6 +1298,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_SUPPORTS_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
@@ -1308,9 +1309,6 @@ int iwl_setup_mac(struct iwl_priv *priv)
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
- /* queues to support 11n aggregation */
- if (priv->cfg->sku & IWL_SKU_N)
- hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
hw->conf.beacon_int = 100;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
@@ -1437,6 +1435,10 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
priv->tx_power_user_lmt = tx_power;
+ /* if nic is not up don't send command */
+ if (!iwl_is_ready_rf(priv))
+ return ret;
+
if (force && priv->cfg->ops->lib->send_tx_power)
ret = priv->cfg->ops->lib->send_tx_power(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27310fec2e43..a8eac8c3c1fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -264,6 +264,7 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
* RX
******************************************************/
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 36cfeccfafbc..64eb585f1578 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -425,6 +425,56 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
return ret;
}
+static ssize_t iwl_dbgfs_status_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
+ test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
+ test_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
+ test_bit(STATUS_INT_ENABLED, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_SW:\t %d\n",
+ test_bit(STATUS_RF_KILL_SW, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
+ test_bit(STATUS_INIT, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
+ test_bit(STATUS_ALIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
+ test_bit(STATUS_READY, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
+ test_bit(STATUS_TEMPERATURE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
+ test_bit(STATUS_EXIT_PENDING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_IN_SUSPEND:\t %d\n",
+ test_bit(STATUS_IN_SUSPEND, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
+ test_bit(STATUS_STATISTICS, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
+ test_bit(STATUS_SCANNING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
+ test_bit(STATUS_SCAN_ABORTING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
+ test_bit(STATUS_SCAN_HW, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
+ test_bit(STATUS_POWER_PMI, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
+ test_bit(STATUS_FW_ERROR, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
+ test_bit(STATUS_MODE_PENDING, &priv->status));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(eeprom);
@@ -432,6 +482,7 @@ DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
/*
* Create the debugfs files and directories
@@ -466,7 +517,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(rx_statistics, data);
DEBUGFS_ADD_FILE(tx_statistics, data);
DEBUGFS_ADD_FILE(channels, data);
- DEBUGFS_ADD_X32(status, data, (u32 *)&priv->status);
+ DEBUGFS_ADD_FILE(status, data);
DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
&priv->disable_chain_noise_cal);
@@ -496,6 +547,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
DEBUGFS_REMOVE(priv->dbgfs->dir_data);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 0baae8022824..ec9a13846edd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -996,6 +996,12 @@ struct iwl_priv {
u8 key_mapping_key;
unsigned long ucode_key_table;
+ /* queue refcounts */
+#define IWL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ /* for each AC */
+ atomic_t queue_stop_count[4];
+
/* Indication if ieee80211_ops->open has been called */
u8 is_open;
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index fb64d297dd4e..a1328c3c81ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -93,4 +93,56 @@ static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW A-MPDU queue
+ * |
+ * +---------------------- indicates agg queue
+ */
+static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only have 5 bits */
+
+ return 0x80 | (hwq << 2) | ac;
+}
+
+static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue)
+{
+ u8 ac = queue;
+ u8 hwq = queue;
+
+ if (queue & 0x80) {
+ ac = queue & 3;
+ hwq = (queue >> 2) & 0x1f;
+ }
+
+ if (test_and_clear_bit(hwq, priv->queue_stopped))
+ if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(priv->hw, ac);
+}
+
+static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue)
+{
+ u8 ac = queue;
+ u8 hwq = queue;
+
+ if (queue & 0x80) {
+ ac = queue & 3;
+ hwq = (queue >> 2) & 0x1f;
+ }
+
+ if (!test_and_set_bit(hwq, priv->queue_stopped))
+ if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(priv->hw, ac);
+}
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
#endif /* __iwl_helpers_h__ */
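The bit-layout comment above defines how a virtual software queue id (swq_id) folds an aggregation HW queue and its AC into one byte. The following is a minimal standalone sketch of that encode/decode round trip, mirroring iwl_virtual_agg_queue_num() and the decode step in iwl_wake_queue()/iwl_stop_queue(); the AC and HW queue numbers are arbitrary examples, and this is an illustration rather than driver code.

/* Standalone illustration of the swq_id encoding documented above. */
#include <stdint.h>
#include <stdio.h>

static uint8_t virtual_agg_queue_num(uint8_t ac, uint8_t hwq)
{
	return 0x80 | (hwq << 2) | ac;	/* agg flag | HW queue (5 bits) | AC (2 bits) */
}

int main(void)
{
	uint8_t q = virtual_agg_queue_num(2, 10);	/* AC 2 mapped to HW queue 10 */
	uint8_t ac = q & 3;				/* decode exactly as iwl_wake_queue() does */
	uint8_t hwq = (q >> 2) & 0x1f;

	/* prints: swq_id=0xaa agg=1 ac=2 hwq=10 */
	printf("swq_id=0x%02x agg=%d ac=%d hwq=%d\n", q, (q & 0x80) ? 1 : 0, ac, hwq);
	return 0;
}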
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 18b7e4195ea1..47c894530eb5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -273,7 +273,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
if (priv->iw_mode != NL80211_IFTYPE_STATION)
final_mode = IWL_POWER_MODE_CAM;
- if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
+ if (iwl_is_ready_rf(priv) && !setting->power_disabled &&
((setting->power_mode != final_mode) || force)) {
struct iwl_powertable_cmd cmd;
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 1684490d93c0..5798fe49c771 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1138,8 +1138,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
int sta_id;
sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION)
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
+ }
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index dff60fb70214..1f117a49c569 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(iwl_tx_queue_free);
* Free all buffers.
* 0-fill, but do not free "txq" descriptor structure.
*/
-static void iwl_cmd_queue_free(struct iwl_priv *priv)
+void iwl_cmd_queue_free(struct iwl_priv *priv)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
@@ -193,12 +193,14 @@ static void iwl_cmd_queue_free(struct iwl_priv *priv)
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
- pci_free_consistent(dev, sizeof(struct iwl_tfd) *
+ pci_free_consistent(dev, priv->hw_params.tfd_size *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
+EXPORT_SYMBOL(iwl_cmd_queue_free);
+
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
*
@@ -761,8 +763,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
+ }
priv->stations[sta_id].tid[tid].tfds_in_queue++;
}
@@ -893,7 +897,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
} else {
- ieee80211_stop_queue(priv->hw, txq->swq_id);
+ iwl_stop_queue(priv, txq->swq_id);
}
}
@@ -1221,8 +1225,10 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
sta_id = iwl_find_station(priv, ra);
- if (sta_id == IWL_INVALID_STATION)
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
+ }
if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
@@ -1429,7 +1435,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
priv->mac80211_registered &&
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- ieee80211_wake_queue(priv->hw, txq->swq_id);
+ iwl_wake_queue(priv, txq->swq_id);
iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4465320f2735..a71b08ca7c71 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -485,14 +485,14 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
keyconf->keylen);
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ if ((priv->stations_39[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
+ priv->stations_39[sta_id].sta.key.key_offset =
iwl_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ WARN(priv->stations_39[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for a new key");
priv->stations_39[sta_id].sta.key.key_flags = key_flags;
@@ -560,7 +560,7 @@ static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
break;
default:
- IWL_ERR(priv,"Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
+ IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
ret = -EINVAL;
}
@@ -1168,7 +1168,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->lock, flags);
}
- ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
+ iwl_stop_queue(priv, skb_get_queue_mapping(skb));
}
return 0;
@@ -3773,15 +3773,19 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
}
#endif
- if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n");
- goto out;
- }
+ if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
+ if (conf->radio_enabled &&
+ iwl_radio_kill_sw_enable_radio(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
+ "waiting for uCode\n");
+ goto out;
+ }
- if (!conf->radio_enabled) {
- iwl_radio_kill_sw_disable_radio(priv);
- IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
- goto out;
+ if (!conf->radio_enabled) {
+ iwl_radio_kill_sw_disable_radio(priv);
+ IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
+ goto out;
+ }
}
if (iwl_is_rfkill(priv)) {
@@ -4546,11 +4550,6 @@ static ssize_t store_power_level(struct device *d,
mutex_lock(&priv->mutex);
- if (!iwl_is_ready(priv)) {
- ret = -EAGAIN;
- goto out;
- }
-
ret = strict_strtoul(buf, 10, &mode);
if (ret)
goto out;
@@ -4905,7 +4904,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_NOISE_DBM |
+ IEEE80211_HW_SPECTRUM_MGMT;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index f8eb9097ff0a..d16b26416e82 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -33,22 +33,12 @@ struct rx_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
u8 flags;
u8 rate;
- u16 chan_freq;
- u16 chan_flags;
- u8 antenna;
u8 antsignal;
- u16 rx_flags;
-#if 0
- u8 pad[IEEE80211_RADIOTAP_HDRLEN - 18];
-#endif
} __attribute__ ((packed));
#define RX_RADIOTAP_PRESENT ( \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
- (1 << IEEE80211_RADIOTAP_CHANNEL) | \
- (1 << IEEE80211_RADIOTAP_ANTENNA) | \
(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |\
- (1 << IEEE80211_RADIOTAP_RX_FLAGS) | \
0)
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 4f60948dde9c..63d7e19ce9bd 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -351,19 +351,11 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
radiotap_hdr.hdr.it_pad = 0;
radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr));
radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT);
- /* unknown values */
- radiotap_hdr.flags = 0;
- radiotap_hdr.chan_freq = 0;
- radiotap_hdr.chan_flags = 0;
- radiotap_hdr.antenna = 0;
- /* known values */
+ if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
+ radiotap_hdr.flags |= IEEE80211_RADIOTAP_F_BADFCS;
radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
/* XXX must check no carryout */
radiotap_hdr.antsignal = prxpd->snr + prxpd->nf;
- radiotap_hdr.rx_flags = 0;
- if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
- radiotap_hdr.rx_flags |= IEEE80211_RADIOTAP_F_RX_BADFCS;
- //memset(radiotap_hdr.pad, 0x11, IEEE80211_RADIOTAP_HDRLEN - 18);
/* chop the rxpd */
skb_pull(skb, sizeof(struct rxpd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2368b7f825a2..d4fdc8b7d7d8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -933,7 +933,6 @@ static int __init init_mac80211_hwsim(void)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT);
- hw->ampdu_queues = 1;
hw->flags = IEEE80211_HW_MFP_CAPABLE;
@@ -1041,6 +1040,9 @@ static int __init init_mac80211_hwsim(void)
break;
}
+ /* give the regulatory workqueue a chance to run */
+ if (regtest)
+ schedule_timeout_interruptible(1);
err = ieee80211_register_hw(hw);
if (err < 0) {
printk(KERN_DEBUG "mac80211_hwsim: "
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index cfc5f41aa136..b45d6a4ed1e8 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -1,9 +1,10 @@
config P54_COMMON
tristate "Softmac Prism54 support"
- depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL
+ depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
+ select FW_LOADER
---help---
- This is common code for isl38xx based cards.
- This module does nothing by itself - the USB/PCI frontends
+ This is common code for isl38xx/stlc45xx based modules.
+ This module does nothing by itself - the USB/PCI/SPI front-ends
also need to be enabled in order to support any devices.
These devices require softmac firmware which can be found at
@@ -17,31 +18,6 @@ config P54_USB
select CRC32
---help---
This driver is for USB isl38xx based wireless cards.
- These are USB based adapters found in devices such as:
-
- 3COM 3CRWE254G72
- SMC 2862W-G
- Accton 802.11g WN4501 USB
- Siemens Gigaset USB
- Netgear WG121
- Netgear WG111
- Medion 40900, Roper Europe
- Shuttle PN15, Airvast WM168g, IOGear GWU513
- Linksys WUSB54G
- Linksys WUSB54G Portable
- DLink DWL-G120 Spinnaker
- DLink DWL-G122
- Belkin F5D7050 ver 1000
- Cohiba Proto board
- SMC 2862W-G version 2
- U.S. Robotics U5 802.11g Adapter
- FUJITSU E-5400 USB D1700
- Sagem XG703A
- DLink DWL-G120 Cohiba
- Spinnaker Proto board
- Linksys WUSB54AG
- Inventel UR054G
- Spinnaker DUT
These devices require softmac firmware which can be found at
http://prism54.org/
@@ -64,10 +40,15 @@ config P54_PCI
config P54_SPI
tristate "Prism54 SPI (stlc45xx) support"
- depends on P54_COMMON && SPI_MASTER
+ depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
---help---
This driver is for stlc4550 or stlc4560 based wireless chips.
This driver is experimental, untested and will probably only work on
Nokia's N800/N810 Portable Internet Tablet.
If you choose to build a module, it'll be called p54spi.
+
+config P54_LEDS
+ bool
+ depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON)
+ default y
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 0a989834b70d..0c1b0577d4ee 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -21,9 +21,9 @@
#include <linux/etherdevice.h>
#include <net/mac80211.h>
-#ifdef CONFIG_MAC80211_LEDS
+#ifdef CONFIG_P54_LEDS
#include <linux/leds.h>
-#endif /* CONFIG_MAC80211_LEDS */
+#endif /* CONFIG_P54_LEDS */
#include "p54.h"
#include "p54common.h"
@@ -2420,7 +2420,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
return 0;
}
-#ifdef CONFIG_MAC80211_LEDS
+#ifdef CONFIG_P54_LEDS
static void p54_led_brightness_set(struct led_classdev *led_dev,
enum led_brightness brightness)
{
@@ -2508,7 +2508,7 @@ static void p54_unregister_leds(struct ieee80211_hw *dev)
if (priv->assoc_led.registered)
led_classdev_unregister(&priv->assoc_led.led_dev);
}
-#endif /* CONFIG_MAC80211_LEDS */
+#endif /* CONFIG_P54_LEDS */
static const struct ieee80211_ops p54_ops = {
.tx = p54_tx,
@@ -2592,11 +2592,11 @@ int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
return err;
}
- #ifdef CONFIG_MAC80211_LEDS
+#ifdef CONFIG_P54_LEDS
err = p54_init_leds(dev);
if (err)
return err;
- #endif /* CONFIG_MAC80211_LEDS */
+#endif /* CONFIG_P54_LEDS */
dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy));
return 0;
@@ -2610,9 +2610,9 @@ void p54_free_common(struct ieee80211_hw *dev)
kfree(priv->output_limit);
kfree(priv->curve_data);
- #ifdef CONFIG_MAC80211_LEDS
+#ifdef CONFIG_P54_LEDS
p54_unregister_leds(dev);
- #endif /* CONFIG_MAC80211_LEDS */
+#endif /* CONFIG_P54_LEDS */
}
EXPORT_SYMBOL_GPL(p54_free_common);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 24fdfdfee3df..420fff42c0dd 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2425,6 +2425,8 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) },
/* Surecom */
{ USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) },
+ /* Tilgin */
+ { USB_DEVICE(0x6933, 0x5001), USB_DEVICE_DATA(&rt73usb_ops) },
/* Philips */
{ USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) },
/* Planex */
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index b728541f2fb5..3ab3eb957189 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -735,9 +735,9 @@ if (lp->tx_n_in_use > 0)
if (tx_status & AC_SFLD_OK) {
int ncollisions;
- lp->stats.tx_packets++;
+ dev->stats.tx_packets++;
ncollisions = tx_status & AC_SFLD_MAXCOL;
- lp->stats.collisions += ncollisions;
+ dev->stats.collisions += ncollisions;
#ifdef DEBUG_TX_INFO
if (ncollisions > 0)
printk(KERN_DEBUG
@@ -745,9 +745,9 @@ if (lp->tx_n_in_use > 0)
dev->name, ncollisions);
#endif
} else {
- lp->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (tx_status & AC_SFLD_S10) {
- lp->stats.tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
#ifdef DEBUG_TX_FAIL
printk(KERN_DEBUG
"%s: wv_complete(): tx error: no CS.\n",
@@ -755,7 +755,7 @@ if (lp->tx_n_in_use > 0)
#endif
}
if (tx_status & AC_SFLD_S9) {
- lp->stats.tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
#ifdef DEBUG_TX_FAIL
printk(KERN_DEBUG
"%s: wv_complete(): tx error: lost CTS.\n",
@@ -763,7 +763,7 @@ if (lp->tx_n_in_use > 0)
#endif
}
if (tx_status & AC_SFLD_S8) {
- lp->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
#ifdef DEBUG_TX_FAIL
printk(KERN_DEBUG
"%s: wv_complete(): tx error: slow DMA.\n",
@@ -771,7 +771,7 @@ if (lp->tx_n_in_use > 0)
#endif
}
if (tx_status & AC_SFLD_S6) {
- lp->stats.tx_heartbeat_errors++;
+ dev->stats.tx_heartbeat_errors++;
#ifdef DEBUG_TX_FAIL
printk(KERN_DEBUG
"%s: wv_complete(): tx error: heart beat.\n",
@@ -779,7 +779,7 @@ if (lp->tx_n_in_use > 0)
#endif
}
if (tx_status & AC_SFLD_S5) {
- lp->stats.tx_aborted_errors++;
+ dev->stats.tx_aborted_errors++;
#ifdef DEBUG_TX_FAIL
printk(KERN_DEBUG
"%s: wv_complete(): tx error: too many collisions.\n",
@@ -1346,20 +1346,6 @@ static void wv_init_info(struct net_device * dev)
* or wireless extensions
*/
-/*------------------------------------------------------------------*/
-/*
- * Get the current Ethernet statistics. This may be called with the
- * card open or closed.
- * Used when the user read /proc/net/dev
- */
-static en_stats *wavelan_get_stats(struct net_device * dev)
-{
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
-#endif
-
- return &((net_local *)netdev_priv(dev))->stats;
-}
/*------------------------------------------------------------------*/
/*
@@ -2466,7 +2452,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
"%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n",
dev->name, sksize);
#endif
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
@@ -2526,8 +2512,8 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
netif_rx(skb);
/* Keep statistics up to date */
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += sksize;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += sksize;
#ifdef DEBUG_RX_TRACE
printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
@@ -2608,7 +2594,7 @@ static void wv_receive(struct net_device * dev)
#endif
} else { /* If reception was no successful */
- lp->stats.rx_errors++;
+ dev->stats.rx_errors++;
#ifdef DEBUG_RX_INFO
printk(KERN_DEBUG
@@ -2624,7 +2610,7 @@ static void wv_receive(struct net_device * dev)
#endif
if ((fd.fd_status & FD_STATUS_S7) != 0) {
- lp->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
#ifdef DEBUG_RX_FAIL
printk(KERN_DEBUG
"%s: wv_receive(): frame too short.\n",
@@ -2633,7 +2619,7 @@ static void wv_receive(struct net_device * dev)
}
if ((fd.fd_status & FD_STATUS_S8) != 0) {
- lp->stats.rx_over_errors++;
+ dev->stats.rx_over_errors++;
#ifdef DEBUG_RX_FAIL
printk(KERN_DEBUG
"%s: wv_receive(): rx DMA overrun.\n",
@@ -2642,7 +2628,7 @@ static void wv_receive(struct net_device * dev)
}
if ((fd.fd_status & FD_STATUS_S9) != 0) {
- lp->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
#ifdef DEBUG_RX_FAIL
printk(KERN_DEBUG
"%s: wv_receive(): ran out of resources.\n",
@@ -2651,7 +2637,7 @@ static void wv_receive(struct net_device * dev)
}
if ((fd.fd_status & FD_STATUS_S10) != 0) {
- lp->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
#ifdef DEBUG_RX_FAIL
printk(KERN_DEBUG
"%s: wv_receive(): alignment error.\n",
@@ -2660,7 +2646,7 @@ static void wv_receive(struct net_device * dev)
}
if ((fd.fd_status & FD_STATUS_S11) != 0) {
- lp->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
#ifdef DEBUG_RX_FAIL
printk(KERN_DEBUG
"%s: wv_receive(): CRC error.\n",
@@ -2826,7 +2812,7 @@ static int wv_packet_write(struct net_device * dev, void *buf, short length)
dev->trans_start = jiffies;
/* Keep stats up to date. */
- lp->stats.tx_bytes += length;
+ dev->stats.tx_bytes += length;
if (lp->tx_first_in_use == I82586NULL)
lp->tx_first_in_use = txblock;
@@ -4038,6 +4024,22 @@ static int wavelan_close(struct net_device * dev)
return 0;
}
+static const struct net_device_ops wavelan_netdev_ops = {
+ .ndo_open = wavelan_open,
+ .ndo_stop = wavelan_close,
+ .ndo_start_xmit = wavelan_packet_xmit,
+ .ndo_set_multicast_list = wavelan_set_multicast_list,
+ .ndo_tx_timeout = wavelan_watchdog,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef SET_MAC_ADDRESS
+ .ndo_set_mac_address = wavelan_set_mac_address
+#else
+ .ndo_set_mac_address = eth_mac_addr,
+#endif
+};
+
+
/*------------------------------------------------------------------*/
/*
* Probe an I/O address, and if the WaveLAN is there configure the
@@ -4130,17 +4132,8 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
/* Init spinlock */
spin_lock_init(&lp->spinlock);
- dev->open = wavelan_open;
- dev->stop = wavelan_close;
- dev->hard_start_xmit = wavelan_packet_xmit;
- dev->get_stats = wavelan_get_stats;
- dev->set_multicast_list = &wavelan_set_multicast_list;
- dev->tx_timeout = &wavelan_watchdog;
- dev->watchdog_timeo = WATCHDOG_JIFFIES;
-#ifdef SET_MAC_ADDRESS
- dev->set_mac_address = &wavelan_set_mac_address;
-#endif /* SET_MAC_ADDRESS */
-
+ dev->netdev_ops = &wavelan_netdev_ops;
+ dev->watchdog_timeo = WATCHDOG_JIFFIES;
dev->wireless_handlers = &wavelan_handler_def;
lp->wireless_data.spy_data = &lp->spy_data;
dev->wireless_data = &lp->wireless_data;
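The removed wavelan_get_stats() needs no ndo_get_stats replacement: when that hook is absent the core reads the counters embedded in struct net_device, which is why the driver now bumps dev->stats.* directly. A simplified sketch of that fallback, roughly how dev_get_stats() behaves for netdev_ops drivers of this era (the function name here is illustrative):

#include <linux/netdevice.h>

static const struct net_device_stats *example_read_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* no .ndo_get_stats in wavelan_netdev_ops, so this branch is skipped */
	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	return &dev->stats;
}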
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h
index 44d31bbf39e4..2daa0210d789 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/net/wireless/wavelan.p.h
@@ -459,11 +459,9 @@ static const char *version = "wavelan.c : v24 (SMP + wireless extensions) 11/12/
/****************************** TYPES ******************************/
/* Shortcuts */
-typedef struct net_device_stats en_stats;
typedef struct iw_statistics iw_stats;
typedef struct iw_quality iw_qual;
-typedef struct iw_freq iw_freq;
-typedef struct net_local net_local;
+typedef struct iw_freq iw_freq;
+typedef struct net_local net_local;
typedef struct timer_list timer_list;
/* Basic types */
@@ -475,15 +473,12 @@ typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
* For each network interface, Linux keeps data in two structures: "device"
* keeps the generic data (same format for everybody) and "net_local" keeps
* additional specific data.
- * Note that some of this specific data is in fact generic (en_stats, for
- * example).
*/
struct net_local
{
net_local * next; /* linked list of the devices */
struct net_device * dev; /* reverse link */
spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
- en_stats stats; /* Ethernet interface statistics */
int nresets; /* number of hardware resets */
u_char reconfig_82586; /* We need to reconfigure the controller. */
u_char promiscuous; /* promiscuous mode */
@@ -601,8 +596,6 @@ static void
static inline void
wv_init_info(struct net_device *); /* display startup info */
/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
-static en_stats *
- wavelan_get_stats(struct net_device *); /* Give stats /proc/net/dev */
static iw_stats *
wavelan_get_wireless_stats(struct net_device *);
static void
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index b1b947edcf01..540a2948596c 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -53,11 +53,11 @@ config SSB_B43_PCI_BRIDGE
config SSB_PCMCIAHOST_POSSIBLE
bool
- depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL
+ depends on SSB && (PCMCIA = y || PCMCIA = SSB)
default y
config SSB_PCMCIAHOST
- bool "Support for SSB on PCMCIA-bus host (EXPERIMENTAL)"
+ bool "Support for SSB on PCMCIA-bus host"
depends on SSB_PCMCIAHOST_POSSIBLE
select SSB_SPROM
help
@@ -107,14 +107,14 @@ config SSB_DRIVER_PCICORE
If unsure, say Y
config SSB_PCICORE_HOSTMODE
- bool "Hostmode support for SSB PCI core (EXPERIMENTAL)"
- depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && EXPERIMENTAL
+ bool "Hostmode support for SSB PCI core"
+ depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS
help
PCIcore hostmode operation (external PCI bus).
config SSB_DRIVER_MIPS
- bool "SSB Broadcom MIPS core driver (EXPERIMENTAL)"
- depends on SSB && MIPS && EXPERIMENTAL
+ bool "SSB Broadcom MIPS core driver"
+ depends on SSB && MIPS
select SSB_SERIAL
help
Driver for the Sonics Silicon Backplane attached
@@ -129,8 +129,8 @@ config SSB_EMBEDDED
default y
config SSB_DRIVER_EXTIF
- bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)"
- depends on SSB_DRIVER_MIPS && EXPERIMENTAL
+ bool "SSB Broadcom EXTIF core driver"
+ depends on SSB_DRIVER_MIPS
help
Driver for the Sonics Silicon Backplane attached
Broadcom EXTIF core.
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 27a677584a4c..ef9c6a04ad8f 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -18,6 +18,7 @@
static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4306) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index c295ba786edd..f0c7de78e205 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -41,8 +41,8 @@ extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
extern const struct file_operations v9fs_file_operations;
extern const struct file_operations v9fs_dir_operations;
-extern struct dentry_operations v9fs_dentry_operations;
-extern struct dentry_operations v9fs_cached_dentry_operations;
+extern const struct dentry_operations v9fs_dentry_operations;
+extern const struct dentry_operations v9fs_cached_dentry_operations;
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 06dcc7c4f234..d74325295b1e 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -104,12 +104,12 @@ void v9fs_dentry_release(struct dentry *dentry)
}
}
-struct dentry_operations v9fs_cached_dentry_operations = {
+const struct dentry_operations v9fs_cached_dentry_operations = {
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
};
-struct dentry_operations v9fs_dentry_operations = {
+const struct dentry_operations v9fs_dentry_operations = {
.d_delete = v9fs_dentry_delete,
.d_release = v9fs_dentry_release,
};
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 93212e40221a..5f8ab8adb5f5 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -168,8 +168,9 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
p9stat_free(st);
kfree(st);
-P9_DPRINTK(P9_DEBUG_VFS, " return simple set mount\n");
- return simple_set_mnt(mnt, sb);
+P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+ simple_set_mnt(mnt, sb);
+ return 0;
release_sb:
if (sb) {
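This change, and the identical cifs one further down, follow from simple_set_mnt() becoming a void function: callers attach the superblock and then return 0 (or an error) themselves. A minimal sketch of the new calling convention under that assumption (example_get_sb() and the anonymous-superblock setup are placeholders, not v9fs code):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mount.h>

static int example_get_sb(struct file_system_type *fs_type, int flags,
			  const char *dev_name, void *data,
			  struct vfsmount *mnt)
{
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(sb))
		return PTR_ERR(sb);
	if (!sb->s_root) {
		sb->s_flags = flags;
		/* ... build the root dentry here ... */
		sb->s_flags |= MS_ACTIVE;
	}
	simple_set_mnt(mnt, sb);	/* void now; nothing to propagate */
	return 0;
}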
diff --git a/fs/Kconfig b/fs/Kconfig
index 93945dd0b1ae..cef8b18ceaa3 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -56,61 +56,7 @@ endif # BLOCK
source "fs/notify/Kconfig"
-config QUOTA
- bool "Quota support"
- help
- If you say Y here, you will be able to set per user limits for disk
- usage (also called disk quotas). Currently, it works for the
- ext2, ext3, and reiserfs file system. ext3 also supports journalled
- quotas for which you don't need to run quotacheck(8) after an unclean
- shutdown.
- For further details, read the Quota mini-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, or the documentation provided
- with the quota tools. Probably the quota support is only useful for
- multi user systems. If unsure, say N.
-
-config QUOTA_NETLINK_INTERFACE
- bool "Report quota messages through netlink interface"
- depends on QUOTA && NET
- help
- If you say Y here, quota warnings (about exceeding softlimit, reaching
- hardlimit, etc.) will be reported through netlink interface. If unsure,
- say Y.
-
-config PRINT_QUOTA_WARNING
- bool "Print quota warnings to console (OBSOLETE)"
- depends on QUOTA
- default y
- help
- If you say Y here, quota warnings (about exceeding softlimit, reaching
- hardlimit, etc.) will be printed to the process' controlling terminal.
- Note that this behavior is currently deprecated and may go away in
- future. Please use notification via netlink socket instead.
-
-# Generic support for tree structured quota files. Seleted when needed.
-config QUOTA_TREE
- tristate
-
-config QFMT_V1
- tristate "Old quota format support"
- depends on QUOTA
- help
- This quota format was (is) used by kernels earlier than 2.4.22. If
- you have quota working and you don't want to convert to new quota
- format say Y here.
-
-config QFMT_V2
- tristate "Quota format v2 support"
- depends on QUOTA
- select QUOTA_TREE
- help
- This quota format allows using quotas with 32-bit UIDs/GIDs. If you
- need this functionality say Y here.
-
-config QUOTACTL
- bool
- depends on XFS_QUOTA || QUOTA
- default y
+source "fs/quota/Kconfig"
source "fs/autofs/Kconfig"
source "fs/autofs4/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dc20db348679..6e82a307bcd4 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
-obj-$(CONFIG_QUOTA) += dquot.o
-obj-$(CONFIG_QFMT_V1) += quota_v1.o
-obj-$(CONFIG_QFMT_V2) += quota_v2.o
-obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
-obj-$(CONFIG_QUOTACTL) += quota.o
+obj-y += quota/
obj-$(CONFIG_PROC_FS) += proc/
obj-y += partitions/
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 831157502d5a..e0a85dbeeb88 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -86,7 +86,7 @@ void __adfs_error(struct super_block *sb, const char *function,
/* dir_*.c */
extern const struct inode_operations adfs_dir_inode_operations;
extern const struct file_operations adfs_dir_operations;
-extern struct dentry_operations adfs_dentry_operations;
+extern const struct dentry_operations adfs_dentry_operations;
extern struct adfs_dir_ops adfs_f_dir_ops;
extern struct adfs_dir_ops adfs_fplus_dir_ops;
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 85a30e929800..e867ccf37246 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -263,7 +263,7 @@ adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name)
return 0;
}
-struct dentry_operations adfs_dentry_operations = {
+const struct dentry_operations adfs_dentry_operations = {
.d_hash = adfs_hash,
.d_compare = adfs_compare,
};
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e9ec915f7553..1a2d5e3c7f4e 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -199,8 +199,7 @@ extern const struct address_space_operations affs_symlink_aops;
extern const struct address_space_operations affs_aops;
extern const struct address_space_operations affs_aops_ofs;
-extern struct dentry_operations affs_dentry_operations;
-extern struct dentry_operations affs_dentry_operations_intl;
+extern const struct dentry_operations affs_dentry_operations;
static inline void
affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 805573005de6..7d0f0a30f7a3 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -179,14 +179,18 @@ affs_remove_link(struct dentry *dentry)
affs_lock_dir(dir);
affs_fix_dcache(dentry, link_ino);
retval = affs_remove_hash(dir, link_bh);
- if (retval)
+ if (retval) {
+ affs_unlock_dir(dir);
goto done;
+ }
mark_buffer_dirty_inode(link_bh, inode);
memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32);
retval = affs_insert_hash(dir, bh);
- if (retval)
+ if (retval) {
+ affs_unlock_dir(dir);
goto done;
+ }
mark_buffer_dirty_inode(bh, inode);
affs_unlock_dir(dir);
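The two added affs_unlock_dir() calls plug lock-leak error paths: any early exit taken while the directory is locked must drop the lock, or the next affs_lock_dir() caller risks blocking forever. A generic sketch of the shape this converges towards (illustrative only, using a plain mutex instead of the affs helpers):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_dir_lock);

static int example_locked_update(int (*remove_hash)(void), int (*insert_hash)(void))
{
	int retval;

	mutex_lock(&example_dir_lock);
	retval = remove_hash();
	if (retval)
		goto out_unlock;
	retval = insert_hash();
out_unlock:
	mutex_unlock(&example_dir_lock);
	return retval;
}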
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index cfcf1b6cf82b..960d336ec694 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -19,12 +19,12 @@ static int affs_intl_toupper(int ch);
static int affs_intl_hash_dentry(struct dentry *, struct qstr *);
static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
-struct dentry_operations affs_dentry_operations = {
+const struct dentry_operations affs_dentry_operations = {
.d_hash = affs_hash_dentry,
.d_compare = affs_compare_dentry,
};
-static struct dentry_operations affs_intl_dentry_operations = {
+static const struct dentry_operations affs_intl_dentry_operations = {
.d_hash = affs_intl_hash_dentry,
.d_compare = affs_intl_compare_dentry,
};
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 99cf390641f7..9bd757774c9e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -62,7 +62,7 @@ const struct inode_operations afs_dir_inode_operations = {
.setattr = afs_setattr,
};
-static struct dentry_operations afs_fs_dentry_operations = {
+static const struct dentry_operations afs_fs_dentry_operations = {
.d_revalidate = afs_d_revalidate,
.d_delete = afs_d_delete,
.d_release = afs_d_release,
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3bbdb9d02376..1dd96d4406c0 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -48,7 +48,7 @@ static struct file_system_type anon_inode_fs_type = {
.get_sb = anon_inodefs_get_sb,
.kill_sb = kill_anon_super,
};
-static struct dentry_operations anon_inodefs_dentry_operations = {
+static const struct dentry_operations anon_inodefs_dentry_operations = {
.d_delete = anon_inodefs_delete_dentry,
};
diff --git a/fs/attr.c b/fs/attr.c
index f4360192a938..9fe1b1bd30a8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
if (!error) {
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
- error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, attr) ?
+ -EDQUOT : 0;
if (!error)
error = inode_setattr(inode, attr);
}
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8aacade56956..4a1401cea0a1 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -192,7 +192,7 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd)
return 1;
}
-static struct dentry_operations autofs_dentry_operations = {
+static const struct dentry_operations autofs_dentry_operations = {
.d_revalidate = autofs_revalidate,
};
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 716e12b627b2..69c8142da838 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -310,7 +310,7 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi)
return ino;
}
-static struct dentry_operations autofs4_sb_dentry_operations = {
+static const struct dentry_operations autofs4_sb_dentry_operations = {
.d_release = autofs4_dentry_release,
};
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2a41c2a7fc52..74b1469a9504 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -349,13 +349,13 @@ void autofs4_dentry_release(struct dentry *de)
}
/* For dentries of directories in the root dir */
-static struct dentry_operations autofs4_root_dentry_operations = {
+static const struct dentry_operations autofs4_root_dentry_operations = {
.d_revalidate = autofs4_revalidate,
.d_release = autofs4_dentry_release,
};
/* For other dentries */
-static struct dentry_operations autofs4_dentry_operations = {
+static const struct dentry_operations autofs4_dentry_operations = {
.d_revalidate = autofs4_revalidate,
.d_release = autofs4_dentry_release,
};
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b3c1efff5e1d..8c3c6899ccf3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
+#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
@@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
iov, offset, nr_segs, blkdev_get_blocks, NULL);
}
+/*
+ * Write out and wait upon all the dirty data associated with a block
+ * device via its mapping. Does not take the superblock lock.
+ */
+int sync_blockdev(struct block_device *bdev)
+{
+ int ret = 0;
+
+ if (bdev)
+ ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
+ return ret;
+}
+EXPORT_SYMBOL(sync_blockdev);
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * device. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_bdev(struct block_device *bdev)
+{
+ struct super_block *sb = get_super(bdev);
+ if (sb) {
+ int res = fsync_super(sb);
+ drop_super(sb);
+ return res;
+ }
+ return sync_blockdev(bdev);
+}
+
+/**
+ * freeze_bdev -- lock a filesystem and force it into a consistent state
+ * @bdev: blockdevice to lock
+ *
+ * This takes the block device bd_mount_sem to make sure no new mounts
+ * happen on bdev until thaw_bdev() is called.
+ * If a superblock is found on this device, we take the s_umount semaphore
+ * on it to make sure nobody unmounts until the snapshot creation is done.
+ * The reference counter (bd_fsfreeze_count) guarantees that only the last
+ * unfreeze process can unfreeze the frozen filesystem actually when multiple
+ * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
+ * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
+ * actually.
+ */
+struct super_block *freeze_bdev(struct block_device *bdev)
+{
+ struct super_block *sb;
+ int error = 0;
+
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (bdev->bd_fsfreeze_count > 0) {
+ bdev->bd_fsfreeze_count++;
+ sb = get_super(bdev);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return sb;
+ }
+ bdev->bd_fsfreeze_count++;
+
+ down(&bdev->bd_mount_sem);
+ sb = get_super(bdev);
+ if (sb && !(sb->s_flags & MS_RDONLY)) {
+ sb->s_frozen = SB_FREEZE_WRITE;
+ smp_wmb();
+
+ __fsync_super(sb);
+
+ sb->s_frozen = SB_FREEZE_TRANS;
+ smp_wmb();
+
+ sync_blockdev(sb->s_bdev);
+
+ if (sb->s_op->freeze_fs) {
+ error = sb->s_op->freeze_fs(sb);
+ if (error) {
+ printk(KERN_ERR
+ "VFS:Filesystem freeze failed\n");
+ sb->s_frozen = SB_UNFROZEN;
+ drop_super(sb);
+ up(&bdev->bd_mount_sem);
+ bdev->bd_fsfreeze_count--;
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return ERR_PTR(error);
+ }
+ }
+ }
+
+ sync_blockdev(bdev);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+
+ return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
+}
+EXPORT_SYMBOL(freeze_bdev);
+
+/**
+ * thaw_bdev -- unlock filesystem
+ * @bdev: blockdevice to unlock
+ * @sb: associated superblock
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_bdev().
+ */
+int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+ int error = 0;
+
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (!bdev->bd_fsfreeze_count) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return -EINVAL;
+ }
+
+ bdev->bd_fsfreeze_count--;
+ if (bdev->bd_fsfreeze_count > 0) {
+ if (sb)
+ drop_super(sb);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return 0;
+ }
+
+ if (sb) {
+ BUG_ON(sb->s_bdev != bdev);
+ if (!(sb->s_flags & MS_RDONLY)) {
+ if (sb->s_op->unfreeze_fs) {
+ error = sb->s_op->unfreeze_fs(sb);
+ if (error) {
+ printk(KERN_ERR
+ "VFS:Filesystem thaw failed\n");
+ sb->s_frozen = SB_FREEZE_TRANS;
+ bdev->bd_fsfreeze_count++;
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return error;
+ }
+ }
+ sb->s_frozen = SB_UNFROZEN;
+ smp_wmb();
+ wake_up(&sb->s_wait_unfrozen);
+ }
+ drop_super(sb);
+ }
+
+ up(&bdev->bd_mount_sem);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(thaw_bdev);
+
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
return block_write_full_page(page, blkdev_get_block, wbc);
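The kernel-doc above spells out the bd_fsfreeze_count protocol: freeze requests nest, and only the final thaw_bdev() actually unfreezes the filesystem. For reference, a sketch of how a snapshot-style caller pairs the two (illustrative only; take_snapshot() is a hypothetical placeholder and the includes assume the usual declarations):

#include <linux/buffer_head.h>
#include <linux/err.h>
#include <linux/fs.h>

static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* nests; bumps bd_fsfreeze_count */
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* take_snapshot(bdev) would run here while writes are held off */

	return thaw_bdev(bdev, sb);	/* the last thaw really unfreezes */
}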
diff --git a/fs/buffer.c b/fs/buffer.c
index 891e1c78e4f1..a2fd743d97cb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -166,151 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
}
/*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping. Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
- int ret = 0;
-
- if (bdev)
- ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
- return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
- struct super_block *sb = get_super(bdev);
- if (sb) {
- int res = fsync_super(sb);
- drop_super(sb);
- return res;
- }
- return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev -- lock a filesystem and force it into a consistent state
- * @bdev: blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
- * actually.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
- struct super_block *sb;
- int error = 0;
-
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- bdev->bd_fsfreeze_count++;
- sb = get_super(bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return sb;
- }
- bdev->bd_fsfreeze_count++;
-
- down(&bdev->bd_mount_sem);
- sb = get_super(bdev);
- if (sb && !(sb->s_flags & MS_RDONLY)) {
- sb->s_frozen = SB_FREEZE_WRITE;
- smp_wmb();
-
- __fsync_super(sb);
-
- sb->s_frozen = SB_FREEZE_TRANS;
- smp_wmb();
-
- sync_blockdev(sb->s_bdev);
-
- if (sb->s_op->freeze_fs) {
- error = sb->s_op->freeze_fs(sb);
- if (error) {
- printk(KERN_ERR
- "VFS:Filesystem freeze failed\n");
- sb->s_frozen = SB_UNFROZEN;
- drop_super(sb);
- up(&bdev->bd_mount_sem);
- bdev->bd_fsfreeze_count--;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return ERR_PTR(error);
- }
- }
- }
-
- sync_blockdev(bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
- return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev -- unlock filesystem
- * @bdev: blockdevice to unlock
- * @sb: associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- int error = 0;
-
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (!bdev->bd_fsfreeze_count) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return -EINVAL;
- }
-
- bdev->bd_fsfreeze_count--;
- if (bdev->bd_fsfreeze_count > 0) {
- if (sb)
- drop_super(sb);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return 0;
- }
-
- if (sb) {
- BUG_ON(sb->s_bdev != bdev);
- if (!(sb->s_flags & MS_RDONLY)) {
- if (sb->s_op->unfreeze_fs) {
- error = sb->s_op->unfreeze_fs(sb);
- if (error) {
- printk(KERN_ERR
- "VFS:Filesystem thaw failed\n");
- sb->s_frozen = SB_FREEZE_TRANS;
- bdev->bd_fsfreeze_count++;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return error;
- }
- }
- sb->s_frozen = SB_UNFROZEN;
- smp_wmb();
- wake_up(&sb->s_wait_unfrozen);
- }
- drop_super(sb);
- }
-
- up(&bdev->bd_mount_sem);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return 0;
-}
-EXPORT_SYMBOL(thaw_bdev);
-
-/*
* Various filesystems appear to want __find_get_block to be non-blocking.
* But it's the page lock which protects the buffers. To get around this,
* we get exclusion from try_to_free_buffers with the blockdev mapping's
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 13ea53251dcf..38491fd3871d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -606,7 +606,8 @@ cifs_get_sb(struct file_system_type *fs_type,
return rc;
}
sb->s_flags |= MS_ACTIVE;
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
}
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 2b1d28a9ee28..77e190dc2883 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -78,8 +78,8 @@ extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
/* Functions related to dir entries */
-extern struct dentry_operations cifs_dentry_ops;
-extern struct dentry_operations cifs_ci_dentry_ops;
+extern const struct dentry_operations cifs_dentry_ops;
+extern const struct dentry_operations cifs_ci_dentry_ops;
/* Functions related to symlinks */
extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f9b6f68be976..2f35cccfcd8d 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -701,7 +701,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
return rc;
} */
-struct dentry_operations cifs_dentry_ops = {
+const struct dentry_operations cifs_dentry_ops = {
.d_revalidate = cifs_d_revalidate,
/* d_delete: cifs_d_delete, */ /* not needed except for debugging */
};
@@ -739,7 +739,7 @@ static int cifs_ci_compare(struct dentry *dentry, struct qstr *a,
return 1;
}
-struct dentry_operations cifs_ci_dentry_ops = {
+const struct dentry_operations cifs_ci_dentry_ops = {
.d_revalidate = cifs_d_revalidate,
.d_hash = cifs_ci_hash,
.d_compare = cifs_ci_compare,
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 75b1fa90b2cb..4bb9d0a5decc 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -59,7 +59,7 @@ static int coda_return_EIO(void)
}
#define CODA_EIO_ERROR ((void *) (coda_return_EIO))
-static struct dentry_operations coda_dentry_operations =
+static const struct dentry_operations coda_dentry_operations =
{
.d_revalidate = coda_dentry_revalidate,
.d_delete = coda_dentry_delete,
diff --git a/fs/compat.c b/fs/compat.c
index 0949b43794a4..5e374aad33f7 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -378,6 +378,34 @@ out:
return error;
}
+/*
+ * This is a copy of sys_ustat, just dealing with a structure layout.
+ * Given how simple this syscall is, that approach is more maintainable
+ * than the various conversion hacks.
+ */
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
+{
+ struct super_block *sb;
+ struct compat_ustat tmp;
+ struct kstatfs sbuf;
+ int err;
+
+ sb = user_get_super(new_decode_dev(dev));
+ if (!sb)
+ return -EINVAL;
+ err = vfs_statfs(sb->s_root, &sbuf);
+ drop_super(sb);
+ if (err)
+ return err;
+
+ memset(&tmp, 0, sizeof(struct compat_ustat));
+ tmp.f_tfree = sbuf.f_bfree;
+ tmp.f_tinode = sbuf.f_ffree;
+ if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
+ return -EFAULT;
+ return 0;
+}
+
static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
{
if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e93341f3e82..05373db21a4e 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@ static int configfs_d_delete(struct dentry *dentry)
return 1;
}
-static struct dentry_operations configfs_dentry_ops = {
+static const struct dentry_operations configfs_dentry_ops = {
.d_iput = configfs_d_iput,
/* simple_delete_dentry() isn't exported */
.d_delete = configfs_d_delete,
diff --git a/fs/dcache.c b/fs/dcache.c
index 07e2d4a44bda..90bbd7e1b116 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1247,15 +1247,18 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
struct dentry *found;
struct dentry *new;
- /* Does a dentry matching the name exist already? */
+ /*
+ * First check if a dentry matching the name already exists,
+ * if not go ahead and create it now.
+ */
found = d_hash_and_lookup(dentry->d_parent, name);
- /* If not, create it now and return */
if (!found) {
new = d_alloc(dentry->d_parent, name);
if (!new) {
error = -ENOMEM;
goto err_out;
}
+
found = d_splice_alias(inode, new);
if (found) {
dput(new);
@@ -1263,61 +1266,46 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
return new;
}
- /* Matching dentry exists, check if it is negative. */
+
+ /*
+ * If a matching dentry exists, and it's not negative use it.
+ *
+ * Decrement the reference count to balance the iget() done
+ * earlier on.
+ */
if (found->d_inode) {
if (unlikely(found->d_inode != inode)) {
/* This can't happen because bad inodes are unhashed. */
BUG_ON(!is_bad_inode(inode));
BUG_ON(!is_bad_inode(found->d_inode));
}
- /*
- * Already have the inode and the dentry attached, decrement
- * the reference count to balance the iget() done
- * earlier on. We found the dentry using d_lookup() so it
- * cannot be disconnected and thus we do not need to worry
- * about any NFS/disconnectedness issues here.
- */
iput(inode);
return found;
}
+
/*
* Negative dentry: instantiate it unless the inode is a directory and
- * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED),
- * in which case d_move() that in place of the found dentry.
+ * already has a dentry.
*/
- if (!S_ISDIR(inode->i_mode)) {
- /* Not a directory; everything is easy. */
- d_instantiate(found, inode);
- return found;
- }
spin_lock(&dcache_lock);
- if (list_empty(&inode->i_dentry)) {
- /*
- * Directory without a 'disconnected' dentry; we need to do
- * d_instantiate() by hand because it takes dcache_lock which
- * we already hold.
- */
+ if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
__d_instantiate(found, inode);
spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
return found;
}
+
/*
- * Directory with a 'disconnected' dentry; get a reference to the
- * 'disconnected' dentry.
+ * In case a directory already has a (disconnected) entry grab a
+ * reference to it, move it in place and use it.
*/
new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
dget_locked(new);
spin_unlock(&dcache_lock);
- /* Do security vodoo. */
security_d_instantiate(found, inode);
- /* Move new in place of found. */
d_move(new, found);
- /* Balance the iget() we did above. */
iput(inode);
- /* Throw away found. */
dput(found);
- /* Use new as the actual dentry. */
return new;
err_out:
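For context on how filesystems use this helper: d_add_ci() is called from a case-insensitive ->lookup once the exact on-disk spelling and inode have been found, and it consumes the inode reference on every path. A thin illustrative wrapper (hypothetical names, not from this patch):

#include <linux/dcache.h>
#include <linux/fs.h>

static struct dentry *example_ci_lookup(struct dentry *dentry,
					struct inode *inode,
					struct qstr *disk_name)
{
	/* disk_name: exact on-disk spelling from a case-insensitive search */
	return d_add_ci(dentry, inode, disk_name);
}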
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bff4052b05e7..63a4a59e4148 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -322,177 +322,81 @@ static int compare_init_pts_sb(struct super_block *s, void *p)
}
/*
- * Safely parse the mount options in @data and update @opts.
+ * devpts_get_sb()
*
- * devpts ends up parsing options two times during mount, due to the
- * two modes of operation it supports. The first parse occurs in
- * devpts_get_sb() when determining the mode (single-instance or
- * multi-instance mode). The second parse happens in devpts_remount()
- * or new_pts_mount() depending on the mode.
+ * If the '-o newinstance' mount option was specified, mount a new
+ * (private) instance of devpts. PTYs created in this instance are
+ * independent of the PTYs in other devpts instances.
*
- * Parsing of options modifies the @data making subsequent parsing
- * incorrect. So make a local copy of @data and parse it.
+ * If the '-o newinstance' option was not specified, mount/remount the
+ * initial kernel mount of devpts. This type of mount gives the
+ * legacy, single-instance semantics.
*
- * Return: 0 On success, -errno on error
- */
-static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts)
-{
- int rc;
- void *datacp;
-
- if (!data)
- return 0;
-
- /* Use kstrdup() ? */
- datacp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!datacp)
- return -ENOMEM;
-
- memcpy(datacp, data, PAGE_SIZE);
- rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts);
- kfree(datacp);
-
- return rc;
-}
-
-/*
- * Mount a new (private) instance of devpts. PTYs created in this
- * instance are independent of the PTYs in other devpts instances.
- */
-static int new_pts_mount(struct file_system_type *fs_type, int flags,
- void *data, struct vfsmount *mnt)
-{
- int err;
- struct pts_fs_info *fsi;
- struct pts_mount_opts *opts;
-
- err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt);
- if (err)
- return err;
-
- fsi = DEVPTS_SB(mnt->mnt_sb);
- opts = &fsi->mount_opts;
-
- err = parse_mount_options(data, PARSE_MOUNT, opts);
- if (err)
- goto fail;
-
- err = mknod_ptmx(mnt->mnt_sb);
- if (err)
- goto fail;
-
- return 0;
-
-fail:
- dput(mnt->mnt_sb->s_root);
- deactivate_super(mnt->mnt_sb);
- return err;
-}
-
-/*
- * Check if 'newinstance' mount option was specified in @data.
+ * The 'newinstance' option is needed to support multiple namespace
+ * semantics in devpts while preserving backward compatibility of the
+ * current 'single-namespace' semantics. i.e all mounts of devpts
+ * without the 'newinstance' mount option should bind to the initial
+ * kernel mount, like get_sb_single().
*
- * Return: -errno on error (eg: invalid mount options specified)
- * : 1 if 'newinstance' mount option was specified
- * : 0 if 'newinstance' mount option was NOT specified
- */
-static int is_new_instance_mount(void *data)
-{
- int rc;
- struct pts_mount_opts opts;
-
- if (!data)
- return 0;
-
- rc = safe_parse_mount_options(data, &opts);
- if (!rc)
- rc = opts.newinstance;
-
- return rc;
-}
-
-/*
- * get_init_pts_sb()
- *
- * This interface is needed to support multiple namespace semantics in
- * devpts while preserving backward compatibility of the current 'single-
- * namespace' semantics. i.e all mounts of devpts without the 'newinstance'
- * mount option should bind to the initial kernel mount, like
- * get_sb_single().
+ * Mounts with 'newinstance' option create a new, private namespace.
*
- * Mounts with 'newinstance' option create a new private namespace.
+ * NOTE:
*
- * But for single-mount semantics, devpts cannot use get_sb_single(),
+ * For single-mount semantics, devpts cannot use get_sb_single(),
* because get_sb_single()/sget() find and use the super-block from
* the most recent mount of devpts. But that recent mount may be a
* 'newinstance' mount and get_sb_single() would pick the newinstance
* super-block instead of the initial super-block.
- *
- * This interface is identical to get_sb_single() except that it
- * consistently selects the 'single-namespace' superblock even in the
- * presence of the private namespace (i.e 'newinstance') super-blocks.
*/
-static int get_init_pts_sb(struct file_system_type *fs_type, int flags,
- void *data, struct vfsmount *mnt)
+static int devpts_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- struct super_block *s;
int error;
+ struct pts_mount_opts opts;
+ struct super_block *s;
+
+ memset(&opts, 0, sizeof(opts));
+ if (data) {
+ error = parse_mount_options(data, PARSE_MOUNT, &opts);
+ if (error)
+ return error;
+ }
+
+ if (opts.newinstance)
+ s = sget(fs_type, NULL, set_anon_super, NULL);
+ else
+ s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
- s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
if (IS_ERR(s))
return PTR_ERR(s);
if (!s->s_root) {
s->s_flags = flags;
error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
- if (error) {
- up_write(&s->s_umount);
- deactivate_super(s);
- return error;
- }
+ if (error)
+ goto out_undo_sget;
s->s_flags |= MS_ACTIVE;
}
- do_remount_sb(s, flags, data, 0);
- return simple_set_mnt(mnt, s);
-}
-/*
- * Mount or remount the initial kernel mount of devpts. This type of
- * mount maintains the legacy, single-instance semantics, while the
- * kernel still allows multiple-instances.
- */
-static int init_pts_mount(struct file_system_type *fs_type, int flags,
- void *data, struct vfsmount *mnt)
-{
- int err;
+ simple_set_mnt(mnt, s);
- err = get_init_pts_sb(fs_type, flags, data, mnt);
- if (err)
- return err;
+ memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));
- err = mknod_ptmx(mnt->mnt_sb);
- if (err) {
- dput(mnt->mnt_sb->s_root);
- deactivate_super(mnt->mnt_sb);
- }
+ error = mknod_ptmx(s);
+ if (error)
+ goto out_dput;
- return err;
-}
-
-static int devpts_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
-{
- int new;
-
- new = is_new_instance_mount(data);
- if (new < 0)
- return new;
+ return 0;
- if (new)
- return new_pts_mount(fs_type, flags, data, mnt);
+out_dput:
+ dput(s->s_root);
- return init_pts_mount(fs_type, flags, data, mnt);
+out_undo_sget:
+ up_write(&s->s_umount);
+ deactivate_super(s);
+ return error;
}
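The behavioural split above reduces to the test callback handed to sget(): passing NULL for the 'newinstance' case guarantees a fresh private superblock, while compare_init_pts_sb() makes every plain mount reattach to the initial one. A stripped-down sketch of what such a test callback decides (hypothetical names; the real callback compares against the initial devpts mount):

#include <linux/fs.h>

static struct super_block *example_initial_sb;	/* saved at first mount (hypothetical) */

static int example_match_initial_sb(struct super_block *sb, void *data)
{
	/* sget() reuses 'sb' when the test returns non-zero */
	return sb == example_initial_sb;
}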
+
#else
/*
* This supports only the legacy single-instance semantics (no
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 92969f879a17..858fba14aaa6 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -156,7 +156,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
bucket = dir_hash(ls, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
de = search_bucket(ls, name, namelen, bucket);
@@ -173,7 +173,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
list_del(&de->list);
kfree(de);
out:
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
}
void dlm_dir_clear(struct dlm_ls *ls)
@@ -185,14 +185,14 @@ void dlm_dir_clear(struct dlm_ls *ls)
DLM_ASSERT(list_empty(&ls->ls_recover_list), );
for (i = 0; i < ls->ls_dirtbl_size; i++) {
- write_lock(&ls->ls_dirtbl[i].lock);
+ spin_lock(&ls->ls_dirtbl[i].lock);
head = &ls->ls_dirtbl[i].list;
while (!list_empty(head)) {
de = list_entry(head->next, struct dlm_direntry, list);
list_del(&de->list);
put_free_de(ls, de);
}
- write_unlock(&ls->ls_dirtbl[i].lock);
+ spin_unlock(&ls->ls_dirtbl[i].lock);
}
}
@@ -307,17 +307,17 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
bucket = dir_hash(ls, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
de = search_bucket(ls, name, namelen, bucket);
if (de) {
*r_nodeid = de->master_nodeid;
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
if (*r_nodeid == nodeid)
return -EEXIST;
return 0;
}
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
if (namelen > DLM_RESNAME_MAXLEN)
return -EINVAL;
@@ -330,7 +330,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
de->length = namelen;
memcpy(de->name, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
tmp = search_bucket(ls, name, namelen, bucket);
if (tmp) {
kfree(de);
@@ -339,7 +339,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
}
*r_nodeid = de->master_nodeid;
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
return 0;
}
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 076e86f38bc8..d01ca0a711db 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -99,7 +99,7 @@ struct dlm_direntry {
struct dlm_dirtable {
struct list_head list;
- rwlock_t lock;
+ spinlock_t lock;
};
struct dlm_rsbtable {
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 01e7d39c5fba..205ec95b347e 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -835,7 +835,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
lkb->lkb_wait_count++;
hold_lkb(lkb);
- log_debug(ls, "add overlap %x cur %d new %d count %d flags %x",
+ log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
lkb->lkb_id, lkb->lkb_wait_type, mstype,
lkb->lkb_wait_count, lkb->lkb_flags);
goto out;
@@ -851,7 +851,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
out:
if (error)
- log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s",
+ log_error(ls, "addwait error %x %d flags %x %d %d %s",
lkb->lkb_id, error, lkb->lkb_flags, mstype,
lkb->lkb_wait_type, lkb->lkb_resource->res_name);
mutex_unlock(&ls->ls_waiters_mutex);
@@ -863,23 +863,55 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
request reply on the requestqueue) between dlm_recover_waiters_pre() which
set RESEND and dlm_recover_waiters_post() */
-static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
+static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
+ struct dlm_message *ms)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int overlap_done = 0;
if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
+ log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
overlap_done = 1;
goto out_del;
}
if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
+ log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
overlap_done = 1;
goto out_del;
}
+ /* Cancel state was preemptively cleared by a successful convert,
+ see next comment, nothing to do. */
+
+ if ((mstype == DLM_MSG_CANCEL_REPLY) &&
+ (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
+ log_debug(ls, "remwait %x cancel_reply wait_type %d",
+ lkb->lkb_id, lkb->lkb_wait_type);
+ return -1;
+ }
+
+ /* Remove for the convert reply, and preemptively remove for the
+ cancel reply. A convert has been granted while there's still
+ an outstanding cancel on it (the cancel is moot and the result
+ in the cancel reply should be 0). We preempt the cancel reply
+ because the app gets the convert result and then can follow up
+ with another op, like convert. This subsequent op would see the
+ lingering state of the cancel and fail with -EBUSY. */
+
+ if ((mstype == DLM_MSG_CONVERT_REPLY) &&
+ (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
+ is_overlap_cancel(lkb) && ms && !ms->m_result) {
+ log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
+ lkb->lkb_id);
+ lkb->lkb_wait_type = 0;
+ lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
+ lkb->lkb_wait_count--;
+ goto out_del;
+ }
+
/* N.B. type of reply may not always correspond to type of original
msg due to lookup->request optimization, verify others? */
@@ -888,8 +920,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
goto out_del;
}
- log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d",
- lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type);
+ log_error(ls, "remwait error %x reply %d flags %x no wait_type",
+ lkb->lkb_id, mstype, lkb->lkb_flags);
return -1;
out_del:
@@ -899,7 +931,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
this would happen */
if (overlap_done && lkb->lkb_wait_type) {
- log_error(ls, "remove_from_waiters %x reply %d give up on %d",
+ log_error(ls, "remwait error %x reply %d wait_type %d overlap",
lkb->lkb_id, mstype, lkb->lkb_wait_type);
lkb->lkb_wait_count--;
lkb->lkb_wait_type = 0;
@@ -921,7 +953,7 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
int error;
mutex_lock(&ls->ls_waiters_mutex);
- error = _remove_from_waiters(lkb, mstype);
+ error = _remove_from_waiters(lkb, mstype, NULL);
mutex_unlock(&ls->ls_waiters_mutex);
return error;
}
@@ -936,7 +968,7 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
if (ms != &ls->ls_stub_ms)
mutex_lock(&ls->ls_waiters_mutex);
- error = _remove_from_waiters(lkb, ms->m_type);
+ error = _remove_from_waiters(lkb, ms->m_type, ms);
if (ms != &ls->ls_stub_ms)
mutex_unlock(&ls->ls_waiters_mutex);
return error;
@@ -2083,6 +2115,11 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
lkb->lkb_timeout_cs = args->timeout;
rv = 0;
out:
+ if (rv)
+ log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
+ rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
+ lkb->lkb_status, lkb->lkb_wait_type,
+ lkb->lkb_resource->res_name);
return rv;
}
@@ -2149,6 +2186,13 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
goto out;
}
+ /* there's nothing to cancel */
+ if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
+ !lkb->lkb_wait_type) {
+ rv = -EBUSY;
+ goto out;
+ }
+
switch (lkb->lkb_wait_type) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index aa32e5f02493..cd8e2df3c295 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -487,7 +487,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
goto out_lkbfree;
for (i = 0; i < size; i++) {
INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
- rwlock_init(&ls->ls_dirtbl[i].lock);
+ spin_lock_init(&ls->ls_dirtbl[i].lock);
}
INIT_LIST_HEAD(&ls->ls_waiters);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 103a5ebd1371..609108a83267 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,7 @@
*
* Cluster nodes are referred to by their nodeids. nodeids are
* simply 32 bit numbers to the locking module - if they need to
- * be expanded for the cluster infrastructure then that is it's
+ * be expanded for the cluster infrastructure then that is its
* responsibility. It is this layer's
* responsibility to resolve these into IP address or
* whatever it needs for inter-node communication.
@@ -36,9 +36,9 @@
* of high load. Also, this way, the sending thread can collect together
* messages bound for one node and send them in one block.
*
- * lowcomms will choose to use wither TCP or SCTP as its transport layer
+ * lowcomms will choose to use either TCP or SCTP as its transport layer
* depending on the configuration variable 'protocol'. This should be set
- * to 0 (default) for TCP or 1 for SCTP. It shouldbe configured using a
+ * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
* cluster-wide mechanism as it must be the same on all nodes of the cluster
* for the DLM to function.
*
@@ -48,11 +48,11 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>
+#include <net/ipv6.h>
#include "dlm_internal.h"
#include "lowcomms.h"
@@ -60,6 +60,7 @@
#include "config.h"
#define NEEDED_RMEM (4*1024*1024)
+#define CONN_HASH_SIZE 32
struct cbuf {
unsigned int base;
@@ -114,6 +115,7 @@ struct connection {
int retries;
#define MAX_CONNECT_RETRIES 3
int sctp_assoc;
+ struct hlist_node list;
struct connection *othercon;
struct work_struct rwork; /* Receive workqueue */
struct work_struct swork; /* Send workqueue */
@@ -138,14 +140,37 @@ static int dlm_local_count;
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;
-static DEFINE_IDR(connections_idr);
+static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
-static int max_nodeid;
static struct kmem_cache *con_cache;
static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
+
+/* This is deliberately very simple because most clusters have simple
+ sequential nodeids, so we should be able to go straight to a connection
+ struct in the array */
+static inline int nodeid_hash(int nodeid)
+{
+ return nodeid & (CONN_HASH_SIZE-1);
+}
+
+static struct connection *__find_con(int nodeid)
+{
+ int r;
+ struct hlist_node *h;
+ struct connection *con;
+
+ r = nodeid_hash(nodeid);
+
+ hlist_for_each_entry(con, h, &connection_hash[r], list) {
+ if (con->nodeid == nodeid)
+ return con;
+ }
+ return NULL;
+}
+
/*
* If 'allocation' is zero then we don't attempt to create a new
* connection structure for this node.
@@ -154,31 +179,17 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
struct connection *con = NULL;
int r;
- int n;
- con = idr_find(&connections_idr, nodeid);
+ con = __find_con(nodeid);
if (con || !alloc)
return con;
- r = idr_pre_get(&connections_idr, alloc);
- if (!r)
- return NULL;
-
con = kmem_cache_zalloc(con_cache, alloc);
if (!con)
return NULL;
- r = idr_get_new_above(&connections_idr, con, nodeid, &n);
- if (r) {
- kmem_cache_free(con_cache, con);
- return NULL;
- }
-
- if (n != nodeid) {
- idr_remove(&connections_idr, n);
- kmem_cache_free(con_cache, con);
- return NULL;
- }
+ r = nodeid_hash(nodeid);
+ hlist_add_head(&con->list, &connection_hash[r]);
con->nodeid = nodeid;
mutex_init(&con->sock_mutex);
@@ -189,19 +200,30 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
/* Setup action pointers for child sockets */
if (con->nodeid) {
- struct connection *zerocon = idr_find(&connections_idr, 0);
+ struct connection *zerocon = __find_con(0);
con->connect_action = zerocon->connect_action;
if (!con->rx_action)
con->rx_action = zerocon->rx_action;
}
- if (nodeid > max_nodeid)
- max_nodeid = nodeid;
-
return con;
}
+/* Loop round all connections */
+static void foreach_conn(void (*conn_func)(struct connection *c))
+{
+ int i;
+ struct hlist_node *h, *n;
+ struct connection *con;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+ conn_func(con);
+ }
+ }
+}
+
static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
struct connection *con;
@@ -217,14 +239,17 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
static struct connection *assoc2con(int assoc_id)
{
int i;
+ struct hlist_node *h;
struct connection *con;
mutex_lock(&connections_lock);
- for (i=0; i<=max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con && con->sctp_assoc == assoc_id) {
- mutex_unlock(&connections_lock);
- return con;
+
+ for (i = 0 ; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry(con, h, &connection_hash[i], list) {
+ if (con && con->sctp_assoc == assoc_id) {
+ mutex_unlock(&connections_lock);
+ return con;
+ }
}
}
mutex_unlock(&connections_lock);
@@ -250,8 +275,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
} else {
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
- memcpy(&ret6->sin6_addr, &in6->sin6_addr,
- sizeof(in6->sin6_addr));
+ ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
}
return 0;
@@ -376,25 +400,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd)
log_print("send EOF to node failed: %d", ret);
}
+static void sctp_init_failed_foreach(struct connection *con)
+{
+ con->sctp_assoc = 0;
+ if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+ if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+ queue_work(send_workqueue, &con->swork);
+ }
+}
+
/* INIT failed but we don't know which node...
restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
- int i;
- struct connection *con;
-
mutex_lock(&connections_lock);
- for (i=1; i<=max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (!con)
- continue;
- con->sctp_assoc = 0;
- if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
- if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
- queue_work(send_workqueue, &con->swork);
- }
- }
- }
+
+ foreach_conn(sctp_init_failed_foreach);
+
mutex_unlock(&connections_lock);
}
@@ -1313,13 +1335,10 @@ out_connect:
static void clean_one_writequeue(struct connection *con)
{
- struct list_head *list;
- struct list_head *temp;
+ struct writequeue_entry *e, *safe;
spin_lock(&con->writequeue_lock);
- list_for_each_safe(list, temp, &con->writequeue) {
- struct writequeue_entry *e =
- list_entry(list, struct writequeue_entry, list);
+ list_for_each_entry_safe(e, safe, &con->writequeue, list) {
list_del(&e->list);
free_entry(e);
}
@@ -1369,14 +1388,7 @@ static void process_send_sockets(struct work_struct *work)
/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
- int nodeid;
-
- for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
- struct connection *con = __nodeid2con(nodeid, 0);
-
- if (con)
- clean_one_writequeue(con);
- }
+ foreach_conn(clean_one_writequeue);
}
static void work_stop(void)
@@ -1406,23 +1418,29 @@ static int work_start(void)
return 0;
}
-void dlm_lowcomms_stop(void)
+static void stop_conn(struct connection *con)
{
- int i;
- struct connection *con;
+ con->flags |= 0x0F;
+ if (con->sock)
+ con->sock->sk->sk_user_data = NULL;
+}
+static void free_conn(struct connection *con)
+{
+ close_connection(con, true);
+ if (con->othercon)
+ kmem_cache_free(con_cache, con->othercon);
+ hlist_del(&con->list);
+ kmem_cache_free(con_cache, con);
+}
+
+void dlm_lowcomms_stop(void)
+{
/* Set all the flags to prevent any
socket activity.
*/
mutex_lock(&connections_lock);
- for (i = 0; i <= max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con) {
- con->flags |= 0x0F;
- if (con->sock)
- con->sock->sk->sk_user_data = NULL;
- }
- }
+ foreach_conn(stop_conn);
mutex_unlock(&connections_lock);
work_stop();
@@ -1430,25 +1448,20 @@ void dlm_lowcomms_stop(void)
mutex_lock(&connections_lock);
clean_writequeues();
- for (i = 0; i <= max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con) {
- close_connection(con, true);
- if (con->othercon)
- kmem_cache_free(con_cache, con->othercon);
- kmem_cache_free(con_cache, con);
- }
- }
- max_nodeid = 0;
+ foreach_conn(free_conn);
+
mutex_unlock(&connections_lock);
kmem_cache_destroy(con_cache);
- idr_init(&connections_idr);
}
int dlm_lowcomms_start(void)
{
int error = -EINVAL;
struct connection *con;
+ int i;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&connection_hash[i]);
init_local();
if (!dlm_local_count) {
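
The fs/dlm/lowcomms.c hunks above drop the nodeid-keyed IDR and keep connections in a fixed-size table of hlist heads instead, with __find_con() doing the lookup and foreach_conn() replacing the 0..max_nodeid scans. What follows is a minimal user-space sketch of that idea, written against a plain singly linked list rather than the kernel's hlist API; all names here are invented for illustration, it is not the kernel code itself.

/* Sketch only: nodeid -> connection hash as used above, with a plain
 * linked list standing in for the kernel's hlist. */
#include <stdio.h>
#include <stdlib.h>

#define CONN_HASH_SIZE 32                 /* must stay a power of two */

struct connection {
	int nodeid;
	struct connection *next;          /* hash-chain link */
};

static struct connection *connection_hash[CONN_HASH_SIZE];

static int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE - 1);     /* sequential nodeids map 1:1 */
}

static struct connection *find_con(int nodeid)
{
	struct connection *con;

	for (con = connection_hash[nodeid_hash(nodeid)]; con; con = con->next)
		if (con->nodeid == nodeid)
			return con;
	return NULL;
}

static struct connection *get_con(int nodeid)
{
	struct connection *con = find_con(nodeid);
	int r;

	if (con)
		return con;
	con = calloc(1, sizeof(*con));
	if (!con)
		return NULL;
	con->nodeid = nodeid;
	r = nodeid_hash(nodeid);
	con->next = connection_hash[r];           /* add at head, like hlist_add_head() */
	connection_hash[r] = con;
	return con;
}

int main(void)
{
	get_con(1);
	get_con(33);                              /* collides with nodeid 1 in bucket 1 */
	printf("found %d and %d\n", find_con(1)->nodeid, find_con(33)->nodeid);
	return 0;
}
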
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 065149e84f42..ebce994ab0b7 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -84,7 +84,7 @@ struct dlm_lock_result32 {
static void compat_input(struct dlm_write_request *kb,
struct dlm_write_request32 *kb32,
- size_t count)
+ int namelen)
{
kb->version[0] = kb32->version[0];
kb->version[1] = kb32->version[1];
@@ -96,8 +96,7 @@ static void compat_input(struct dlm_write_request *kb,
kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
kb->i.lspace.flags = kb32->i.lspace.flags;
kb->i.lspace.minor = kb32->i.lspace.minor;
- memcpy(kb->i.lspace.name, kb32->i.lspace.name, count -
- offsetof(struct dlm_write_request32, i.lspace.name));
+ memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
} else if (kb->cmd == DLM_USER_PURGE) {
kb->i.purge.nodeid = kb32->i.purge.nodeid;
kb->i.purge.pid = kb32->i.purge.pid;
@@ -115,8 +114,7 @@ static void compat_input(struct dlm_write_request *kb,
kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
- memcpy(kb->i.lock.name, kb32->i.lock.name, count -
- offsetof(struct dlm_write_request32, i.lock.name));
+ memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
}
}
@@ -539,9 +537,16 @@ static ssize_t device_write(struct file *file, const char __user *buf,
#ifdef CONFIG_COMPAT
if (!kbuf->is64bit) {
struct dlm_write_request32 *k32buf;
+ int namelen = 0;
+
+ if (count > sizeof(struct dlm_write_request32))
+ namelen = count - sizeof(struct dlm_write_request32);
+
k32buf = (struct dlm_write_request32 *)kbuf;
- kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
- sizeof(struct dlm_write_request32)), GFP_KERNEL);
+
+ /* add 1 after namelen so that the name string is terminated */
+ kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
+ GFP_KERNEL);
if (!kbuf) {
kfree(k32buf);
return -ENOMEM;
@@ -549,7 +554,8 @@ static ssize_t device_write(struct file *file, const char __user *buf,
if (proc)
set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
- compat_input(kbuf, k32buf, count + 1);
+
+ compat_input(kbuf, k32buf, namelen);
kfree(k32buf);
}
#endif
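
The device_write()/compat_input() change above derives the lock/lockspace name length from the write size once, allocates exactly sizeof(request) + namelen + 1 bytes, and passes namelen down instead of the raw count. Below is a stand-alone sketch of that length arithmetic; the structure is a toy stand-in, not the real struct dlm_write_request32.

/* Sketch only: namelen = count - sizeof(header), plus one byte so the
 * copied name is always NUL-terminated, as in the patch above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct req32 {                    /* stand-in for struct dlm_write_request32 */
	int cmd;
	char name[];              /* name follows the header, unterminated */
};

int main(void)
{
	const char wire[] = { 0, 0, 0, 0, 'f', 'o', 'o' };   /* header + "foo" */
	size_t count = sizeof(wire);
	size_t namelen = 0;
	char *name;

	if (count > sizeof(struct req32))
		namelen = count - sizeof(struct req32);

	name = calloc(1, namelen + 1);            /* +1 terminates the name */
	if (!name)
		return 1;
	memcpy(name, wire + sizeof(struct req32), namelen);
	printf("namelen=%zu name=\"%s\"\n", namelen, name);
	free(name);
	return 0;
}
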
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 3e5637fc3779..44d725f612cf 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb)
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_WILL_FREE))
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
continue;
if (inode->i_mapping->nrpages == 0)
continue;
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 5e596583946c..2dda5ade75bc 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -89,7 +89,7 @@ static void ecryptfs_d_release(struct dentry *dentry)
return;
}
-struct dentry_operations ecryptfs_dops = {
+const struct dentry_operations ecryptfs_dops = {
.d_revalidate = ecryptfs_d_revalidate,
.d_release = ecryptfs_d_release,
};
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index ac749d4d644f..064c5820e4e5 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -580,7 +580,7 @@ extern const struct inode_operations ecryptfs_main_iops;
extern const struct inode_operations ecryptfs_dir_iops;
extern const struct inode_operations ecryptfs_symlink_iops;
extern const struct super_operations ecryptfs_sops;
-extern struct dentry_operations ecryptfs_dops;
+extern const struct dentry_operations ecryptfs_dops;
extern struct address_space_operations ecryptfs_aops;
extern int ecryptfs_verbosity;
extern unsigned int ecryptfs_message_buf_len;
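
The ecryptfs hunks above, like many of the later ones (fat, fuse, gfs2, hfs, hostfs, hpfs, isofs, jfs, ncpfs, nfs), only add const to dentry_operations tables so the function-pointer tables can be placed in read-only data. A tiny stand-alone illustration of the same pattern, with invented names:

/* Sketch only: a const table of function pointers, analogous to the
 * const struct dentry_operations conversions in this patch. */
#include <stdio.h>

struct ops {
	int (*revalidate)(const char *name);
};

static int my_revalidate(const char *name)
{
	printf("revalidate %s\n", name);
	return 1;
}

/* const: fixed at build time, so the linker can put it in .rodata */
static const struct ops my_ops = {
	.revalidate = my_revalidate,
};

int main(void)
{
	return my_ops.revalidate("example") ? 0 : 1;
}
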
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 4a29d6376081..7f8d2e5a7ea6 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -570,7 +570,7 @@ do_more:
error_return:
brelse(bitmap_bh);
release_blocks(sb, freed);
- DQUOT_FREE_BLOCK(inode, freed);
+ vfs_dq_free_block(inode, freed);
}
/**
@@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
/*
* Check quota for allocation of this block.
*/
- if (DQUOT_ALLOC_BLOCK(inode, num)) {
+ if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@@ -1409,7 +1409,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
- DQUOT_FREE_BLOCK(inode, *count-num);
+ vfs_dq_free_block(inode, *count-num);
*count = num;
return ret_block;
@@ -1420,7 +1420,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
- DQUOT_FREE_BLOCK(inode, *count);
+ vfs_dq_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 66321a877e74..15387c9c17d8 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
if (!is_bad_inode(inode)) {
/* Quota is already initialized in iput() */
ext2_xattr_delete_inode(inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
}
es = EXT2_SB(sb)->s_es;
@@ -586,7 +586,7 @@ got:
goto fail_drop;
}
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@@ -605,10 +605,10 @@ got:
return inode;
fail_free_drop:
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 23fff2f87783..b43b95563663 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
return error;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
- error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
if (error)
return error;
}
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7c6e3606f0ec..f983225266dc 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
sb->s_blocksize - offset : toread;
tmp_bh.b_state = 0;
+ tmp_bh.b_size = sb->s_blocksize;
err = ext2_get_block(inode, blk, &tmp_bh, 0);
if (err < 0)
return err;
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 987a5261cc2e..7913531ec6d5 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
ea_bdebug(new_bh, "reusing block");
error = -EDQUOT;
- if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (vfs_dq_alloc_block(inode, 1)) {
unlock_buffer(new_bh);
goto cleanup;
}
@@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* as if nothing happened and cleanup the unused block */
if (error && error != -ENOSPC) {
if (new_bh && new_bh != old_bh)
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto cleanup;
}
} else
@@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode)
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 0dbf1c048475..225202db8974 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
}
ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
if (dquot_freed_blocks)
- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+ vfs_dq_free_block(inode, dquot_freed_blocks);
return;
}
@@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
/*
* Check quota for allocation of this block.
*/
- if (DQUOT_ALLOC_BLOCK(inode, num)) {
+ if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@@ -1714,7 +1714,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
- DQUOT_FREE_BLOCK(inode, *count-num);
+ vfs_dq_free_block(inode, *count-num);
*count = num;
return ret_block;
@@ -1729,7 +1729,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
- DQUOT_FREE_BLOCK(inode, *count);
+ vfs_dq_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 8de6c720e510..dd13d60d524b 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
ext3_xattr_delete_inode(handle, inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -589,7 +589,7 @@ got:
sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@@ -620,10 +620,10 @@ really_out:
return ret;
fail_free_drop:
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 05e5c2e5c0d7..4a09ff169870 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3063,7 +3063,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
- error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
ext3_journal_stop(handle);
return error;
@@ -3154,7 +3154,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
ret = 2 * (bpp + indirects) + 2;
#ifdef CONFIG_QUOTA
- /* We know that structure was already allocated during DQUOT_INIT so
+ /* We know that structure was already allocated during vfs_dq_init so
* we will be updating only the data blocks + inodes */
ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif
@@ -3245,7 +3245,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 4db4ffa1edad..e2fc63cbba8b 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
- DQUOT_INIT(new_dentry->d_inode);
+ vfs_dq_init(new_dentry->d_inode);
handle = ext3_journal_start(old_dir, 2 *
EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4a970411a458..9e5b8e387e1e 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-static int ext3_dquot_initialize(struct inode *inode, int type);
-static int ext3_dquot_drop(struct inode *inode);
static int ext3_write_dquot(struct dquot *dquot);
static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
@@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext3_quota_operations = {
- .initialize = ext3_dquot_initialize,
- .drop = ext3_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@@ -1438,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
}
list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %Ld bytes\n",
@@ -2702,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
* Process 1 Process 2
* ext3_create() quota_sync()
* journal_start() write_dquot()
- * DQUOT_INIT() down(dqio_mutex)
+ * vfs_dq_init() down(dqio_mutex)
* down(dqio_mutex) journal_start()
*
*/
@@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
-static int ext3_dquot_initialize(struct inode *inode, int type)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = dquot_initialize(inode, type);
- err = ext3_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static int ext3_dquot_drop(struct inode *inode)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (IS_ERR(handle)) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- return PTR_ERR(handle);
- }
- ret = dquot_drop(inode);
- err = ext3_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
static int ext3_write_dquot(struct dquot *dquot)
{
int ret, err;
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 175414ac2210..83b7be849bd5 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext3_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@@ -774,7 +774,7 @@ inserted:
/* The old block is released after updating
the inode. */
error = -EDQUOT;
- if (DQUOT_ALLOC_BLOCK(inode, 1))
+ if (vfs_dq_alloc_block(inode, 1))
goto cleanup;
error = ext3_journal_get_write_access(handle,
new_bh);
@@ -848,7 +848,7 @@ cleanup:
return error;
cleanup_dquot:
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto cleanup;
bad_block:
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index de9459b4cb94..38f40d55899c 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
ext4_mb_free_blocks(handle, inode, block, count,
metadata, &dquot_freed_blocks);
if (dquot_freed_blocks)
- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+ vfs_dq_free_block(inode, dquot_freed_blocks);
return;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b0c87dce66a3..6083bb38057b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/magic.h>
#include <linux/jbd2.h>
+#include <linux/quota.h>
#include "ext4_i.h"
/*
@@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+extern qsize_t ext4_get_reserved_space(struct inode *inode);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2d2b3585ee91..fb51b40e3e8f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
ext4_xattr_delete_inode(handle, inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -915,7 +915,7 @@ got:
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
ret = inode;
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@@ -956,10 +956,10 @@ really_out:
return ret;
fail_free_drop:
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c7fed5b18745..71d3ecd5db79 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -975,6 +975,17 @@ out:
return err;
}
+qsize_t ext4_get_reserved_space(struct inode *inode)
+{
+ unsigned long long total;
+
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ total = EXT4_I(inode)->i_reserved_data_blocks +
+ EXT4_I(inode)->i_reserved_meta_blocks;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ return total;
+}
/*
* Calculate the number of metadata blocks need to reserve
* to allocate @blocks for non extent file based file
@@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
/* update per-inode reservations */
BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
EXT4_I(inode)->i_reserved_data_blocks -= used;
-
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ /*
+ * free those over-booking quota for metadata blocks
+ */
+
+ if (mdb_free)
+ vfs_dq_release_reservation_block(inode, mdb_free);
}
/*
@@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file,
static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
int retries = 0;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned long md_needed, mdblocks, total = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ unsigned long md_needed, mdblocks, total = 0;
/*
* recalculate the amount of metadata blocks to reserve
@@ -1570,12 +1587,23 @@ repeat:
md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
total = md_needed + nrblocks;
+ /*
+ * Make quota reservation here to prevent quota overflow
+ * later. Real quota accounting is done at pages writeout
+ * time.
+ */
+ if (vfs_dq_reserve_block(inode, total)) {
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ return -EDQUOT;
+ }
+
if (ext4_claim_free_blocks(sbi, total)) {
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
+ vfs_dq_release_reservation_block(inode, total);
return -ENOSPC;
}
EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
EXT4_I(inode)->i_reserved_meta_blocks = mdb;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ vfs_dq_release_reservation_block(inode, release);
}
static void ext4_da_page_release_reservation(struct page *page,
@@ -4612,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
- error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
ext4_journal_stop(handle);
return error;
@@ -4991,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
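
The fs/ext4/inode.c hunks above pair the delayed-allocation block reservation with a quota reservation: reserve at ext4_da_reserve_space() time, release the reservation if claiming free blocks fails or when over-booked metadata is given back, and (in the mballoc hunk further down) claim it when blocks are really allocated. A schematic sketch of that reserve/claim/release ordering follows; it uses plain counters, not the real vfs_dq_*() API.

/* Sketch only: models the reserve -> claim / release flow the ext4
 * delayed-allocation hunks add around the quota calls. */
#include <stdio.h>

static long quota_limit = 100;
static long quota_used;           /* blocks actually allocated */
static long quota_reserved;       /* blocks reserved but not yet allocated */

static int dq_reserve(long n)
{
	if (quota_used + quota_reserved + n > quota_limit)
		return -1;                        /* would be -EDQUOT */
	quota_reserved += n;
	return 0;
}

static void dq_release_reservation(long n) { quota_reserved -= n; }

static void dq_claim(long n)                      /* reservation becomes real usage */
{
	quota_reserved -= n;
	quota_used += n;
}

int main(void)
{
	long want = 10;

	if (dq_reserve(want))                     /* at reservation time */
		return 1;
	/* ... later, at writeout, the allocator claims the reserved blocks ... */
	dq_claim(want - 2);
	dq_release_reservation(2);                /* over-booked metadata given back */
	printf("used=%ld reserved=%ld\n", quota_used, quota_reserved);
	return 0;
}
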
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9f61e62f435f..b038188bd039 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
- else
+ else {
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
ac->ac_b_ex.fe_len);
+ /* convert reserved quota blocks to real quota blocks */
+ vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+ }
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
struct ext4_sb_info *sbi;
struct super_block *sb;
ext4_fsblk_t block = 0;
- unsigned int inquota;
+ unsigned int inquota = 0;
unsigned int reserv_blks = 0;
sb = ar->inode->i_sb;
@@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
(unsigned long long) ar->pleft,
(unsigned long long) ar->pright);
- if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
- /*
- * With delalloc we already reserved the blocks
+ /*
+ * For delayed allocation, we could skip the ENOSPC and
+ * EDQUOT check, as blocks and quotas have been already
+ * reserved when data being copied into pagecache.
+ */
+ if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+ ar->flags |= EXT4_MB_DELALLOC_RESERVED;
+ else {
+ /* Without delayed allocation we need to verify
+ * there is enough free blocks to do block allocation
+ * and verify allocation doesn't exceed the quota limits.
*/
while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
/* let others to free the space */
@@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
return 0;
}
reserv_blks = ar->len;
+ while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
+ ar->flags |= EXT4_MB_HINT_NOPREALLOC;
+ ar->len--;
+ }
+ inquota = ar->len;
+ if (ar->len == 0) {
+ *errp = -EDQUOT;
+ goto out3;
+ }
}
- while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
- ar->flags |= EXT4_MB_HINT_NOPREALLOC;
- ar->len--;
- }
- if (ar->len == 0) {
- *errp = -EDQUOT;
- goto out3;
- }
- inquota = ar->len;
-
- if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
- ar->flags |= EXT4_MB_DELALLOC_RESERVED;
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
if (!ac) {
@@ -4654,8 +4662,8 @@ repeat:
out2:
kmem_cache_free(ext4_ac_cachep, ac);
out1:
- if (ar->len < inquota)
- DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
+ if (inquota && ar->len < inquota)
+ vfs_dq_free_block(ar->inode, inquota - ar->len);
out3:
if (!ar->len) {
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
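
In the ext4_mb_new_blocks() hunk above, the non-delalloc path now trims the request until the quota charge succeeds, remembers how much was charged in inquota, and refunds the difference once the allocator returns fewer blocks than were charged. A stand-alone sketch of that shrink-until-it-fits loop, with a toy quota check in place of vfs_dq_alloc_block():

/* Sketch only: "shrink the request until quota accepts it", then
 * refund whatever the allocator did not actually hand out. */
#include <stdio.h>

static long quota_left = 5;

static int dq_alloc_block(long n)        /* 0 on success, nonzero if over quota */
{
	if (n > quota_left)
		return -1;
	quota_left -= n;
	return 0;
}

static void dq_free_block(long n) { quota_left += n; }

int main(void)
{
	long len = 8, inquota, allocated;

	while (len && dq_alloc_block(len))
		len--;                        /* give up preallocation, ask for less */
	if (!len)
		return 1;                     /* -EDQUOT */
	inquota = len;

	allocated = len - 1;                  /* pretend the allocator returned less */
	if (allocated < inquota)
		dq_free_block(inquota - allocated);   /* give back the unused charge */

	printf("asked=%ld charged=%ld left=%ld\n", inquota, allocated, quota_left);
	return 0;
}
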
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ba702bd7910d..83410244d3ee 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
- DQUOT_INIT(new_dentry->d_inode);
+ vfs_dq_init(new_dentry->d_inode);
handle = ext4_journal_start(old_dir, 2 *
EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 39d1993cfa13..f7371a6a923d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_
#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-static int ext4_dquot_initialize(struct inode *inode, int type);
-static int ext4_dquot_drop(struct inode *inode);
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
@@ -942,9 +940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext4_quota_operations = {
- .initialize = ext4_dquot_initialize,
- .drop = ext4_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
+ .reserve_space = dquot_reserve_space,
+ .claim_space = dquot_claim_space,
+ .release_rsv = dquot_release_reserved_space,
+ .get_reserved_space = ext4_get_reserved_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
@@ -1802,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
}
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %lld bytes\n",
@@ -3367,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
* is locked for write. Otherwise the are possible deadlocks:
* Process 1 Process 2
* ext4_create() quota_sync()
- * jbd2_journal_start() write_dquot()
- * DQUOT_INIT() down(dqio_mutex)
+ * jbd2_journal_start() write_dquot()
+ * vfs_dq_init() down(dqio_mutex)
* down(dqio_mutex) jbd2_journal_start()
*
*/
@@ -3380,44 +3382,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
-static int ext4_dquot_initialize(struct inode *inode, int type)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = dquot_initialize(inode, type);
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static int ext4_dquot_drop(struct inode *inode)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (IS_ERR(handle)) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- return PTR_ERR(handle);
- }
- ret = dquot_drop(inode);
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
static int ext4_write_dquot(struct dquot *dquot)
{
int ret, err;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 157ce6589c54..62b31c246994 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext4_handle_dirty_metadata(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@@ -784,7 +784,7 @@ inserted:
/* The old block is released after updating
the inode. */
error = -EDQUOT;
- if (DQUOT_ALLOC_BLOCK(inode, 1))
+ if (vfs_dq_alloc_block(inode, 1))
goto cleanup;
error = ext4_journal_get_write_access(handle,
new_bh);
@@ -860,7 +860,7 @@ cleanup:
return error;
cleanup_dquot:
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto cleanup;
bad_block:
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 7ba03a4acbe0..da3f361a37dd 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -188,7 +188,7 @@ old_compare:
goto out;
}
-static struct dentry_operations msdos_dentry_operations = {
+static const struct dentry_operations msdos_dentry_operations = {
.d_hash = msdos_hash,
.d_compare = msdos_cmp,
};
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 8ae32e37673c..a0e00e3a46e9 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -166,13 +166,13 @@ static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
return 1;
}
-static struct dentry_operations vfat_ci_dentry_ops = {
+static const struct dentry_operations vfat_ci_dentry_ops = {
.d_revalidate = vfat_revalidate_ci,
.d_hash = vfat_hashi,
.d_compare = vfat_cmpi,
};
-static struct dentry_operations vfat_dentry_ops = {
+static const struct dentry_operations vfat_dentry_ops = {
.d_revalidate = vfat_revalidate,
.d_hash = vfat_hash,
.d_compare = vfat_cmp,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fdff346e96fd..06da05261e04 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -224,7 +224,7 @@ static int invalid_nodeid(u64 nodeid)
return !nodeid || nodeid == FUSE_ROOT_ID;
}
-struct dentry_operations fuse_dentry_operations = {
+const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
};
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5e64b815a5a1..6fc5aedaa0d5 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -493,7 +493,7 @@ static inline u64 get_node_id(struct inode *inode)
/** Device operations */
extern const struct file_operations fuse_dev_operations;
-extern struct dentry_operations fuse_dentry_operations;
+extern const struct dentry_operations fuse_dentry_operations;
/**
* Get a filled in inode
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 5eb57b044382..022c66cd5606 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -107,7 +107,7 @@ static int gfs2_dhash(struct dentry *dentry, struct qstr *str)
return 0;
}
-struct dentry_operations gfs2_dops = {
+const struct dentry_operations gfs2_dops = {
.d_revalidate = gfs2_drevalidate,
.d_hash = gfs2_dhash,
};
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 91abdbedcc86..b56413e3e40d 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -49,7 +49,7 @@ extern struct file_system_type gfs2_fs_type;
extern struct file_system_type gfs2meta_fs_type;
extern const struct export_operations gfs2_export_ops;
extern const struct super_operations gfs2_super_ops;
-extern struct dentry_operations gfs2_dops;
+extern const struct dentry_operations gfs2_dops;
#endif /* __SUPER_DOT_H__ */
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 9955232fdf8c..052387e11671 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -213,7 +213,7 @@ extern void hfs_mdb_put(struct super_block *);
extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
/* string.c */
-extern struct dentry_operations hfs_dentry_operations;
+extern const struct dentry_operations hfs_dentry_operations;
extern int hfs_hash_dentry(struct dentry *, struct qstr *);
extern int hfs_strcmp(const unsigned char *, unsigned int,
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 5bf89ec01cd4..7478f5c219aa 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -31,7 +31,7 @@ static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
return 1;
}
-struct dentry_operations hfs_dentry_operations =
+const struct dentry_operations hfs_dentry_operations =
{
.d_revalidate = hfs_revalidate_dentry,
.d_hash = hfs_hash_dentry,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index f027a905225f..5c10d803d9df 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -327,7 +327,7 @@ void hfsplus_file_truncate(struct inode *);
/* inode.c */
extern const struct address_space_operations hfsplus_aops;
extern const struct address_space_operations hfsplus_btree_aops;
-extern struct dentry_operations hfsplus_dentry_operations;
+extern const struct dentry_operations hfsplus_dentry_operations;
void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f105ee9e1cc4..1bcf597c0562 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -137,7 +137,7 @@ const struct address_space_operations hfsplus_aops = {
.writepages = hfsplus_writepages,
};
-struct dentry_operations hfsplus_dentry_operations = {
+const struct dentry_operations hfsplus_dentry_operations = {
.d_hash = hfsplus_hash_dentry,
.d_compare = hfsplus_compare_dentry,
};
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5c538e0ec14b..fe02ad4740e7 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -31,12 +31,12 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
-int hostfs_d_delete(struct dentry *dentry)
+static int hostfs_d_delete(struct dentry *dentry)
{
return 1;
}
-struct dentry_operations hostfs_dentry_ops = {
+static const struct dentry_operations hostfs_dentry_ops = {
.d_delete = hostfs_d_delete,
};
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 08319126b2af..940d6d150bee 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -49,7 +49,7 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
return 0;
}
-static struct dentry_operations hpfs_dentry_operations = {
+static const struct dentry_operations hpfs_dentry_operations = {
.d_hash = hpfs_hash_dentry,
.d_compare = hpfs_compare_dentry,
};
diff --git a/fs/inode.c b/fs/inode.c
index 643ac43e5a5c..d06d6d268de9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -294,7 +294,7 @@ void clear_inode(struct inode *inode)
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
inode_sync_wait(inode);
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
if (inode->i_sb->s_op->clear_inode)
inode->i_sb->s_op->clear_inode(inode);
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@@ -366,6 +366,8 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
if (tmp == head)
break;
inode = list_entry(tmp, struct inode, i_sb_list);
+ if (inode->i_state & I_NEW)
+ continue;
invalidate_inode_buffers(inode);
if (!atomic_read(&inode->i_count)) {
list_move(&inode->i_list, dispose);
@@ -1168,7 +1170,7 @@ void generic_delete_inode(struct inode *inode)
if (op->delete_inode) {
void (*delete)(struct inode *) = op->delete_inode;
if (!is_bad_inode(inode))
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
/* Filesystems implementing their own
* s_op->delete_inode are required to call
* truncate_inode_pages and clear_inode()
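
Both the drop_caches.c hunk earlier and the invalidate_list() hunk above skip inodes flagged I_NEW, i.e. inodes whose setup has not finished. A small stand-alone illustration of guarding an iteration with such state flags; the flag names and objects here are invented for illustration.

/* Sketch only: skip objects whose "still being set up" flag is set,
 * as the I_NEW checks above do for half-constructed inodes. */
#include <stdio.h>

#define OBJ_NEW     0x1           /* still being initialised, don't touch */
#define OBJ_FREEING 0x2

struct obj { int id; unsigned int flags; };

int main(void)
{
	struct obj list[] = {
		{ 1, 0 }, { 2, OBJ_NEW }, { 3, OBJ_FREEING }, { 4, 0 },
	};
	size_t i;

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
		if (list[i].flags & (OBJ_NEW | OBJ_FREEING))
			continue;                 /* not safe to invalidate yet */
		printf("invalidating object %d\n", list[i].id);
	}
	return 0;
}
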
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 6147ec3643a0..13d2eddd0692 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -114,7 +114,7 @@ static const struct super_operations isofs_sops = {
};
-static struct dentry_operations isofs_dentry_ops[] = {
+static const struct dentry_operations isofs_dentry_ops[] = {
{
.d_hash = isofs_hash,
.d_compare = isofs_dentry_cmp,
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index d3e5c33665de..a166c1669e82 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
- if (DQUOT_TRANSFER(inode, iattr))
+ if (vfs_dq_transfer(inode, iattr))
return -EDQUOT;
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index b00ee9f05a06..b2ae190a77ba 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode)
/*
* Free the inode from the quota allocation.
*/
- DQUOT_INIT(inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_init(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
}
clear_inode(inode);
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 4dcc05819998..925871e9887b 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
- if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
+ if (vfs_dq_alloc_block(ip, sbi->nbperpage))
goto clean_up;
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
- DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+ vfs_dq_free_block(ip, sbi->nbperpage);
goto clean_up;
}
@@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
dbFree(ip, xaddr, sbi->nbperpage);
- DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+ vfs_dq_free_block(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
@@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid,
n = xlen;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, n)) {
+ if (vfs_dq_alloc_block(ip, n)) {
rc = -EDQUOT;
goto extendOut;
}
@@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid,
/* Rollback quota allocation */
if (rc && quota_allocation)
- DQUOT_FREE_BLOCK(ip, quota_allocation);
+ vfs_dq_free_block(ip, quota_allocation);
dtSplitUp_Exit:
@@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
return -EIO;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid,
rp = rmp->data;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&fp->header.self);
/* Free quota allocation. */
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
@@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&p->header.self);
/* Free quota allocation */
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 7ae1e3281de9..169802ea07f9 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
}
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+ if (vfs_dq_alloc_block(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
@@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
- DQUOT_FREE_BLOCK(ip, nxlen);
+ vfs_dq_free_block(ip, nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
}
@@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
goto exit;
/* Allocat blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+ if (vfs_dq_alloc_block(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
@@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
/* extend the extent */
if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
dbFree(ip, xaddr + xlen, delta);
- DQUOT_FREE_BLOCK(ip, nxlen);
+ vfs_dq_free_block(ip, nxlen);
goto exit;
}
} else {
@@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
*/
if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
dbFree(ip, nxaddr, nxlen);
- DQUOT_FREE_BLOCK(ip, nxlen);
+ vfs_dq_free_block(ip, nxlen);
goto exit;
}
}
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index d4d142c2edd4..dc0e02159ac9 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
/*
* Allocate inode to quota.
*/
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
rc = -EDQUOT;
goto fail_drop;
}
@@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
return inode;
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
fail_unlock:
inode->i_nlink = 0;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index adb2fafcc544..1eff7db34d63 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -47,5 +47,5 @@ extern const struct file_operations jfs_dir_operations;
extern const struct inode_operations jfs_file_inode_operations;
extern const struct file_operations jfs_file_operations;
extern const struct inode_operations jfs_symlink_inode_operations;
-extern struct dentry_operations jfs_ci_dentry_operations;
+extern const struct dentry_operations jfs_ci_dentry_operations;
#endif /* _H_JFS_INODE */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index ae3acafb447b..a27e26c90568 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -846,10 +846,10 @@ int xtInsert(tid_t tid, /* transaction id */
hint = addressXAD(xad) + lengthXAD(xad) - 1;
} else
hint = 0;
- if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
+ if ((rc = vfs_dq_alloc_block(ip, xlen)))
goto out;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
goto out;
}
}
@@ -878,7 +878,7 @@ int xtInsert(tid_t tid, /* transaction id */
/* undo data extent allocation */
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
}
return rc;
}
@@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
goto clean_up;
}
@@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
/* Rollback quota allocation. */
if (quota_allocation)
- DQUOT_FREE_BLOCK(ip, quota_allocation);
+ vfs_dq_free_block(ip, quota_allocation);
return (rc);
}
@@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid,
return -EIO;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
ip->i_size = newsize;
/* update quota allocation to reflect freed blocks */
- DQUOT_FREE_BLOCK(ip, nfreed);
+ vfs_dq_free_block(ip, nfreed);
/*
* free tlock of invalidated pages
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b4de56b851e4..514ee2edb92a 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -35,7 +35,7 @@
/*
* forward references
*/
-struct dentry_operations jfs_ci_dentry_operations;
+const struct dentry_operations jfs_ci_dentry_operations;
static s64 commitZeroLink(tid_t, struct inode *);
@@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
- DQUOT_INIT(ip);
+ vfs_dq_init(ip);
/* directory must be empty to be removed */
if (!dtEmpty(ip)) {
@@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
- DQUOT_INIT(ip);
+ vfs_dq_init(ip);
if ((rc = get_UCSname(&dname, dentry)))
goto out;
@@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
} else if (new_ip) {
IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
/* Init inode for quota operations. */
- DQUOT_INIT(new_ip);
+ vfs_dq_init(new_ip);
}
/*
@@ -1595,7 +1595,7 @@ out:
return result;
}
-struct dentry_operations jfs_ci_dentry_operations =
+const struct dentry_operations jfs_ci_dentry_operations =
{
.d_hash = jfs_ci_hash,
.d_compare = jfs_ci_compare,
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 9b7f2cdaae0a..61dfa8173ebc 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
/* Allocate new blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
+ if (vfs_dq_alloc_block(ip, nblocks)) {
return -EDQUOT;
}
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc) {
/*Rollback quota allocation. */
- DQUOT_FREE_BLOCK(ip, nblocks);
+ vfs_dq_free_block(ip, nblocks);
return rc;
}
@@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
failed:
/* Rollback quota allocation. */
- DQUOT_FREE_BLOCK(ip, nblocks);
+ vfs_dq_free_block(ip, nblocks);
dbFree(ip, blkno, nblocks);
return rc;
@@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (blocks_needed > current_blocks) {
/* Allocate new blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
+ if (vfs_dq_alloc_block(inode, blocks_needed))
return -EDQUOT;
quota_allocation = blocks_needed;
@@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
clean_up:
/* Rollback quota allocation */
if (quota_allocation)
- DQUOT_FREE_BLOCK(inode, quota_allocation);
+ vfs_dq_free_block(inode, quota_allocation);
return (rc);
}
@@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
/* If old blocks exist, they must be removed from quota allocation. */
if (old_blocks)
- DQUOT_FREE_BLOCK(inode, old_blocks);
+ vfs_dq_free_block(inode, old_blocks);
inode->i_ctime = CURRENT_TIME;
diff --git a/fs/libfs.c b/fs/libfs.c
index 49b44099dabb..4910a36f516e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -44,7 +44,7 @@ static int simple_delete_dentry(struct dentry *dentry)
*/
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
- static struct dentry_operations simple_dentry_operations = {
+ static const struct dentry_operations simple_dentry_operations = {
.d_delete = simple_delete_dentry,
};
@@ -242,7 +242,8 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name,
d_instantiate(dentry, root);
s->s_root = dentry;
s->s_flags |= MS_ACTIVE;
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
Enomem:
up_write(&s->s_umount);
diff --git a/fs/namei.c b/fs/namei.c
index 199317642ad6..d040ce11785d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1473,7 +1473,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->create(dir, dentry, mode, nd);
if (!error)
fsnotify_create(dir, dentry);
@@ -1489,24 +1489,22 @@ int may_open(struct path *path, int acc_mode, int flag)
if (!inode)
return -ENOENT;
- if (S_ISLNK(inode->i_mode))
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFLNK:
return -ELOOP;
-
- if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
- return -EISDIR;
-
- /*
- * FIFO's, sockets and device files are special: they don't
- * actually live on the filesystem itself, and as such you
- * can write to them even if the filesystem is read-only.
- */
- if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- flag &= ~O_TRUNC;
- } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+ case S_IFDIR:
+ if (acc_mode & MAY_WRITE)
+ return -EISDIR;
+ break;
+ case S_IFBLK:
+ case S_IFCHR:
if (path->mnt->mnt_flags & MNT_NODEV)
return -EACCES;
-
+ /*FALLTHRU*/
+ case S_IFIFO:
+ case S_IFSOCK:
flag &= ~O_TRUNC;
+ break;
}
error = inode_permission(inode, acc_mode);
@@ -1552,7 +1550,7 @@ int may_open(struct path *path, int acc_mode, int flag)
error = security_path_truncate(path, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
if (!error) {
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
error = do_truncate(dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -1563,7 +1561,7 @@ int may_open(struct path *path, int acc_mode, int flag)
return error;
} else
if (flag & FMODE_WRITE)
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
return 0;
}
@@ -1946,7 +1944,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
@@ -2045,7 +2043,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->mkdir(dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
@@ -2131,7 +2129,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->rmdir)
return -EPERM;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
dentry_unhash(dentry);
@@ -2218,7 +2216,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->unlink)
return -EPERM;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
@@ -2329,7 +2327,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->symlink(dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
@@ -2413,7 +2411,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
return error;
mutex_lock(&inode->i_mutex);
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->link(old_dentry, dir, new_dentry);
mutex_unlock(&inode->i_mutex);
if (!error)
@@ -2612,8 +2610,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old_dir->i_op->rename)
return -EPERM;
- DQUOT_INIT(old_dir);
- DQUOT_INIT(new_dir);
+ vfs_dq_init(old_dir);
+ vfs_dq_init(new_dir);
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
diff --git a/fs/namespace.c b/fs/namespace.c
index f0e753097353..0a42e0e96027 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,11 +397,10 @@ static void __mnt_unmake_readonly(struct vfsmount *mnt)
spin_unlock(&vfsmount_lock);
}
-int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
+void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
mnt->mnt_sb = sb;
mnt->mnt_root = dget(sb->s_root);
- return 0;
}
EXPORT_SYMBOL(simple_set_mnt);
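
With simple_set_mnt() now returning void, its callers set up the vfsmount and return 0 explicitly. A minimal sketch of an updated get_sb callback (example_get_sb() and example_build_super() are hypothetical):

static int example_get_sb(struct file_system_type *fs_type, int flags,
			  const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct super_block *s = example_build_super(fs_type, flags, data);

	if (IS_ERR(s))
		return PTR_ERR(s);
	simple_set_mnt(mnt, s);	/* cannot fail any more */
	return 0;
}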
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 07e9715b8658..9c590722d87e 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -79,7 +79,7 @@ static int ncp_hash_dentry(struct dentry *, struct qstr *);
static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
static int ncp_delete_dentry(struct dentry *);
-static struct dentry_operations ncp_dentry_operations =
+static const struct dentry_operations ncp_dentry_operations =
{
.d_revalidate = ncp_lookup_validate,
.d_hash = ncp_hash_dentry,
@@ -87,7 +87,7 @@ static struct dentry_operations ncp_dentry_operations =
.d_delete = ncp_delete_dentry,
};
-struct dentry_operations ncp_root_dentry_operations =
+const struct dentry_operations ncp_root_dentry_operations =
{
.d_hash = ncp_hash_dentry,
.d_compare = ncp_compare_dentry,
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 672368f865ca..78bf72fc1db3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -899,7 +899,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
iput(inode);
}
-struct dentry_operations nfs_dentry_operations = {
+const struct dentry_operations nfs_dentry_operations = {
.d_revalidate = nfs_lookup_revalidate,
.d_delete = nfs_dentry_delete,
.d_iput = nfs_dentry_iput,
@@ -967,7 +967,7 @@ out:
#ifdef CONFIG_NFS_V4
static int nfs_open_revalidate(struct dentry *, struct nameidata *);
-struct dentry_operations nfs4_dentry_operations = {
+const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs_open_revalidate,
.d_delete = nfs_dentry_delete,
.d_iput = nfs_dentry_iput,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4e4d33204376..84345deab26f 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -179,7 +179,7 @@ struct nfs4_state_recovery_ops {
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
};
-extern struct dentry_operations nfs4_dentry_operations;
+extern const struct dentry_operations nfs4_dentry_operations;
extern const struct inode_operations nfs4_dir_inode_operations;
/* inode.c */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c165a6403df0..78376b6c0236 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
put_write_access(inode);
goto out_nfserr;
}
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
}
/* sanitize the mode change */
@@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
else
flags = O_WRONLY|O_LARGEFILE;
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
}
*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
flags, cred);
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 331f2e88e284..220c13f0d73d 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -380,6 +380,14 @@ void inotify_unmount_inodes(struct list_head *list)
struct list_head *watches;
/*
+ * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
+ * I_WILL_FREE, or I_NEW which is fine because by that point
+ * the inode cannot have any associated watches.
+ */
+ if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+ continue;
+
+ /*
* If i_count is zero, the inode cannot have any watches and
* doing an __iget/iput with MS_ACTIVE clear would actually
* evict all inodes with zero i_count from icache which is
@@ -388,14 +396,6 @@ void inotify_unmount_inodes(struct list_head *list)
if (!atomic_read(&inode->i_count))
continue;
- /*
- * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
- * I_WILL_FREE which is fine because by that point the inode
- * cannot have any associated watches.
- */
- if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
- continue;
-
need_iput_tmp = need_iput;
need_iput = NULL;
/* In case inotify_remove_watch_locked() drops a reference. */
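
The inode state test now runs before the i_count test, so inodes that are still being initialized (I_NEW) or torn down are skipped first. The resulting check order in the loop is, in sketch form:

/* Sketch of the check order inside inotify_unmount_inodes(). */
list_for_each_entry(inode, list, i_sb_list) {
	if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE | I_NEW))
		continue;	/* cannot __iget() these; they carry no watches */
	if (!atomic_read(&inode->i_count))
		continue;	/* no watches, and __iget()/iput() would evict it */
	/* ... take a reference and remove the inode's watches ... */
}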
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index e9d7c2038c0f..7d604480557a 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -455,7 +455,7 @@ out_move:
d_move(dentry, target);
}
-struct dentry_operations ocfs2_dentry_ops = {
+const struct dentry_operations ocfs2_dentry_ops = {
.d_revalidate = ocfs2_dentry_revalidate,
.d_iput = ocfs2_dentry_iput,
};
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index d06e16c06640..faa12e75f98d 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -26,7 +26,7 @@
#ifndef OCFS2_DCACHE_H
#define OCFS2_DCACHE_H
-extern struct dentry_operations ocfs2_dentry_ops;
+extern const struct dentry_operations ocfs2_dentry_ops;
struct ocfs2_dentry_lock {
/* Use count of dentry lock */
diff --git a/fs/open.c b/fs/open.c
index a3a78ceb2a2b..75b61677daaf 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
if (!error)
error = security_path_truncate(&path, length, 0);
if (!error) {
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
error = do_truncate(path.dentry, length, 0, NULL);
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 94ad15967cf9..4af7aa521813 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -860,7 +860,7 @@ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
dentry->d_inode->i_ino);
}
-static struct dentry_operations pipefs_dentry_operations = {
+static const struct dentry_operations pipefs_dentry_operations = {
.d_delete = pipefs_delete_dentry,
.d_dname = pipefs_dname,
};
@@ -1024,11 +1024,6 @@ int do_pipe_flags(int *fd, int flags)
return error;
}
-int do_pipe(int *fd)
-{
- return do_pipe_flags(fd, 0);
-}
-
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index beaa0ce3b82e..aef6d55b7de6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1545,7 +1545,7 @@ static int pid_delete_dentry(struct dentry * dentry)
return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
}
-static struct dentry_operations pid_dentry_operations =
+static const struct dentry_operations pid_dentry_operations =
{
.d_revalidate = pid_revalidate,
.d_delete = pid_delete_dentry,
@@ -1717,7 +1717,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
return 0;
}
-static struct dentry_operations tid_fd_dentry_operations =
+static const struct dentry_operations tid_fd_dentry_operations =
{
.d_revalidate = tid_fd_revalidate,
.d_delete = pid_delete_dentry,
@@ -2339,7 +2339,7 @@ static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
return 0;
}
-static struct dentry_operations proc_base_dentry_operations =
+static const struct dentry_operations proc_base_dentry_operations =
{
.d_revalidate = proc_base_revalidate,
.d_delete = pid_delete_dentry,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index db7fa5cab988..5d2989e9dcc1 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -363,7 +363,7 @@ static int proc_delete_dentry(struct dentry * dentry)
return 1;
}
-static struct dentry_operations proc_dentry_operations =
+static const struct dentry_operations proc_dentry_operations =
{
.d_delete = proc_delete_dentry,
};
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 94fcfff6863a..9b1e4e9a16bf 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -7,7 +7,7 @@
#include <linux/security.h>
#include "internal.h"
-static struct dentry_operations proc_sys_dentry_operations;
+static const struct dentry_operations proc_sys_dentry_operations;
static const struct file_operations proc_sys_file_operations;
static const struct inode_operations proc_sys_inode_operations;
static const struct file_operations proc_sys_dir_file_operations;
@@ -396,7 +396,7 @@ static int proc_sys_compare(struct dentry *dir, struct qstr *qstr,
return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl);
}
-static struct dentry_operations proc_sys_dentry_operations = {
+static const struct dentry_operations proc_sys_dentry_operations = {
.d_revalidate = proc_sys_revalidate,
.d_delete = proc_sys_delete,
.d_compare = proc_sys_compare,
diff --git a/fs/proc/root.c b/fs/proc/root.c
index f6299a25594e..1e15a2b176e8 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -83,7 +83,8 @@ static int proc_get_sb(struct file_system_type *fs_type,
ns->proc_mnt = mnt;
}
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
}
static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 000000000000..8047e01ef46b
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
+#
+# Quota configuration
+#
+
+config QUOTA
+ bool "Quota support"
+ help
+ If you say Y here, you will be able to set per-user limits for disk
+ usage (also called disk quotas). Currently, it works for the
+ ext2, ext3, and reiserfs file systems. ext3 also supports journalled
+ quotas, for which you don't need to run quotacheck(8) after an unclean
+ shutdown.
+ For further details, read the Quota mini-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, or the documentation provided
+ with the quota tools. Quota support is probably only useful for
+ multi-user systems. If unsure, say N.
+
+config QUOTA_NETLINK_INTERFACE
+ bool "Report quota messages through netlink interface"
+ depends on QUOTA && NET
+ help
+ If you say Y here, quota warnings (about exceeding softlimit, reaching
+ hardlimit, etc.) will be reported through the netlink interface. If unsure,
+ say Y.
+
+config PRINT_QUOTA_WARNING
+ bool "Print quota warnings to console (OBSOLETE)"
+ depends on QUOTA
+ default y
+ help
+ If you say Y here, quota warnings (about exceeding softlimit, reaching
+ hardlimit, etc.) will be printed to the process' controlling terminal.
+ Note that this behavior is currently deprecated and may go away in the
+ future. Please use notification via the netlink socket instead.
+
+# Generic support for tree structured quota files. Selected when needed.
+config QUOTA_TREE
+ tristate
+
+config QFMT_V1
+ tristate "Old quota format support"
+ depends on QUOTA
+ help
+ This quota format was (is) used by kernels earlier than 2.4.22. If
+ you have quota working and you don't want to convert to the new
+ quota format, say Y here.
+
+config QFMT_V2
+ tristate "Quota format v2 support"
+ depends on QUOTA
+ select QUOTA_TREE
+ help
+ This quota format allows using quotas with 32-bit UIDs/GIDs. If you
+ need this functionality, say Y here.
+
+config QUOTACTL
+ bool
+ depends on XFS_QUOTA || QUOTA
+ default y
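
A typical configuration enabling the v2 quota format with netlink warnings would then contain a fragment along these lines (an illustration only, not part of the patch):

CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QUOTA_TREE=y
CONFIG_QFMT_V2=y
CONFIG_QUOTACTL=y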
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 000000000000..385a0831cc99
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Linux quota subsystem.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y :=
+
+obj-$(CONFIG_QUOTA) += dquot.o
+obj-$(CONFIG_QFMT_V1) += quota_v1.o
+obj-$(CONFIG_QFMT_V2) += quota_v2.o
+obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
+obj-$(CONFIG_QUOTACTL) += quota.o
diff --git a/fs/dquot.c b/fs/quota/dquot.c
index d6add0bf5ad3..2ca967a5ef77 100644
--- a/fs/dquot.c
+++ b/fs/quota/dquot.c
@@ -129,9 +129,10 @@
* i_mutex on quota files is special (it's below dqio_mutex)
*/
-static DEFINE_SPINLOCK(dq_list_lock);
-static DEFINE_SPINLOCK(dq_state_lock);
-DEFINE_SPINLOCK(dq_data_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
static char *quotatypes[] = INITQFNAMES;
static struct quota_format_type *quota_formats; /* List of registered formats */
@@ -148,35 +149,46 @@ int register_quota_format(struct quota_format_type *fmt)
spin_unlock(&dq_list_lock);
return 0;
}
+EXPORT_SYMBOL(register_quota_format);
void unregister_quota_format(struct quota_format_type *fmt)
{
struct quota_format_type **actqf;
spin_lock(&dq_list_lock);
- for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
+ for (actqf = &quota_formats; *actqf && *actqf != fmt;
+ actqf = &(*actqf)->qf_next)
+ ;
if (*actqf)
*actqf = (*actqf)->qf_next;
spin_unlock(&dq_list_lock);
}
+EXPORT_SYMBOL(unregister_quota_format);
static struct quota_format_type *find_quota_format(int id)
{
struct quota_format_type *actqf;
spin_lock(&dq_list_lock);
- for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
if (!actqf || !try_module_get(actqf->qf_owner)) {
int qm;
spin_unlock(&dq_list_lock);
- for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
- if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
+ for (qm = 0; module_names[qm].qm_fmt_id &&
+ module_names[qm].qm_fmt_id != id; qm++)
+ ;
+ if (!module_names[qm].qm_fmt_id ||
+ request_module(module_names[qm].qm_mod_name))
return NULL;
spin_lock(&dq_list_lock);
- for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
if (actqf && !try_module_get(actqf->qf_owner))
actqf = NULL;
}
@@ -215,6 +227,7 @@ static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;
struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
@@ -230,7 +243,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
*/
static inline void insert_dquot_hash(struct dquot *dquot)
{
- struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+ struct hlist_head *head;
+ head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
hlist_add_head(&dquot->dq_hash, head);
}
@@ -239,17 +253,19 @@ static inline void remove_dquot_hash(struct dquot *dquot)
hlist_del_init(&dquot->dq_hash);
}
-static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
+static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+ unsigned int id, int type)
{
struct hlist_node *node;
struct dquot *dquot;
hlist_for_each (node, dquot_hash+hashent) {
dquot = hlist_entry(node, struct dquot, dq_hash);
- if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
+ if (dquot->dq_sb == sb && dquot->dq_id == id &&
+ dquot->dq_type == type)
return dquot;
}
- return NODQUOT;
+ return NULL;
}
/* Add a dquot to the tail of the free list */
@@ -309,6 +325,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
spin_unlock(&dq_list_lock);
return 0;
}
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
@@ -345,8 +362,10 @@ int dquot_acquire(struct dquot *dquot)
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
/* Write the info if needed */
- if (info_dirty(&dqopt->info[dquot->dq_type]))
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
if (ret < 0)
goto out_iolock;
if (ret2 < 0) {
@@ -360,6 +379,7 @@ out_iolock:
mutex_unlock(&dquot->dq_lock);
return ret;
}
+EXPORT_SYMBOL(dquot_acquire);
/*
* Write dquot to disk
@@ -380,8 +400,10 @@ int dquot_commit(struct dquot *dquot)
* => we have better not writing it */
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
- if (info_dirty(&dqopt->info[dquot->dq_type]))
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
if (ret >= 0)
ret = ret2;
}
@@ -389,6 +411,7 @@ out_sem:
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_commit);
/*
* Release dquot
@@ -406,8 +429,10 @@ int dquot_release(struct dquot *dquot)
if (dqopt->ops[dquot->dq_type]->release_dqblk) {
ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
/* Write the info */
- if (info_dirty(&dqopt->info[dquot->dq_type]))
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
if (ret >= 0)
ret = ret2;
}
@@ -417,6 +442,7 @@ out_dqlock:
mutex_unlock(&dquot->dq_lock);
return ret;
}
+EXPORT_SYMBOL(dquot_release);
void dquot_destroy(struct dquot *dquot)
{
@@ -516,6 +542,7 @@ out:
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_scan_active);
int vfs_quota_sync(struct super_block *sb, int type)
{
@@ -533,7 +560,8 @@ int vfs_quota_sync(struct super_block *sb, int type)
spin_lock(&dq_list_lock);
dirty = &dqopt->info[cnt].dqi_dirty_list;
while (!list_empty(dirty)) {
- dquot = list_first_entry(dirty, struct dquot, dq_dirty);
+ dquot = list_first_entry(dirty, struct dquot,
+ dq_dirty);
/* Dirty and inactive can be only bad dquot... */
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
clear_dquot_dirty(dquot);
@@ -563,6 +591,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
return 0;
}
+EXPORT_SYMBOL(vfs_quota_sync);
/* Free unused dquots from cache */
static void prune_dqcache(int count)
@@ -672,6 +701,7 @@ we_slept:
put_dquot_last(dquot);
spin_unlock(&dq_list_lock);
}
+EXPORT_SYMBOL(dqput);
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
@@ -685,7 +715,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
dquot = sb->dq_op->alloc_dquot(sb, type);
if(!dquot)
- return NODQUOT;
+ return NULL;
mutex_init(&dquot->dq_lock);
INIT_LIST_HEAD(&dquot->dq_free);
@@ -711,10 +741,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
unsigned int hashent = hashfn(sb, id, type);
- struct dquot *dquot = NODQUOT, *empty = NODQUOT;
+ struct dquot *dquot = NULL, *empty = NULL;
if (!sb_has_quota_active(sb, type))
- return NODQUOT;
+ return NULL;
we_slept:
spin_lock(&dq_list_lock);
spin_lock(&dq_state_lock);
@@ -725,15 +755,17 @@ we_slept:
}
spin_unlock(&dq_state_lock);
- if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
- if (empty == NODQUOT) {
+ dquot = find_dquot(hashent, sb, id, type);
+ if (!dquot) {
+ if (!empty) {
spin_unlock(&dq_list_lock);
- if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
+ empty = get_empty_dquot(sb, type);
+ if (!empty)
schedule(); /* Try to wait for a moment... */
goto we_slept;
}
dquot = empty;
- empty = NODQUOT;
+ empty = NULL;
dquot->dq_id = id;
/* all dquots go on the inuse_list */
put_inuse(dquot);
@@ -749,13 +781,14 @@ we_slept:
dqstats.lookups++;
spin_unlock(&dq_list_lock);
}
- /* Wait for dq_lock - after this we know that either dquot_release() is already
- * finished or it will be canceled due to dq_count > 1 test */
+ /* Wait for dq_lock - after this we know that either dquot_release() is
+ * already finished or it will be canceled due to dq_count > 1 test */
wait_on_dquot(dquot);
- /* Read the dquot and instantiate it (everything done only if needed) */
- if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
+ /* Read the dquot / allocate space in quota file */
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
+ sb->dq_op->acquire_dquot(dquot) < 0) {
dqput(dquot);
- dquot = NODQUOT;
+ dquot = NULL;
goto out;
}
#ifdef __DQUOT_PARANOIA
@@ -767,6 +800,7 @@ out:
return dquot;
}
+EXPORT_SYMBOL(dqget);
static int dqinit_needed(struct inode *inode, int type)
{
@@ -775,9 +809,9 @@ static int dqinit_needed(struct inode *inode, int type)
if (IS_NOQUOTA(inode))
return 0;
if (type != -1)
- return inode->i_dquot[type] == NODQUOT;
+ return !inode->i_dquot[type];
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
return 1;
return 0;
}
@@ -789,12 +823,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+ continue;
if (!atomic_read(&inode->i_writecount))
continue;
if (!dqinit_needed(inode, type))
continue;
- if (inode->i_state & (I_FREEING|I_WILL_FREE))
- continue;
__iget(inode);
spin_unlock(&inode_lock);
@@ -813,7 +847,10 @@ static void add_dquot_ref(struct super_block *sb, int type)
iput(old_inode);
}
-/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
+/*
+ * Return 0 if dqput() won't block.
+ * (note that 1 doesn't necessarily mean blocking)
+ */
static inline int dqput_blocks(struct dquot *dquot)
{
if (atomic_read(&dquot->dq_count) <= 1)
@@ -821,22 +858,27 @@ static inline int dqput_blocks(struct dquot *dquot)
return 0;
}
-/* Remove references to dquots from inode - add dquot to list for freeing if needed */
-/* We can't race with anybody because we hold dqptr_sem for writing... */
+/*
+ * Remove references to dquots from inode and add dquot to list for freeing
+ * if we have the last reference to the dquot.
+ * We can't race with anybody because we hold dqptr_sem for writing...
+ */
static int remove_inode_dquot_ref(struct inode *inode, int type,
struct list_head *tofree_head)
{
struct dquot *dquot = inode->i_dquot[type];
- inode->i_dquot[type] = NODQUOT;
- if (dquot != NODQUOT) {
+ inode->i_dquot[type] = NULL;
+ if (dquot) {
if (dqput_blocks(dquot)) {
#ifdef __DQUOT_PARANOIA
if (atomic_read(&dquot->dq_count) != 1)
printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
spin_lock(&dq_list_lock);
- list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */
+ /* As the dquot currently has users it can't be on
+ * the free list... */
+ list_add(&dquot->dq_free, tofree_head);
spin_unlock(&dq_list_lock);
return 1;
}
@@ -846,19 +888,22 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
return 0;
}
-/* Free list of dquots - called from inode.c */
-/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */
+/*
+ * Free a list of dquots.
+ * The dquots have been removed from inodes and no new references can be
+ * taken, so we are the only ones holding a reference.
+ */
static void put_dquot_list(struct list_head *tofree_head)
{
struct list_head *act_head;
struct dquot *dquot;
act_head = tofree_head->next;
- /* So now we have dquots on the list... Just free them */
while (act_head != tofree_head) {
dquot = list_entry(act_head, struct dquot, dq_free);
act_head = act_head->next;
- list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */
+ /* Remove dquot from the list so we won't have problems... */
+ list_del_init(&dquot->dq_free);
dqput(dquot);
}
}
@@ -870,6 +915,12 @@ static void remove_dquot_ref(struct super_block *sb, int type,
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ /*
+ * We have to scan also I_NEW inodes because they can already
+ * have quota pointer initialized. Luckily, we need to touch
+ * only quota pointers and these have separate locking
+ * (dqptr_sem).
+ */
if (!IS_NOQUOTA(inode))
remove_inode_dquot_ref(inode, type, tofree_head);
}
@@ -899,7 +950,29 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
dquot->dq_dqb.dqb_curspace += number;
}
-static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
+static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_rsvspace += number;
+}
+
+/*
+ * Claim reserved quota space
+ */
+static void dquot_claim_reserved_space(struct dquot *dquot,
+ qsize_t number)
+{
+ WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
+ dquot->dq_dqb.dqb_curspace += number;
+ dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static inline
+void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curinodes >= number)
@@ -911,7 +984,7 @@ static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
-static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
+static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curspace >= number)
@@ -938,7 +1011,7 @@ static int warning_issued(struct dquot *dquot, const int warntype)
#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;
-static inline int need_print_warning(struct dquot *dquot)
+static int need_print_warning(struct dquot *dquot)
{
if (!flag_print_warnings)
return 0;
@@ -1065,13 +1138,17 @@ err_out:
kfree_skb(skb);
}
#endif
-
-static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
+/*
+ * Write warnings to the console and send warning messages over netlink.
+ *
+ * Note that this function can sleep.
+ */
+static void flush_warnings(struct dquot *const *dquots, char *warntype)
{
int i;
for (i = 0; i < MAXQUOTAS; i++)
- if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
+ if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
!warning_issued(dquots[i], warntype[i])) {
#ifdef CONFIG_PRINT_QUOTA_WARNING
print_warning(dquots[i], warntype[i]);
@@ -1082,42 +1159,47 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
}
}
-static inline char ignore_hardlimit(struct dquot *dquot)
+static int ignore_hardlimit(struct dquot *dquot)
{
struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
return capable(CAP_SYS_RESOURCE) &&
- (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
+ (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
+ !(info->dqi_flags & V1_DQF_RSQUASH));
}
/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
+ qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
+
*warntype = QUOTA_NL_NOWARN;
if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
return QUOTA_OK;
if (dquot->dq_dqb.dqb_ihardlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
+ newinodes > dquot->dq_dqb.dqb_ihardlimit &&
!ignore_hardlimit(dquot)) {
*warntype = QUOTA_NL_IHARDWARN;
return NO_QUOTA;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
- dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
+ newinodes > dquot->dq_dqb.dqb_isoftlimit &&
+ dquot->dq_dqb.dqb_itime &&
+ get_seconds() >= dquot->dq_dqb.dqb_itime &&
!ignore_hardlimit(dquot)) {
*warntype = QUOTA_NL_ISOFTLONGWARN;
return NO_QUOTA;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
+ newinodes > dquot->dq_dqb.dqb_isoftlimit &&
dquot->dq_dqb.dqb_itime == 0) {
*warntype = QUOTA_NL_ISOFTWARN;
- dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+ dquot->dq_dqb.dqb_itime = get_seconds() +
+ sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
}
return QUOTA_OK;
@@ -1126,13 +1208,19 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
+ qsize_t tspace;
+ struct super_block *sb = dquot->dq_sb;
+
*warntype = QUOTA_NL_NOWARN;
- if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
+ if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
return QUOTA_OK;
+ tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ + space;
+
if (dquot->dq_dqb.dqb_bhardlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit &&
+ tspace > dquot->dq_dqb.dqb_bhardlimit &&
!ignore_hardlimit(dquot)) {
if (!prealloc)
*warntype = QUOTA_NL_BHARDWARN;
@@ -1140,8 +1228,9 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
+ dquot->dq_dqb.dqb_btime &&
+ get_seconds() >= dquot->dq_dqb.dqb_btime &&
!ignore_hardlimit(dquot)) {
if (!prealloc)
*warntype = QUOTA_NL_BSOFTLONGWARN;
@@ -1149,11 +1238,12 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_btime == 0) {
if (!prealloc) {
*warntype = QUOTA_NL_BSOFTWARN;
- dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
+ dquot->dq_dqb.dqb_btime = get_seconds() +
+ sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
}
else
/*
@@ -1168,15 +1258,18 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
+ qsize_t newinodes;
+
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
return QUOTA_NL_NOWARN;
- if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
+ newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
+ if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
return QUOTA_NL_ISOFTBELOW;
if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
- dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
+ newinodes < dquot->dq_dqb.dqb_ihardlimit)
return QUOTA_NL_IHARDBELOW;
return QUOTA_NL_NOWARN;
}
@@ -1203,7 +1296,7 @@ int dquot_initialize(struct inode *inode, int type)
{
unsigned int id = 0;
int cnt, ret = 0;
- struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT };
+ struct dquot *got[MAXQUOTAS] = { NULL, NULL };
struct super_block *sb = inode->i_sb;
/* First test before acquiring mutex - solves deadlocks when we
@@ -1236,9 +1329,9 @@ int dquot_initialize(struct inode *inode, int type)
/* Avoid races with quotaoff() */
if (!sb_has_quota_active(sb, cnt))
continue;
- if (inode->i_dquot[cnt] == NODQUOT) {
+ if (!inode->i_dquot[cnt]) {
inode->i_dquot[cnt] = got[cnt];
- got[cnt] = NODQUOT;
+ got[cnt] = NULL;
}
}
out_err:
@@ -1248,6 +1341,7 @@ out_err:
dqput(got[cnt]);
return ret;
}
+EXPORT_SYMBOL(dquot_initialize);
/*
* Release all quotas referenced by inode
@@ -1260,7 +1354,7 @@ int dquot_drop(struct inode *inode)
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
put[cnt] = inode->i_dquot[cnt];
- inode->i_dquot[cnt] = NODQUOT;
+ inode->i_dquot[cnt] = NULL;
}
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1268,6 +1362,7 @@ int dquot_drop(struct inode *inode)
dqput(put[cnt]);
return 0;
}
+EXPORT_SYMBOL(dquot_drop);
/* Wrapper to remove references to quota structures from inode */
void vfs_dq_drop(struct inode *inode)
@@ -1284,12 +1379,13 @@ void vfs_dq_drop(struct inode *inode)
* must assure that nobody can come after the DQUOT_DROP and
* add quota pointers back anyway */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt] != NODQUOT)
+ if (inode->i_dquot[cnt])
break;
if (cnt < MAXQUOTAS)
inode->i_sb->dq_op->drop(inode);
}
}
+EXPORT_SYMBOL(vfs_dq_drop);
/*
* Following four functions update i_blocks+i_bytes fields and
@@ -1303,51 +1399,93 @@ void vfs_dq_drop(struct inode *inode)
/*
* This operation can block, but only after everything is updated
*/
-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ int warn, int reserve)
{
- int cnt, ret = NO_QUOTA;
+ int cnt, ret = QUOTA_OK;
char warntype[MAXQUOTAS];
- /* First test before acquiring mutex - solves deadlocks when we
- * re-enter the quota code and are already holding the mutex */
- if (IS_NOQUOTA(inode)) {
-out_add:
- inode_add_bytes(inode, number);
- return QUOTA_OK;
- }
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) { /* Now we can do reliable test... */
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- goto out_add;
- }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
- if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
- goto warn_put_all;
+ if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+ == NO_QUOTA) {
+ ret = NO_QUOTA;
+ goto out_unlock;
+ }
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
- dquot_incr_space(inode->i_dquot[cnt], number);
+ if (reserve)
+ dquot_resv_space(inode->i_dquot[cnt], number);
+ else
+ dquot_incr_space(inode->i_dquot[cnt], number);
}
- inode_add_bytes(inode, number);
- ret = QUOTA_OK;
-warn_put_all:
+ if (!reserve)
+ inode_add_bytes(inode, number);
+out_unlock:
spin_unlock(&dq_data_lock);
- if (ret == QUOTA_OK)
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
flush_warnings(inode->i_dquot, warntype);
+ return ret;
+}
+
+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+{
+ int cnt, ret = QUOTA_OK;
+
+ /*
+ * First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex
+ */
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out_unlock;
+ }
+
+ ret = __dquot_alloc_space(inode, number, warn, 0);
+ if (ret == NO_QUOTA)
+ goto out_unlock;
+
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
+out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
return ret;
}
+EXPORT_SYMBOL(dquot_alloc_space);
+
+int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+{
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ ret = __dquot_alloc_space(inode, number, warn, 1);
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(dquot_reserve_space);
/*
* This operation can block, but only after everything is updated
@@ -1370,14 +1508,15 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
- if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
+ if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
+ == NO_QUOTA)
goto warn_put_all;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
dquot_incr_inodes(inode->i_dquot[cnt], number);
}
@@ -1393,6 +1532,73 @@ warn_put_all:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return ret;
}
+EXPORT_SYMBOL(dquot_alloc_inode);
+
+int dquot_claim_space(struct inode *inode, qsize_t number)
+{
+ int cnt;
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ spin_lock(&dq_data_lock);
+ /* Convert reserved quota space into allocated space */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt])
+ dquot_claim_reserved_space(inode->i_dquot[cnt],
+ number);
+ }
+ /* Update inode bytes */
+ inode_add_bytes(inode, number);
+ spin_unlock(&dq_data_lock);
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(dquot_claim_space);
+
+/*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ spin_lock(&dq_data_lock);
+ /* Release reserved dquots */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt])
+ dquot_free_reserved_space(inode->i_dquot[cnt], number);
+ }
+ spin_unlock(&dq_data_lock);
+
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return;
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
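
Taken together, dquot_reserve_space(), dquot_claim_space() and dquot_release_reserved_space() support delayed allocation: quota is reserved at write time, converted to allocated space once blocks are really assigned, and given back if the allocation is abandoned. A sketch of the intended calling sequence (the example_* hooks are hypothetical filesystem callbacks):

static int example_delalloc_write(struct inode *inode, qsize_t bytes)
{
	/* At write time: reserve quota without touching i_blocks yet. */
	if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
		return -EDQUOT;
	return 0;
}

static int example_delalloc_writeback(struct inode *inode, qsize_t bytes, int ok)
{
	if (ok) {
		/* Blocks were really allocated: convert the reservation. */
		dquot_claim_space(inode, bytes);
		return 0;
	}
	/* Allocation was abandoned: release the reservation. */
	dquot_release_reserved_space(inode, bytes);
	return 0;
}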
/*
* This operation can block, but only after everything is updated
@@ -1418,7 +1624,7 @@ out_sub:
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
dquot_decr_space(inode->i_dquot[cnt], number);
@@ -1433,6 +1639,7 @@ out_sub:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
+EXPORT_SYMBOL(dquot_free_space);
/*
* This operation can block, but only after everything is updated
@@ -1455,7 +1662,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
dquot_decr_inodes(inode->i_dquot[cnt], number);
@@ -1469,6 +1676,20 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
+EXPORT_SYMBOL(dquot_free_inode);
+
+/*
+ * Callback function: get the reserved quota space from the underlying fs
+ */
+qsize_t dquot_get_reserved_space(struct inode *inode)
+{
+ qsize_t reserved_space = 0;
+
+ if (sb_any_quota_active(inode->i_sb) &&
+ inode->i_sb->dq_op->get_reserved_space)
+ reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+ return reserved_space;
+}
/*
* Transfer the number of inode and blocks from one diskquota to an other.
@@ -1478,7 +1699,8 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
*/
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
- qsize_t space;
+ qsize_t space, cur_space;
+ qsize_t rsv_space = 0;
struct dquot *transfer_from[MAXQUOTAS];
struct dquot *transfer_to[MAXQUOTAS];
int cnt, ret = QUOTA_OK;
@@ -1493,22 +1715,16 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
return QUOTA_OK;
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- transfer_from[cnt] = NODQUOT;
- transfer_to[cnt] = NODQUOT;
+ transfer_from[cnt] = NULL;
+ transfer_to[cnt] = NULL;
warntype_to[cnt] = QUOTA_NL_NOWARN;
- switch (cnt) {
- case USRQUOTA:
- if (!chuid)
- continue;
- transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
- break;
- case GRPQUOTA:
- if (!chgid)
- continue;
- transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
- break;
- }
}
+ if (chuid)
+ transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
+ USRQUOTA);
+ if (chgid)
+ transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
+ GRPQUOTA);
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
/* Now recheck reliably when holding dqptr_sem */
@@ -1517,10 +1733,12 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
goto put_all;
}
spin_lock(&dq_data_lock);
- space = inode_get_bytes(inode);
+ cur_space = inode_get_bytes(inode);
+ rsv_space = dquot_get_reserved_space(inode);
+ space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (transfer_to[cnt] == NODQUOT)
+ if (!transfer_to[cnt])
continue;
transfer_from[cnt] = inode->i_dquot[cnt];
if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
@@ -1536,7 +1754,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
/*
* Skip changes for same uid or gid or for turned off quota-type.
*/
- if (transfer_to[cnt] == NODQUOT)
+ if (!transfer_to[cnt])
continue;
/* Due to IO error we might not have transfer_from[] structure */
@@ -1546,11 +1764,14 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
warntype_from_space[cnt] =
info_bdq_free(transfer_from[cnt], space);
dquot_decr_inodes(transfer_from[cnt], 1);
- dquot_decr_space(transfer_from[cnt], space);
+ dquot_decr_space(transfer_from[cnt], cur_space);
+ dquot_free_reserved_space(transfer_from[cnt],
+ rsv_space);
}
dquot_incr_inodes(transfer_to[cnt], 1);
- dquot_incr_space(transfer_to[cnt], space);
+ dquot_incr_space(transfer_to[cnt], cur_space);
+ dquot_resv_space(transfer_to[cnt], rsv_space);
inode->i_dquot[cnt] = transfer_to[cnt];
}
@@ -1564,7 +1785,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
if (transfer_to[cnt]) {
mark_dquot_dirty(transfer_to[cnt]);
/* The reference we got is transferred to the inode */
- transfer_to[cnt] = NODQUOT;
+ transfer_to[cnt] = NULL;
}
}
warn_put_all:
@@ -1582,10 +1803,11 @@ over_quota:
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
/* Clear dquot pointers we don't want to dqput() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- transfer_from[cnt] = NODQUOT;
+ transfer_from[cnt] = NULL;
ret = NO_QUOTA;
goto warn_put_all;
}
+EXPORT_SYMBOL(dquot_transfer);
/* Wrapper for transferring ownership of an inode */
int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
@@ -1597,7 +1819,7 @@ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
}
return 0;
}
-
+EXPORT_SYMBOL(vfs_dq_transfer);
/*
* Write info of quota file to disk
@@ -1612,6 +1834,7 @@ int dquot_commit_info(struct super_block *sb, int type)
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_commit_info);
/*
* Definitions of diskquota operations.
@@ -1697,8 +1920,8 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
drop_dquot_ref(sb, cnt);
invalidate_dquots(sb, cnt);
/*
- * Now all dquots should be invalidated, all writes done so we should be only
- * users of the info. No locks needed.
+ * Now all dquots should be invalidated, all writes done, so we
+ * should be the only users of the info. No locks needed.
*/
if (info_dirty(&dqopt->info[cnt]))
sb->dq_op->write_info(sb, cnt);
@@ -1736,10 +1959,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_loaded(sb, cnt)) {
- mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock_nested(&toputinode[cnt]->i_mutex,
+ I_MUTEX_QUOTA);
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
S_NOATIME | S_NOQUOTA);
- truncate_inode_pages(&toputinode[cnt]->i_data, 0);
+ truncate_inode_pages(&toputinode[cnt]->i_data,
+ 0);
mutex_unlock(&toputinode[cnt]->i_mutex);
mark_inode_dirty(toputinode[cnt]);
}
@@ -1764,13 +1989,14 @@ put_inodes:
}
return ret;
}
+EXPORT_SYMBOL(vfs_quota_disable);
int vfs_quota_off(struct super_block *sb, int type, int remount)
{
return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
(DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
}
-
+EXPORT_SYMBOL(vfs_quota_off);
/*
* Turn quotas on on a device
*/
@@ -1828,7 +2054,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
down_write(&dqopt->dqptr_sem);
- oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
+ oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
+ S_NOQUOTA);
inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
up_write(&dqopt->dqptr_sem);
sb->dq_op->drop(inode);
@@ -1847,7 +2074,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
dqopt->info[type].dqi_fmt_id = format_id;
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
mutex_lock(&dqopt->dqio_mutex);
- if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
+ error = dqopt->ops[type]->read_file_info(sb, type);
+ if (error < 0) {
mutex_unlock(&dqopt->dqio_mutex);
goto out_file_init;
}
@@ -1927,6 +2155,7 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
DQUOT_LIMITS_ENABLED);
return error;
}
+EXPORT_SYMBOL(vfs_quota_on_path);
int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
int remount)
@@ -1944,6 +2173,7 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
}
return error;
}
+EXPORT_SYMBOL(vfs_quota_on);
/*
* More powerful function for turning on quotas allowing setting
@@ -1990,6 +2220,7 @@ out_lock:
load_quota:
return vfs_load_quota_inode(inode, type, format_id, flags);
}
+EXPORT_SYMBOL(vfs_quota_enable);
/*
* This function is used when filesystem needs to initialize quotas
@@ -2019,6 +2250,7 @@ out:
dput(dentry);
return error;
}
+EXPORT_SYMBOL(vfs_quota_on_mount);
/* Wrapper to turn on quotas when remounting rw */
int vfs_dq_quota_on_remount(struct super_block *sb)
@@ -2035,6 +2267,7 @@ int vfs_dq_quota_on_remount(struct super_block *sb)
}
return ret;
}
+EXPORT_SYMBOL(vfs_dq_quota_on_remount);
static inline qsize_t qbtos(qsize_t blocks)
{
@@ -2054,7 +2287,7 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_lock(&dq_data_lock);
di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
- di->dqb_curspace = dm->dqb_curspace;
+ di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
di->dqb_ihardlimit = dm->dqb_ihardlimit;
di->dqb_isoftlimit = dm->dqb_isoftlimit;
di->dqb_curinodes = dm->dqb_curinodes;
@@ -2064,18 +2297,20 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_unlock(&dq_data_lock);
}
-int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
+ struct if_dqblk *di)
{
struct dquot *dquot;
dquot = dqget(sb, id, type);
- if (dquot == NODQUOT)
+ if (!dquot)
return -ESRCH;
do_get_dqblk(dquot, di);
dqput(dquot);
return 0;
}
+EXPORT_SYMBOL(vfs_get_dqblk);
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
@@ -2094,7 +2329,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_lock(&dq_data_lock);
if (di->dqb_valid & QIF_SPACE) {
- dm->dqb_curspace = di->dqb_curspace;
+ dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
check_blim = 1;
__set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
@@ -2127,22 +2362,25 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
}
if (check_blim) {
- if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) {
+ if (!dm->dqb_bsoftlimit ||
+ dm->dqb_curspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
- }
- else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */
+ } else if (!(di->dqb_valid & QIF_BTIME))
+ /* Set grace only if user hasn't provided his own... */
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
if (check_ilim) {
- if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
+ if (!dm->dqb_isoftlimit ||
+ dm->dqb_curinodes < dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
- }
- else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */
+ } else if (!(di->dqb_valid & QIF_ITIME))
+ /* Set grace only if user hasn't provided his own... */
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
- if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
+ if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
+ dm->dqb_isoftlimit)
clear_bit(DQ_FAKE_B, &dquot->dq_flags);
else
set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -2152,7 +2390,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
return 0;
}
-int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
+ struct if_dqblk *di)
{
struct dquot *dquot;
int rc;
@@ -2167,6 +2406,7 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
out:
return rc;
}
+EXPORT_SYMBOL(vfs_set_dqblk);
/* Generic routine for getting common part of quota file information */
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2188,6 +2428,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
+EXPORT_SYMBOL(vfs_get_dqinfo);
/* Generic routine for setting common part of quota file information */
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2207,7 +2448,8 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
if (ii->dqi_valid & IIF_IGRACE)
mi->dqi_igrace = ii->dqi_igrace;
if (ii->dqi_valid & IIF_FLAGS)
- mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
+ mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
+ (ii->dqi_flags & DQF_MASK);
spin_unlock(&dq_data_lock);
mark_info_dirty(sb, type);
/* Force write to disk */
@@ -2216,6 +2458,7 @@ out:
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return err;
}
+EXPORT_SYMBOL(vfs_set_dqinfo);
struct quotactl_ops vfs_quotactl_ops = {
.quota_on = vfs_quota_on,
@@ -2365,43 +2608,10 @@ static int __init dquot_init(void)
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
if (genl_register_family(&quota_genl_family) != 0)
- printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n");
+ printk(KERN_ERR
+ "VFS: Failed to create quota netlink interface.\n");
#endif
return 0;
}
module_init(dquot_init);
-
-EXPORT_SYMBOL(register_quota_format);
-EXPORT_SYMBOL(unregister_quota_format);
-EXPORT_SYMBOL(dqstats);
-EXPORT_SYMBOL(dq_data_lock);
-EXPORT_SYMBOL(vfs_quota_enable);
-EXPORT_SYMBOL(vfs_quota_on);
-EXPORT_SYMBOL(vfs_quota_on_path);
-EXPORT_SYMBOL(vfs_quota_on_mount);
-EXPORT_SYMBOL(vfs_quota_disable);
-EXPORT_SYMBOL(vfs_quota_off);
-EXPORT_SYMBOL(dquot_scan_active);
-EXPORT_SYMBOL(vfs_quota_sync);
-EXPORT_SYMBOL(vfs_get_dqinfo);
-EXPORT_SYMBOL(vfs_set_dqinfo);
-EXPORT_SYMBOL(vfs_get_dqblk);
-EXPORT_SYMBOL(vfs_set_dqblk);
-EXPORT_SYMBOL(dquot_commit);
-EXPORT_SYMBOL(dquot_commit_info);
-EXPORT_SYMBOL(dquot_acquire);
-EXPORT_SYMBOL(dquot_release);
-EXPORT_SYMBOL(dquot_mark_dquot_dirty);
-EXPORT_SYMBOL(dquot_initialize);
-EXPORT_SYMBOL(dquot_drop);
-EXPORT_SYMBOL(vfs_dq_drop);
-EXPORT_SYMBOL(dqget);
-EXPORT_SYMBOL(dqput);
-EXPORT_SYMBOL(dquot_alloc_space);
-EXPORT_SYMBOL(dquot_alloc_inode);
-EXPORT_SYMBOL(dquot_free_space);
-EXPORT_SYMBOL(dquot_free_inode);
-EXPORT_SYMBOL(dquot_transfer);
-EXPORT_SYMBOL(vfs_dq_transfer);
-EXPORT_SYMBOL(vfs_dq_quota_on_remount);
diff --git a/fs/quota.c b/fs/quota/quota.c
index d76ada914f98..b7f5a468f076 100644
--- a/fs/quota.c
+++ b/fs/quota/quota.c
@@ -20,7 +20,8 @@
#include <linux/types.h>
/* Check validity of generic quotactl commands */
-static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
+ qid_t id)
{
if (type >= MAXQUOTAS)
return -EINVAL;
@@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
case Q_SETINFO:
case Q_SETQUOTA:
case Q_GETQUOTA:
- /* This is just informative test so we are satisfied without a lock */
+ /* This is just an informative test so we are satisfied
+ * without the lock */
if (!sb_has_quota_active(sb, type))
return -ESRCH;
}
@@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
}
/* Check validity of XFS Quota Manager commands */
-static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
+ qid_t id)
{
if (type >= XQM_MAXQUOTAS)
return -EINVAL;
@@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i
return 0;
}
-static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
+ qid_t id)
{
int error;
@@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
+ I_MUTEX_QUOTA);
truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
}
@@ -200,14 +205,15 @@ void sync_dquots(struct super_block *sb, int type)
spin_lock(&sb_lock);
restart:
list_for_each_entry(sb, &super_blocks, s_list) {
- /* This test just improves performance so it needn't be reliable... */
+ /* This test just improves performance so it needn't be
+ * reliable... */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && type != cnt)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
- list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
+ list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
continue;
break;
}
@@ -227,7 +233,8 @@ restart:
}
/* Copy parameters and call proper function */
-static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
+static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
+ void __user *addr)
{
int ret;
@@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_QUOTAON: {
char *pathname;
- if (IS_ERR(pathname = getname(addr)))
+ pathname = getname(addr);
+ if (IS_ERR(pathname))
return PTR_ERR(pathname);
ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
putname(pathname);
@@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_GETINFO: {
struct if_dqinfo info;
- if ((ret = sb->s_qcop->get_info(sb, type, &info)))
+ ret = sb->s_qcop->get_info(sb, type, &info);
+ if (ret)
return ret;
if (copy_to_user(addr, &info, sizeof(info)))
return -EFAULT;
@@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_GETQUOTA: {
struct if_dqblk idq;
- if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
+ ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
+ if (ret)
return ret;
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
@@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_XGETQUOTA: {
struct fs_disk_quota fdq;
- if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
+ ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
+ if (ret)
return ret;
if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
@@ -341,7 +352,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
* look up a superblock on which quota ops will be performed
* - use the name of a block device to find the superblock thereon
*/
-static inline struct super_block *quotactl_block(const char __user *special)
+static struct super_block *quotactl_block(const char __user *special)
{
#ifdef CONFIG_BLOCK
struct block_device *bdev;
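The do_quotactl() hunks above repeatedly apply one checkpatch-style cleanup: an assignment buried inside an if () condition is split into a plain assignment followed by the error test. A minimal stand-alone sketch of the before/after shape, with the trivial helper get_value() invented purely so the snippet compiles on its own:

/* Invented helper, only so the sketch is self-contained. */
static int get_value(int *val)
{
        *val = 42;
        return 0;
}

/* Before: the assignment hides inside the condition. */
static int old_style(void)
{
        int val, ret;

        if ((ret = get_value(&val)))
                return ret;
        return val;
}

/* After: assign first, then test the result, as the patch above does. */
static int new_style(void)
{
        int val, ret;

        ret = get_value(&val);
        if (ret)
                return ret;
        return val;
}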
diff --git a/fs/quota_tree.c b/fs/quota/quota_tree.c
index 953404c95b17..f81f4bcfb178 100644
--- a/fs/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -22,8 +22,6 @@ MODULE_LICENSE("GPL");
#define __QUOTA_QT_PARANOIA
-typedef char *dqbuf_t;
-
static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
unsigned int epb = info->dqi_usable_bs >> 2;
@@ -35,46 +33,42 @@ static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
}
/* Number of entries in one blocks */
-static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
+static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
/ info->dqi_entry_size;
}
-static dqbuf_t getdqbuf(size_t size)
+static char *getdqbuf(size_t size)
{
- dqbuf_t buf = kmalloc(size, GFP_NOFS);
+ char *buf = kmalloc(size, GFP_NOFS);
if (!buf)
- printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
+ printk(KERN_WARNING
+ "VFS: Not enough memory for quota buffers.\n");
return buf;
}
-static inline void freedqbuf(dqbuf_t buf)
-{
- kfree(buf);
-}
-
-static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
+static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
memset(buf, 0, info->dqi_usable_bs);
- return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf,
+ return sb->s_op->quota_read(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}
-static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
+static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
- return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf,
+ return sb->s_op->quota_write(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}
/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int ret, blk;
@@ -98,12 +92,12 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
mark_info_dirty(info->dqi_sb, info->dqi_type);
ret = blk;
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
/* Insert empty block to the list */
-static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
@@ -120,9 +114,10 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
}
/* Remove given block from the list of blocks with free entries */
-static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+ uint blk)
{
- dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
+ char *tmpbuf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
uint nextblk = le32_to_cpu(dh->dqdh_next_free);
uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
@@ -153,21 +148,24 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
info->dqi_free_entry = nextblk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
}
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
/* No matter whether write succeeds block is out of list */
if (write_blk(info, blk, buf) < 0)
- printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk);
+ printk(KERN_ERR
+ "VFS: Can't write block (%u) with free entries.\n",
+ blk);
return 0;
out_buf:
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
return err;
}
/* Insert given block to the beginning of list with free entries */
-static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+ uint blk)
{
- dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
+ char *tmpbuf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
@@ -188,12 +186,12 @@ static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
if (err < 0)
goto out_buf;
}
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
info->dqi_free_entry = blk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
return 0;
out_buf:
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
return err;
}
@@ -215,7 +213,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
{
uint blk, i;
struct qt_disk_dqdbheader *dh;
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
char *ddquot;
*err = 0;
@@ -233,11 +231,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
blk = get_free_dqblk(info);
if ((int)blk < 0) {
*err = blk;
- freedqbuf(buf);
+ kfree(buf);
return 0;
}
memset(buf, 0, info->dqi_usable_bs);
- /* This is enough as block is already zeroed and entry list is empty... */
+ /* This is enough as the block is already zeroed and the entry
+ * list is empty... */
info->dqi_free_entry = blk;
mark_info_dirty(dquot->dq_sb, dquot->dq_type);
}
@@ -253,9 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
}
le16_add_cpu(&dh->dqdh_entries, 1);
/* Find free structure in block */
- for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
- i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot);
- i++, ddquot += info->dqi_entry_size);
+ ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+ for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+ if (qtree_entry_unused(info, ddquot))
+ break;
+ ddquot += info->dqi_entry_size;
+ }
#ifdef __QUOTA_QT_PARANOIA
if (i == qtree_dqstr_in_blk(info)) {
printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
@@ -273,10 +275,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
dquot->dq_off = (blk << info->dqi_blocksize_bits) +
sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
- freedqbuf(buf);
+ kfree(buf);
return blk;
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return 0;
}
@@ -284,7 +286,7 @@ out_buf:
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *treeblk, int depth)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0, newson = 0, newact = 0;
__le32 *ref;
uint newblk;
@@ -333,7 +335,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
put_free_dqblk(info, buf, *treeblk);
}
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -346,14 +348,15 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
}
/*
- * We don't have to be afraid of deadlocks as we never have quotas on quota files...
+ * We don't have to be afraid of deadlocks as we never have quotas on quota
+ * files...
*/
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
int type = dquot->dq_type;
struct super_block *sb = dquot->dq_sb;
ssize_t ret;
- dqbuf_t ddquot = getdqbuf(info->dqi_entry_size);
+ char *ddquot = getdqbuf(info->dqi_entry_size);
if (!ddquot)
return -ENOMEM;
@@ -364,15 +367,15 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
if (ret < 0) {
printk(KERN_ERR "VFS: Error %zd occurred while "
"creating quota.\n", ret);
- freedqbuf(ddquot);
+ kfree(ddquot);
return ret;
}
}
spin_lock(&dq_data_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
spin_unlock(&dq_data_lock);
- ret = sb->s_op->quota_write(sb, type, (char *)ddquot,
- info->dqi_entry_size, dquot->dq_off);
+ ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
+ dquot->dq_off);
if (ret != info->dqi_entry_size) {
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
sb->s_id);
@@ -382,7 +385,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ret = 0;
}
dqstats.writes++;
- freedqbuf(ddquot);
+ kfree(ddquot);
return ret;
}
@@ -393,7 +396,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint blk)
{
struct qt_disk_dqdbheader *dh;
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0;
if (!buf)
@@ -444,7 +447,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
}
dquot->dq_off = 0; /* Quota is now unattached */
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -452,7 +455,7 @@ out_buf:
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *blk, int depth)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0;
uint newblk;
__le32 *ref = (__le32 *)buf;
@@ -475,9 +478,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
int i;
ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
/* Block got empty? */
- for (i = 0;
- i < (info->dqi_usable_bs >> 2) && !ref[i];
- i++);
+ for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
+ ;
/* Don't put the root block into the free block list */
if (i == (info->dqi_usable_bs >> 2)
&& *blk != QT_TREEOFF) {
@@ -491,7 +493,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
}
}
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -510,7 +512,7 @@ EXPORT_SYMBOL(qtree_delete_dquot);
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
loff_t ret = 0;
int i;
char *ddquot;
@@ -522,9 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
goto out_buf;
}
- for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
- i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot);
- i++, ddquot += info->dqi_entry_size);
+ ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+ for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+ if (info->dqi_ops->is_id(ddquot, dquot))
+ break;
+ ddquot += info->dqi_entry_size;
+ }
if (i == qtree_dqstr_in_blk(info)) {
printk(KERN_ERR "VFS: Quota for id %u referenced "
"but not present.\n", dquot->dq_id);
@@ -535,7 +540,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
qt_disk_dqdbheader) + i * info->dqi_entry_size;
}
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -543,7 +548,7 @@ out_buf:
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk, int depth)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
loff_t ret = 0;
__le32 *ref = (__le32 *)buf;
@@ -563,7 +568,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
else
ret = find_block_dqentry(info, dquot, blk);
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -579,7 +584,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
int type = dquot->dq_type;
struct super_block *sb = dquot->dq_sb;
loff_t offset;
- dqbuf_t ddquot;
+ char *ddquot;
int ret = 0;
#ifdef __QUOTA_QT_PARANOIA
@@ -607,8 +612,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ddquot = getdqbuf(info->dqi_entry_size);
if (!ddquot)
return -ENOMEM;
- ret = sb->s_op->quota_read(sb, type, (char *)ddquot,
- info->dqi_entry_size, dquot->dq_off);
+ ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
+ dquot->dq_off);
if (ret != info->dqi_entry_size) {
if (ret >= 0)
ret = -EIO;
@@ -616,7 +621,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
"structure for id %u.\n", dquot->dq_id);
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
- freedqbuf(ddquot);
+ kfree(ddquot);
goto out;
}
spin_lock(&dq_data_lock);
@@ -627,7 +632,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dq_data_lock);
- freedqbuf(ddquot);
+ kfree(ddquot);
out:
dqstats.reads++;
return ret;
@@ -638,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot);
* the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
- if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
+ if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
+ !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
return qtree_delete_dquot(info, dquot);
return 0;
}
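With the dqbuf_t typedef gone, the tree code above allocates plain char * buffers with getdqbuf() and releases them with kfree() directly; since kfree(NULL) is a no-op, the error paths need no extra NULL checks. A condensed sketch of the resulting allocate/use/free shape (getdqbuf() and read_blk() are the helpers shown in the hunks above, not new API):

/* Condensed sketch only; mirrors the buffer handling used above. */
static int example_read_block(struct qtree_mem_dqinfo *info, uint blk)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        ssize_t ret;

        if (!buf)
                return -ENOMEM;

        ret = read_blk(info, blk, buf);
        if (ret < 0)
                printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);

        kfree(buf);     /* plain kfree(); the freedqbuf() wrapper is gone */
        return ret < 0 ? ret : 0;
}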
diff --git a/fs/quota_tree.h b/fs/quota/quota_tree.h
index a1ab8db81a51..a1ab8db81a51 100644
--- a/fs/quota_tree.h
+++ b/fs/quota/quota_tree.h
diff --git a/fs/quota_v1.c b/fs/quota/quota_v1.c
index b4af1c69ad16..0edcf42b1778 100644
--- a/fs/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -62,11 +62,14 @@ static int v1_read_dqblk(struct dquot *dquot)
/* Set structure to 0s in case read fails/is after end of file */
memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
- dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+ dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
- if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 &&
- dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0)
+ if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
+ dquot->dq_dqb.dqb_bsoftlimit == 0 &&
+ dquot->dq_dqb.dqb_ihardlimit == 0 &&
+ dquot->dq_dqb.dqb_isoftlimit == 0)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
dqstats.reads++;
@@ -81,13 +84,16 @@ static int v1_commit_dqblk(struct dquot *dquot)
v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
if (dquot->dq_id == 0) {
- dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
- dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
+ dqblk.dqb_btime =
+ sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
+ dqblk.dqb_itime =
+ sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
}
ret = 0;
if (sb_dqopt(dquot->dq_sb)->files[type])
- ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk,
- sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+ ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
+ (char *)&dqblk, sizeof(struct v1_disk_dqblk),
+ v1_dqoff(dquot->dq_id));
if (ret != sizeof(struct v1_disk_dqblk)) {
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
dquot->dq_sb->s_id);
@@ -130,15 +136,20 @@ static int v1_check_quota_file(struct super_block *sb, int type)
return 0;
blocks = isize >> BLOCK_SIZE_BITS;
off = isize & (BLOCK_SIZE - 1);
- if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk))
+ if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
+ sizeof(struct v1_disk_dqblk))
return 0;
- /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */
- size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+ /* Doublecheck whether we didn't get file with new format - with old
+ * quotactl() this could happen */
+ size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+ sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader))
return 1; /* Probably not new format */
if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
return 1; /* Definitely not new format */
- printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id);
+ printk(KERN_INFO
+ "VFS: %s: Refusing to turn on old quota format on given file."
+ " It probably contains newer quota format.\n", sb->s_id);
return 0; /* Seems like a new format file -> refuse it */
}
@@ -148,7 +159,9 @@ static int v1_read_file_info(struct super_block *sb, int type)
struct v1_disk_dqblk dqblk;
int ret;
- if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+ ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+ if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;
@@ -157,8 +170,10 @@ static int v1_read_file_info(struct super_block *sb, int type)
/* limits are stored as unsigned 32-bit data */
dqopt->info[type].dqi_maxblimit = 0xffffffff;
dqopt->info[type].dqi_maxilimit = 0xffffffff;
- dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
- dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
+ dqopt->info[type].dqi_igrace =
+ dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+ dqopt->info[type].dqi_bgrace =
+ dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
out:
return ret;
}
@@ -170,8 +185,9 @@ static int v1_write_file_info(struct super_block *sb, int type)
int ret;
dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
- if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
- sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+ ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+ if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;
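Both v1 read paths above keep the usual quota convention: ->quota_read() returns a byte count, so the caller compares it against the on-disk structure size and maps a short read to -EIO instead of propagating the raw count. A hedged sketch of that check in isolation (example_read_v1_dqblk() is invented for illustration; v1_dqoff() is the macro used in the hunks above):

/* Sketch of the short-read handling pattern used by v1_read_file_info(). */
static int example_read_v1_dqblk(struct super_block *sb, int type,
                struct v1_disk_dqblk *dqblk)
{
        ssize_t ret;

        ret = sb->s_op->quota_read(sb, type, (char *)dqblk,
                        sizeof(struct v1_disk_dqblk), v1_dqoff(0));
        if (ret != sizeof(struct v1_disk_dqblk)) {
                if (ret >= 0)   /* short read: no errno to pass along */
                        return -EIO;
                return ret;
        }
        return 0;
}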
diff --git a/fs/quota_v2.c b/fs/quota/quota_v2.c
index b618b563635c..a5475fb1ae44 100644
--- a/fs/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type)
static const uint quota_magics[] = V2_INITQMAGICS;
static const uint quota_versions[] = V2_INITQVERSIONS;
- size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+ size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+ sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader)) {
printk("quota_v2: failed read expected=%zd got=%zd\n",
sizeof(struct v2_disk_dqheader), size);
diff --git a/fs/quotaio_v1.h b/fs/quota/quotaio_v1.h
index 746654b5de70..746654b5de70 100644
--- a/fs/quotaio_v1.h
+++ b/fs/quota/quotaio_v1.h
diff --git a/fs/quotaio_v2.h b/fs/quota/quotaio_v2.h
index 530fe580685c..530fe580685c 100644
--- a/fs/quotaio_v2.h
+++ b/fs/quota/quotaio_v2.h
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 5d7c7ececa64..995ef1d6686c 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -18,7 +18,6 @@
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
-#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
@@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
if (ret)
return ret;
- /* by providing our own setattr() method, we skip this quotaism */
- if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
- (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
- ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
-
/* pick out size-changing events */
if (ia->ia_valid & ATTR_SIZE) {
loff_t size = i_size_read(inode);
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4646caa60455..f32d1425cc9f 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
journal_mark_dirty(th, s, sbh);
if (for_unformatted)
- DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
+ vfs_dq_free_block_nodirty(inode, 1);
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
amount_needed, hint->inode->i_uid);
#endif
quota_ret =
- DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
+ vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
if (quota_ret) /* Quota exceeded? */
return QUOTA_EXCEEDED;
if (hint->preallocate && hint->prealloc_size) {
@@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
"reiserquota: allocating (prealloc) %d blocks id=%u",
hint->prealloc_size, hint->inode->i_uid);
#endif
- quota_ret =
- DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
+ quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
hint->prealloc_size);
if (quota_ret)
hint->preallocate = hint->prealloc_size = 0;
@@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
nr_allocated,
hint->inode->i_uid);
#endif
- DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */
+ /* Free not allocated blocks */
+ vfs_dq_free_block_nodirty(hint->inode,
+ amount_needed + hint->prealloc_size -
+ nr_allocated);
}
while (nr_allocated--)
reiserfs_free_block(hint->th, hint->inode,
@@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
REISERFS_I(hint->inode)->i_prealloc_count,
hint->inode->i_uid);
#endif
- DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
+ vfs_dq_free_block_nodirty(hint->inode, amount_needed +
hint->prealloc_size - nr_allocated -
REISERFS_I(hint->inode)->
i_prealloc_count);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 55fce92cdf18..823227a7662a 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode)
* after delete_object so that quota updates go into the same transaction as
* stat data deletion */
if (!err)
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
@@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto out_end_trans;
}
@@ -1947,12 +1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
INODE_PKEY(inode)->k_objectid = 0;
/* Quota change must be inside a transaction for journaling */
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
/* Drop can be outside and it needs more credits so it's better to have it outside */
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
@@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
goto out;
error =
- DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
journal_end(&th, inode->i_sb,
jbegin_count);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 738967f6c8ee..639d635d9d4b 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
*/
static int drop_new_inode(struct inode *inode)
{
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
make_bad_inode(inode);
inode->i_flags |= S_NOQUOTA;
iput(inode);
@@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode)
}
/* utility function that does setup for reiserfs_new_inode.
-** DQUOT_INIT needs lots of credits so it's better to have it
+** vfs_dq_init needs lots of credits so it's better to have it
** outside of a transaction, so we had to pull some bits of
** reiserfs_new_inode out into this func.
*/
@@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
} else {
inode->i_gid = current_fsgid();
}
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
return 0;
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index abbc64dcc8d4..73aaa33f6735 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath
"reiserquota delete_item(): freeing %u, id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
#endif
- DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+ vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
/* Return deleted body length */
return n_ret_value;
@@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
quota_cut_bytes, inode->i_uid,
key2type(key));
#endif
- DQUOT_FREE_SPACE_NODIRTY(inode,
+ vfs_dq_free_space_nodirty(inode,
quota_cut_bytes);
}
break;
@@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
"reiserquota cut_from_item(): freeing %u id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, '?');
#endif
- DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+ vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
return n_ret_value;
}
@@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
key2type(&(p_s_key->on_disk_key)));
#endif
- if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
+ if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) {
pathrelse(p_s_search_path);
return -EDQUOT;
}
@@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
n_pasted_size, inode->i_uid,
key2type(&(p_s_key->on_disk_key)));
#endif
- DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
+ vfs_dq_free_space_nodirty(inode, n_pasted_size);
return retval;
}
@@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
#endif
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
- if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) {
+ if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
pathrelse(p_s_path);
return -EDQUOT;
}
@@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
quota_bytes, inode->i_uid, head2type(p_s_ih));
#endif
if (inode)
- DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
+ vfs_dq_free_space_nodirty(inode, quota_bytes);
return retval;
}
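The reiserfs hunks above are a mechanical rename from the old DQUOT_* macros to the vfs_dq_*() wrappers, but the usage pattern is worth spelling out: charge the quota with the _nodirty variant before the tree is touched, fail with -EDQUOT if the charge is refused, and give the bytes back with the matching free call on any later error. A simplified sketch of that pairing, where do_the_actual_insert() merely stands in for the balancing code:

/* Simplified sketch of the charge/rollback pairing used above. */
static int example_paste_bytes(struct inode *inode, unsigned pasted_size)
{
        int retval;

        if (vfs_dq_alloc_space_nodirty(inode, pasted_size))
                return -EDQUOT;         /* quota refused the allocation */

        retval = do_the_actual_insert(inode, pasted_size);  /* hypothetical */
        if (retval)
                vfs_dq_free_space_nodirty(inode, pasted_size);  /* roll back */

        return retval;
}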
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f3c820b75829..5dbafb739401 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s)
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
@@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = {
#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
-static int reiserfs_dquot_initialize(struct inode *, int);
-static int reiserfs_dquot_drop(struct inode *);
static int reiserfs_write_dquot(struct dquot *);
static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
@@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int);
static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
static struct dquot_operations reiserfs_quota_operations = {
- .initialize = reiserfs_dquot_initialize,
- .drop = reiserfs_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
#ifdef CONFIG_QUOTA
-static int reiserfs_dquot_initialize(struct inode *inode, int type)
-{
- struct reiserfs_transaction_handle th;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- reiserfs_write_lock(inode->i_sb);
- ret =
- journal_begin(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (ret)
- goto out;
- ret = dquot_initialize(inode, type);
- err =
- journal_end(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (!ret && err)
- ret = err;
- out:
- reiserfs_write_unlock(inode->i_sb);
- return ret;
-}
-
-static int reiserfs_dquot_drop(struct inode *inode)
-{
- struct reiserfs_transaction_handle th;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- reiserfs_write_lock(inode->i_sb);
- ret =
- journal_begin(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (ret) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- goto out;
- }
- ret = dquot_drop(inode);
- err =
- journal_end(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (!ret && err)
- ret = err;
- out:
- reiserfs_write_unlock(inode->i_sb);
- return ret;
-}
-
static int reiserfs_write_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;
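The super.c hunk above deletes reiserfs' journalled wrappers and points the .initialize and .drop callbacks straight at the generic dquot_initialize()/dquot_drop(). In outline, the operations table now has the shape sketched below; only the callbacks visible in the hunks are listed, the remaining reiserfs-specific hooks stay as before:

static struct dquot_operations example_quota_operations = {
        .initialize     = dquot_initialize,     /* generic, no journal wrap */
        .drop           = dquot_drop,           /* generic, no journal wrap */
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        /* ... plus the filesystem's write_dquot/acquire/release hooks ... */
};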
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ad92461cbfc3..ae881ccd2f03 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -1136,7 +1136,7 @@ xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
return 1;
}
-static struct dentry_operations xattr_lookup_poison_ops = {
+static const struct dentry_operations xattr_lookup_poison_ops = {
.d_compare = xattr_lookup_poison,
};
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index e7ddd0328ddc..3e4803b4427e 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -277,7 +277,7 @@ static int smb_hash_dentry(struct dentry *, struct qstr *);
static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
static int smb_delete_dentry(struct dentry *);
-static struct dentry_operations smbfs_dentry_operations =
+static const struct dentry_operations smbfs_dentry_operations =
{
.d_revalidate = smb_lookup_validate,
.d_hash = smb_hash_dentry,
@@ -285,7 +285,7 @@ static struct dentry_operations smbfs_dentry_operations =
.d_delete = smb_delete_dentry,
};
-static struct dentry_operations smbfs_dentry_operations_case =
+static const struct dentry_operations smbfs_dentry_operations_case =
{
.d_revalidate = smb_lookup_validate,
.d_delete = smb_delete_dentry,
diff --git a/fs/super.c b/fs/super.c
index dd4acb158b5e..2ba481518ba7 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s)
if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
s->s_count -= S_BIAS-1;
spin_unlock(&sb_lock);
- DQUOT_OFF(s, 0);
+ vfs_dq_off(s, 0);
down_write(&s->s_umount);
fs->kill_sb(s);
put_filesystem(fs);
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super);
void __fsync_super(struct super_block *sb)
{
sync_inodes_sb(sb, 0);
- DQUOT_SYNC(sb);
+ vfs_dq_sync(sb);
lock_super(sb);
if (sb->s_dirt && sb->s_op->write_super)
sb->s_op->write_super(sb);
@@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
mark_files_ro(sb);
else if (!fs_may_remount_ro(sb))
return -EBUSY;
- retval = DQUOT_OFF(sb, 1);
+ retval = vfs_dq_off(sb, 1);
if (retval < 0 && retval != -ENOSYS)
return -EBUSY;
}
@@ -670,7 +670,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
}
sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
if (remount_rw)
- DQUOT_ON_REMOUNT(sb);
+ vfs_dq_quota_on_remount(sb);
return 0;
}
@@ -838,7 +838,8 @@ int get_sb_bdev(struct file_system_type *fs_type,
bdev->bd_super = s;
}
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
error_s:
error = PTR_ERR(s);
@@ -884,7 +885,8 @@ int get_sb_nodev(struct file_system_type *fs_type,
return error;
}
s->s_flags |= MS_ACTIVE;
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
}
EXPORT_SYMBOL(get_sb_nodev);
@@ -916,7 +918,8 @@ int get_sb_single(struct file_system_type *fs_type,
s->s_flags |= MS_ACTIVE;
}
do_remount_sb(s, flags, data, 0);
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
}
EXPORT_SYMBOL(get_sb_single);
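The fs/super.c and ubifs hunks stop returning the value of simple_set_mnt() and instead call it and return 0 explicitly, which matches a simple_set_mnt() that no longer reports failure. The tail of a get_sb implementation therefore ends up shaped roughly like this sketch (error handling and the rest of the function omitted):

/* Sketch of the new calling convention at the end of a get_sb helper. */
static int example_get_sb_tail(struct super_block *s, struct vfsmount *mnt)
{
        s->s_flags |= MS_ACTIVE;
        simple_set_mnt(mnt, s);         /* attaches s to mnt; nothing to check */
        return 0;
}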
diff --git a/fs/sync.c b/fs/sync.c
index ec95a69d17aa..7abc65fbf21d 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -25,7 +25,7 @@ static void do_sync(unsigned long wait)
{
wakeup_pdflush(0);
sync_inodes(0); /* All mappings, inodes and their blockdevs */
- DQUOT_SYNC(NULL);
+ vfs_dq_sync(NULL);
sync_supers(); /* Write the superblocks */
sync_filesystems(0); /* Start syncing the filesystems */
sync_filesystems(wait); /* Waitingly sync the filesystems */
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 66aeb4fff0c3..d88d0fac9fa5 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -302,7 +302,7 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
iput(inode);
}
-static struct dentry_operations sysfs_dentry_ops = {
+static const struct dentry_operations sysfs_dentry_ops = {
.d_iput = sysfs_d_iput,
};
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index a1f1ef33e81c..33e047b59b8d 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -38,7 +38,7 @@ static int sysv_hash(struct dentry *dentry, struct qstr *qstr)
return 0;
}
-struct dentry_operations sysv_dentry_operations = {
+const struct dentry_operations sysv_dentry_operations = {
.d_hash = sysv_hash,
};
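Together with the dcache.h change further down, which makes dentry->d_op a pointer to const, filesystems can now declare their dentry_operations tables const, as smbfs, sysfs and sysv do above. A minimal sketch of the declaration and hook-up for a hypothetical filesystem:

static int example_d_hash(struct dentry *dentry, struct qstr *qstr)
{
        /* hypothetical: fold the name before hashing, like sysv_hash() */
        return 0;
}

static const struct dentry_operations example_dentry_operations = {
        .d_hash = example_d_hash,
};

/* in ->lookup(): the const table is assigned to the (now const) d_op */
static void example_attach_ops(struct dentry *dentry)
{
        dentry->d_op = &example_dentry_operations;
}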
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 38ebe3f85b3d..5784a318c883 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -170,7 +170,7 @@ extern const struct file_operations sysv_file_operations;
extern const struct file_operations sysv_dir_operations;
extern const struct address_space_operations sysv_aops;
extern const struct super_operations sysv_sops;
-extern struct dentry_operations sysv_dentry_operations;
+extern const struct dentry_operations sysv_dentry_operations;
enum {
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1182b66a5491..c5c98355459a 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2034,7 +2034,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
/* 'fill_super()' opens ubi again so we must close it here */
ubi_close_volume(ubi);
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
out_deact:
up_write(&sb->s_umount);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b809bd494bd..2bb788a2acb1 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
((char *)bh->b_data)[(bit + i) >> 3]);
} else {
if (inode)
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
udf_add_free_space(sbi, sbi->s_partition, 1);
}
}
@@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
while (bit < (sb->s_blocksize << 3) && block_count > 0) {
if (!udf_test_bit(bit, bh->b_data))
goto out;
- else if (DQUOT_PREALLOC_BLOCK(inode, 1))
+ else if (vfs_dq_prealloc_block(inode, 1))
goto out;
else if (!udf_clear_bit(bit, bh->b_data)) {
udf_debug("bit already cleared for block %d\n", bit);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto out;
}
block_count--;
@@ -393,7 +393,7 @@ got_block:
/*
* Check quota for allocation of this block.
*/
- if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (inode && vfs_dq_alloc_block(inode, 1)) {
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
return 0;
@@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb,
/* We do this up front - There are some error conditions that
could occure, but.. oh well */
if (inode)
- DQUOT_FREE_BLOCK(inode, count);
+ vfs_dq_free_block(inode, count);
if (udf_add_free_space(sbi, sbi->s_partition, count))
mark_buffer_dirty(sbi->s_lvid_bh);
@@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
epos.offset -= adsize;
alloc_count = (elen >> sb->s_blocksize_bits);
- if (inode && DQUOT_PREALLOC_BLOCK(inode,
+ if (inode && vfs_dq_prealloc_block(inode,
alloc_count > block_count ? block_count : alloc_count))
alloc_count = 0;
else if (alloc_count > block_count) {
@@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb,
goal_eloc.logicalBlockNum++;
goal_elen -= sb->s_blocksize;
- if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (inode && vfs_dq_alloc_block(inode, 1)) {
brelse(goal_epos.bh);
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 31fc84297ddb..47dbe5613f90 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
clear_inode(inode);
@@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
insert_inode_hash(inode);
mark_inode_dirty(inode);
- if (DQUOT_ALLOC_INODE(inode)) {
- DQUOT_DROP(inode);
+ if (vfs_dq_alloc_inode(inode)) {
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
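udf_new_inode() above shows the standard shape of the quota charge for a freshly created inode: charge with vfs_dq_alloc_inode(), and on failure drop the dquot references again, mark the inode S_NOQUOTA so the generic code leaves it alone, and throw the inode away. A trimmed sketch of that error path (example_charge_new_inode() is invented for illustration):

/* Trimmed sketch of the new-inode quota error path used above. */
static struct inode *example_charge_new_inode(struct inode *inode, int *err)
{
        if (vfs_dq_alloc_inode(inode)) {
                vfs_dq_drop(inode);             /* release dquot references */
                inode->i_flags |= S_NOQUOTA;    /* keep quota away from it */
                inode->i_nlink = 0;
                iput(inode);
                *err = -EDQUOT;
                return NULL;
        }
        return inode;
}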
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0d9ada173739..54c16ec95dff 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
"bit already cleared for fragment %u", i);
}
- DQUOT_FREE_BLOCK (inode, count);
+ vfs_dq_free_block(inode, count);
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@@ -195,7 +195,7 @@ do_more:
ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
- DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
+ vfs_dq_free_block(inode, uspi->s_fpb);
fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
uspi->cs_total.cs_nbfree++;
@@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
for (i = oldcount; i < newcount; i++)
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
- if(DQUOT_ALLOC_BLOCK(inode, count)) {
+ if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
return 0;
}
@@ -664,7 +664,7 @@ cg_found:
for (i = count; i < uspi->s_fpb; i++)
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
i = uspi->s_fpb - count;
- DQUOT_FREE_BLOCK(inode, i);
+ vfs_dq_free_block(inode, i);
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
uspi->cs_total.cs_nffree += i;
@@ -676,7 +676,7 @@ cg_found:
result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
if (result == INVBLOCK)
return 0;
- if(DQUOT_ALLOC_BLOCK(inode, count)) {
+ if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
return 0;
}
@@ -747,7 +747,7 @@ gotit:
ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, -1);
- if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
+ if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
*err = -EDQUOT;
return INVBLOCK;
}
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 6f5dcf006096..3527c00fef0d 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode)
is_directory = S_ISDIR(inode->i_mode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
clear_inode (inode);
@@ -355,8 +355,8 @@ cg_found:
unlock_super (sb);
- if (DQUOT_ALLOC_INODE(inode)) {
- DQUOT_DROP(inode);
+ if (vfs_dq_alloc_inode(inode)) {
+ vfs_dq_drop(inode);
err = -EDQUOT;
goto fail_without_unlock;
}
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 39f877898565..3d2512c21f05 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -622,7 +622,6 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
mode_t mode;
- unsigned i;
/*
* Copy data to the in-core inode.
@@ -655,11 +654,12 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
+ memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
+ sizeof(ufs_inode->ui_u2.ui_addr));
} else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
+ memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
+ sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
+ ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
}
return 0;
}
@@ -669,7 +669,6 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
mode_t mode;
- unsigned i;
UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
/*
@@ -704,12 +703,12 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
*/
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufsi->i_u1.u2_i_data[i] =
- ufs2_inode->ui_u2.ui_addr.ui_db[i];
+ memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
+ sizeof(ufs2_inode->ui_u2.ui_addr));
} else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
+ memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
+ sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
+ ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
}
return 0;
}
@@ -781,7 +780,6 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
struct super_block *sb = inode->i_sb;
struct ufs_inode_info *ufsi = UFS_I(inode);
- unsigned i;
ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
@@ -809,12 +807,12 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
} else if (inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
+ memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
+ sizeof(ufs_inode->ui_u2.ui_addr));
}
else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+ memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
+ sizeof(ufs_inode->ui_u2.ui_symlink));
}
if (!inode->i_nlink)
@@ -825,7 +823,6 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
struct super_block *sb = inode->i_sb;
struct ufs_inode_info *ufsi = UFS_I(inode);
- unsigned i;
UFSD("ENTER\n");
ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
@@ -850,11 +847,11 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
} else if (inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i];
+ memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
+ sizeof(ufs_inode->ui_u2.ui_addr));
} else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+ memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
+ sizeof(ufs_inode->ui_u2.ui_symlink));
}
if (!inode->i_nlink)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index e3a9b1fac75a..23119fe7ad62 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -147,7 +147,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
} else {
/* fast symlink */
inode->i_op = &ufs_fast_symlink_inode_operations;
- memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l);
+ memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l);
inode->i_size = l-1;
}
mark_inode_dirty(inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 261a1c2f22dd..e1c1fc5ee239 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -636,6 +636,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
unsigned block_size, super_block_size;
unsigned flags;
unsigned super_block_offset;
+ unsigned maxsymlen;
int ret = -EINVAL;
uspi = NULL;
@@ -1069,6 +1070,16 @@ magic_found:
uspi->s_maxsymlinklen =
fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
+ if (uspi->fs_magic == UFS2_MAGIC)
+ maxsymlen = 2 * 4 * (UFS_NDADDR + UFS_NINDIR);
+ else
+ maxsymlen = 4 * (UFS_NDADDR + UFS_NINDIR);
+ if (uspi->s_maxsymlinklen > maxsymlen) {
+ ufs_warning(sb, __func__, "ufs_read_super: excessive maximum "
+ "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
+ uspi->s_maxsymlinklen = maxsymlen;
+ }
+
inode = ufs_iget(sb, UFS_ROOTINO);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 11c035168ea6..69b3427d7885 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -23,7 +23,7 @@ struct ufs_sb_info {
struct ufs_inode_info {
union {
__fs32 i_data[15];
- __u8 i_symlink[4*15];
+ __u8 i_symlink[2 * 4 * 15];
__fs64 u2_i_data[15];
} i_u1;
__u32 i_flags;
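The ufs changes above replace the word-by-word copy loops with memcpy() and, for fast symlinks, copy at most sizeof(ui_symlink) - 1 bytes and force a trailing NUL; the in-core i_symlink array is doubled so a UFS2 on-disk symlink always fits, and the super.c hunk clamps s_maxsymlinklen read from disk to that bound. A stand-alone sketch of the bounded copy, with illustrative buffer sizes rather than the exact ufs layout:

#include <string.h>

/* Illustrative sizes; the real ufs buffers come from the on-disk layout. */
#define DISK_SYMLINK_LEN        60
#define INCORE_SYMLINK_LEN      120

static void copy_fast_symlink(char incore[INCORE_SYMLINK_LEN],
                const char disk[DISK_SYMLINK_LEN])
{
        /* copy at most the on-disk area minus one byte ... */
        memcpy(incore, disk, DISK_SYMLINK_LEN - 1);
        /* ... and terminate explicitly, as the patched ufs code does */
        incore[DISK_SYMLINK_LEN - 1] = '\0';
}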
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e5f4ae989abf..c19a93c3be85 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -758,6 +758,8 @@ struct drm_driver {
int (*proc_init)(struct drm_minor *minor);
void (*proc_cleanup)(struct drm_minor *minor);
+ int (*debugfs_init)(struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct drm_minor *minor);
/**
* Driver-specific constructor for drm_gem_objects, to set up
@@ -793,6 +795,48 @@ struct drm_driver {
#define DRM_MINOR_CONTROL 2
#define DRM_MINOR_RENDER 3
+
+/**
+ * debugfs node list. This structure represents a debugfs file to
+ * be created by the drm core
+ */
+struct drm_debugfs_list {
+ const char *name; /** file name */
+ int (*show)(struct seq_file*, void*); /** show callback */
+ u32 driver_features; /**< Required driver features for this entry */
+};
+
+/**
+ * debugfs node structure. This structure represents a debugfs file.
+ */
+struct drm_debugfs_node {
+ struct list_head list;
+ struct drm_minor *minor;
+ struct drm_debugfs_list *debugfs_ent;
+ struct dentry *dent;
+};
+
+/**
+ * Info file list entry. This structure represents a debugfs or proc file to
+ * be created by the drm core
+ */
+struct drm_info_list {
+ const char *name; /** file name */
+ int (*show)(struct seq_file*, void*); /** show callback */
+ u32 driver_features; /**< Required driver features for this entry */
+ void *data;
+};
+
+/**
+ * debugfs node structure. This structure represents a debugfs file.
+ */
+struct drm_info_node {
+ struct list_head list;
+ struct drm_minor *minor;
+ struct drm_info_list *info_ent;
+ struct dentry *dent;
+};
+
/**
* DRM minor structure. This structure represents a drm minor number.
*/
@@ -802,7 +846,12 @@ struct drm_minor {
dev_t device; /**< Device number for mknod */
struct device kdev; /**< Linux device */
struct drm_device *dev;
- struct proc_dir_entry *dev_root; /**< proc directory entry */
+
+ struct proc_dir_entry *proc_root; /**< proc directory entry */
+ struct drm_info_node proc_nodes;
+ struct dentry *debugfs_root;
+ struct drm_info_node debugfs_nodes;
+
struct drm_master *master; /* currently active master for this node */
struct list_head master_list;
struct drm_mode_group mode_group;
@@ -1258,6 +1307,7 @@ extern unsigned int drm_debug;
extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
+extern struct dentry *drm_debugfs_root;
extern struct idr drm_minors_idr;
@@ -1268,6 +1318,31 @@ extern int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root);
extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
+ /* Debugfs support */
+#if defined(CONFIG_DEBUG_FS)
+extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+ struct dentry *root);
+extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor);
+extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+ struct drm_minor *minor);
+extern int drm_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+ /* Info file support */
+extern int drm_name_info(struct seq_file *m, void *data);
+extern int drm_vm_info(struct seq_file *m, void *data);
+extern int drm_queues_info(struct seq_file *m, void *data);
+extern int drm_bufs_info(struct seq_file *m, void *data);
+extern int drm_vblank_info(struct seq_file *m, void *data);
+extern int drm_clients_info(struct seq_file *m, void* data);
+extern int drm_gem_name_info(struct seq_file *m, void *data);
+extern int drm_gem_object_info(struct seq_file *m, void* data);
+
+#if DRM_DEBUG_CODE
+extern int drm_vma_info(struct seq_file *m, void *data);
+#endif
+
/* Scatter Gather Support (drm_scatter.h) */
extern void drm_sg_cleanup(struct drm_sg_mem * entry);
extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
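The declarations added above sketch how a driver exposes debugfs files through the core: fill a drm_info_list array with name/show pairs and register it from the new debugfs_init hook with drm_debugfs_create_files(), removing it again from debugfs_cleanup. A hedged sketch using those prototypes; example_status_info() is invented, and the assumption that the core hands the drm_info_node to ->show via seq_file->private is noted in the comments:

static int example_status_info(struct seq_file *m, void *data)
{
        /* assumed: the debugfs glue passes the node as seq_file private */
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;

        seq_printf(m, "drm device %p\n", dev);
        return 0;
}

static struct drm_info_list example_debugfs_list[] = {
        { "example_status", example_status_info, 0, NULL },
};

static int example_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(example_debugfs_list,
                        ARRAY_SIZE(example_debugfs_list),
                        minor->debugfs_root, minor);
}

static void example_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(example_debugfs_list,
                        ARRAY_SIZE(example_debugfs_list), minor);
}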
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 5165f240aa68..76c4c8243038 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -418,4 +418,6 @@
{0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0, 0, 0}
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bd7ac793be19..f19fd9045ea0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
void mark_buffer_async_write(struct buffer_head *bh);
-void invalidate_bdev(struct block_device *);
-int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
-int fsync_bdev(struct block_device *);
-struct super_block *freeze_bdev(struct block_device *);
-int thaw_bdev(struct block_device *, struct super_block *);
-int fsync_super(struct super_block *);
-int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 3fd2194ff573..b880864672de 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -125,6 +125,13 @@ struct compat_dirent {
char d_name[256];
};
+struct compat_ustat {
+ compat_daddr_t f_tfree;
+ compat_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
@@ -178,6 +185,7 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned nsems, const struct compat_timespec __user *timeout);
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage ssize_t compat_sys_readv(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
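struct compat_ustat mirrors the native struct ustat with 32-bit-safe compat_daddr_t/compat_ino_t fields, and compat_sys_ustat() is expected to translate between the two before copying the result out. A purely illustrative sketch of that translation; the helper name put_compat_ustat() is invented here, not part of the patch:

/* Illustrative only: copy a kernel struct ustat out as a compat_ustat. */
static int put_compat_ustat(struct compat_ustat __user *u,
                const struct ustat *st)
{
        struct compat_ustat tmp;

        memset(&tmp, 0, sizeof(tmp));
        tmp.f_tfree  = st->f_tfree;
        tmp.f_tinode = st->f_tinode;
        memcpy(tmp.f_fname, st->f_fname, sizeof(tmp.f_fname));
        memcpy(tmp.f_fpack, st->f_fpack, sizeof(tmp.f_fpack));

        if (copy_to_user(u, &tmp, sizeof(tmp)))
                return -EFAULT;
        return 0;
}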
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c66d22487bf8..15156364d196 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -112,7 +112,7 @@ struct dentry {
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
unsigned long d_time; /* used by d_revalidate */
- struct dentry_operations *d_op;
+ const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
void *d_fsdata; /* fs-specific data */
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 4d078e99c017..c6b3ca3af6df 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -25,10 +25,12 @@
#include <linux/types.h>
#include <linux/firewire-constants.h>
-#define FW_CDEV_EVENT_BUS_RESET 0x00
-#define FW_CDEV_EVENT_RESPONSE 0x01
-#define FW_CDEV_EVENT_REQUEST 0x02
-#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
+#define FW_CDEV_EVENT_BUS_RESET 0x00
+#define FW_CDEV_EVENT_RESPONSE 0x01
+#define FW_CDEV_EVENT_REQUEST 0x02
+#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
+#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
+#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
@@ -136,7 +138,24 @@ struct fw_cdev_event_request {
* This event is sent when the controller has completed an &fw_cdev_iso_packet
* with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
* stripped of all packets up until and including the interrupt packet are
- * returned in the @header field.
+ * returned in the @header field. The amount of header data per packet is as
+ * specified at iso context creation by &fw_cdev_create_iso_context.header_size.
+ *
+ * In version 1 of this ABI, header data consisted of the 1394 isochronous
+ * packet header, followed by quadlets from the packet payload if
+ * &fw_cdev_create_iso_context.header_size > 4.
+ *
+ * In version 2 of this ABI, header data consist of the 1394 isochronous
+ * packet header, followed by a timestamp quadlet if
+ * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
+ * packet payload if &fw_cdev_create_iso_context.header_size > 8.
+ *
+ * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
+ *
+ * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
+ * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
+ * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
+ * order.
*/
struct fw_cdev_event_iso_interrupt {
__u64 closure;
@@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt {
};
/**
+ * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
+ * @closure: See &fw_cdev_event_common;
+ * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
+ * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
+ * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
+ * @handle: Reference by which an allocated resource can be deallocated
+ * @channel: Isochronous channel which was (de)allocated, if any
+ * @bandwidth: Bandwidth allocation units which were (de)allocated, if any
+ *
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
+ * resource was allocated at the IRM. The client has to check @channel and
+ * @bandwidth for whether the allocation actually succeeded.
+ *
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
+ * resource was deallocated at the IRM. It is also sent when automatic
+ * reallocation after a bus reset failed.
+ *
+ * @channel is <0 if no channel was (de)allocated or if reallocation failed.
+ * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
+ */
+struct fw_cdev_event_iso_resource {
+ __u64 closure;
+ __u32 type;
+ __u32 handle;
+ __s32 channel;
+ __s32 bandwidth;
+};
+
+/**
* union fw_cdev_event - Convenience union of fw_cdev_event_ types
* @common: Valid for all types
* @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
* @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
* @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
+ * @iso_resource: Valid if @common.type ==
+ * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
+ * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
*
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
@@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt {
* not fit will be discarded so that the next read(2) will return a new event.
*/
union fw_cdev_event {
- struct fw_cdev_event_common common;
- struct fw_cdev_event_bus_reset bus_reset;
- struct fw_cdev_event_response response;
- struct fw_cdev_event_request request;
- struct fw_cdev_event_iso_interrupt iso_interrupt;
+ struct fw_cdev_event_common common;
+ struct fw_cdev_event_bus_reset bus_reset;
+ struct fw_cdev_event_response response;
+ struct fw_cdev_event_request request;
+ struct fw_cdev_event_iso_interrupt iso_interrupt;
+ struct fw_cdev_event_iso_resource iso_resource;
};
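
As a brief illustration of the read(2)-and-cast pattern described above (not part of the patch; the open file descriptor fd and the buffer size are assumptions), a client could drain one event like this:

#include <unistd.h>
#include <linux/firewire-cdev.h>

static void handle_one_event(int fd)
{
	__attribute__((aligned(8))) char buf[16 * 1024];
	const union fw_cdev_event *event = (const union fw_cdev_event *)buf;
	ssize_t len = read(fd, buf, sizeof(buf));

	if (len < (ssize_t)sizeof(event->common))
		return;

	switch (event->common.type) {
	case FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED:
		/* event->iso_resource.channel < 0 means the allocation failed */
		break;
	case FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED:
		/* resource freed, or reallocation after a bus reset failed */
		break;
	default:
		break;
	}
}
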
-#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
-#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
-#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
-#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
-#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
-#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
-#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
-#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
+/* available since kernel version 2.6.22 */
+#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
+#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
+#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
+#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
+#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
+#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
+#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
+#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
+#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
+#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
+#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
+#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
-#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
-#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
-#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
-#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
-#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
+/* available since kernel version 2.6.24 */
+#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
-/* FW_CDEV_VERSION History
- *
- * 1 Feb 18, 2007: Initial version.
+/* available since kernel version 2.6.30 */
+#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate)
+#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */
+#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
+#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
+
+/*
+ * FW_CDEV_VERSION History
+ * 1 (2.6.22) - initial version
+ * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
+ * &fw_cdev_create_iso_context.header_size is 8 or more
*/
-#define FW_CDEV_VERSION 1
+#define FW_CDEV_VERSION 2
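
As a hedged sketch of how a client might act on this version history (not part of the patch; the open descriptor fd is an assumption), the ABI version implemented by the running kernel can be queried through the get_info ioctl documented below before interpreting iso interrupt headers:

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int kernel_abi_version(int fd)
{
	struct fw_cdev_get_info info = {
		.version = FW_CDEV_VERSION, /* version this client implements */
	};

	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
		return -1;

	/* 1: no timestamp quadlet in iso headers, >= 2: timestamp present */
	return info.version;
}
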
/**
* struct fw_cdev_get_info - General purpose information ioctl
@@ -201,7 +266,7 @@ union fw_cdev_event {
* case, @rom_length is updated with the actual length of the
* configuration ROM.
* @rom: If non-zero, address of a buffer to be filled by a copy of the
- * local node's configuration ROM
+ * device's configuration ROM
* @bus_reset: If non-zero, address of a buffer to be filled by a
* &struct fw_cdev_event_bus_reset with the current state
* of the bus. This does not cause a bus reset to happen.
@@ -229,7 +294,7 @@ struct fw_cdev_get_info {
* Send a request to the device. This ioctl implements all outgoing requests.
* Both quadlet and block request specify the payload as a pointer to the data
* in the @data field. Once the transaction completes, the kernel writes an
- * &fw_cdev_event_request event back. The @closure field is passed back to
+ * &fw_cdev_event_response event back. The @closure field is passed back to
* user space in the response event.
*/
struct fw_cdev_send_request {
@@ -284,9 +349,9 @@ struct fw_cdev_allocate {
};
/**
- * struct fw_cdev_deallocate - Free an address range allocation
- * @handle: Handle to the address range, as returned by the kernel when the
- * range was allocated
+ * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
+ * @handle: Handle to the address range or iso resource, as returned by the
+ * kernel when the range or resource was allocated
*/
struct fw_cdev_deallocate {
__u32 handle;
@@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset {
* If successful, the kernel adds the descriptor and writes back a handle to the
* kernel-side object to be used for later removal of the descriptor block and
* immediate key.
+ *
+ * This ioctl affects the configuration ROMs of all local nodes.
+ * The ioctl only succeeds on device files which represent a local node.
*/
struct fw_cdev_add_descriptor {
__u32 immediate;
@@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor {
* descriptor was added
*
* Remove a descriptor block and accompanying immediate key from the local
- * node's configuration ROM.
+ * nodes' configuration ROMs.
*/
struct fw_cdev_remove_descriptor {
__u32 handle;
@@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor {
*
* If a context was successfully created, the kernel writes back a handle to the
* context, which must be passed in for subsequent operations on that context.
+ *
+ * Note that the effect of a @header_size > 4 depends on
+ * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
*/
struct fw_cdev_create_iso_context {
__u32 type;
@@ -473,10 +544,91 @@ struct fw_cdev_stop_iso {
* The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
* and also the system clock. This makes it possible to express the receive
* time of an isochronous packet as a system time with microsecond accuracy.
+ *
+ * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
+ * 12 bits cycleOffset, in host byte order.
*/
struct fw_cdev_get_cycle_timer {
__u64 local_time;
__u32 cycle_timer;
};
+/**
+ * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
+ * @closure: Passed back to userspace in corresponding iso resource events
+ * @channels: Isochronous channels of which one is to be (de)allocated
+ * @bandwidth: Isochronous bandwidth units to be (de)allocated
+ * @handle: Handle to the allocation, written by the kernel (only valid in
+ * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
+ *
+ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
+ * isochronous channel and/or of isochronous bandwidth at the isochronous
+ * resource manager (IRM). Only one of the channels specified in @channels is
+ * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after
+ * communication with the IRM, indicating success or failure in the event data.
+ * The kernel will automatically reallocate the resources after bus resets.
+ * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
+ * will be sent. The kernel will also automatically deallocate the resources
+ * when the file descriptor is closed.
+ *
+ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
+ * deallocation of resources which were allocated as described above.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
+ *
+ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
+ * without automatic re- or deallocation.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
+ * indicating success or failure in its data.
+ *
+ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
+ * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
+ * instead of allocated.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
+ *
+ * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
+ * for the lifetime of the fd or handle.
+ * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
+ * for the duration of a bus generation.
+ *
+ * @channels is a host-endian bitfield with the least significant bit
+ * representing channel 0 and the most significant bit representing channel 63:
+ * 1ULL << c for each channel c that is a candidate for (de)allocation.
+ *
+ * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
+ * one quadlet of data (payload or header data) at speed S1600.
+ */
+struct fw_cdev_allocate_iso_resource {
+ __u64 closure;
+ __u64 channels;
+ __u32 bandwidth;
+ __u32 handle;
+};
+
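A minimal userspace sketch of the allocation path described above (not part of the patch; fd, the candidate channels and the bandwidth value are arbitrary assumptions):

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int request_iso_resources(int fd)
{
	struct fw_cdev_allocate_iso_resource req = {
		.closure   = 0,
		.channels  = 1ULL << 5 | 1ULL << 6, /* candidates: channel 5 or 6 */
		.bandwidth = 2400,                  /* arbitrary example value */
	};

	/*
	 * On success the kernel fills in req.handle; the outcome of the
	 * allocation itself arrives later as an
	 * FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event.
	 */
	return ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req);
}
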
+/**
+ * struct fw_cdev_send_stream_packet - send an asynchronous stream packet
+ * @length: Length of outgoing payload, in bytes
+ * @tag: Data format tag
+ * @channel: Isochronous channel to transmit to
+ * @sy: Synchronization code
+ * @closure: Passed back to userspace in the response event
+ * @data: Userspace pointer to payload
+ * @generation: The bus generation where packet is valid
+ * @speed: Speed to transmit at
+ *
+ * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
+ * to every device which is listening to the specified channel. The kernel
+ * writes an &fw_cdev_event_response event which indicates success or failure of
+ * the transmission.
+ */
+struct fw_cdev_send_stream_packet {
+ __u32 length;
+ __u32 tag;
+ __u32 channel;
+ __u32 sy;
+ __u64 closure;
+ __u64 data;
+ __u32 generation;
+ __u32 speed;
+};
+
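A hedged sketch of sending such a packet (not part of the patch; generation and speed would normally come from a previous &fw_cdev_event_bus_reset, and the tag and channel values are arbitrary):

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int send_stream_packet(int fd, __u32 generation, __u32 speed,
			      const void *payload, __u32 length)
{
	struct fw_cdev_send_stream_packet sp = {
		.length     = length,
		.tag        = 1,       /* arbitrary data format tag */
		.channel    = 5,       /* arbitrary channel */
		.sy         = 0,
		.closure    = 0,
		.data       = (__u64)(unsigned long)payload,
		.generation = generation,
		.speed      = speed,
	};

	/* completion is reported later in an fw_cdev_event_response event */
	return ioctl(fd, FW_CDEV_IOC_SEND_STREAM_PACKET, &sp);
}
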
#endif /* _LINUX_FIREWIRE_CDEV_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1cd44f727dac..42436ae42f70 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1064,34 +1064,147 @@ extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
#else /* !CONFIG_FILE_LOCKING */
-#define fcntl_getlk(a, b) ({ -EINVAL; })
-#define fcntl_setlk(a, b, c, d) ({ -EACCES; })
+static inline int fcntl_getlk(struct file *file, struct flock __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock __user *user)
+{
+ return -EACCES;
+}
+
#if BITS_PER_LONG == 32
-#define fcntl_getlk64(a, b) ({ -EINVAL; })
-#define fcntl_setlk64(a, b, c, d) ({ -EACCES; })
+static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock64 __user *user)
+{
+ return -EACCES;
+}
#endif
-#define fcntl_setlease(a, b, c) ({ 0; })
-#define fcntl_getlease(a) ({ 0; })
-#define locks_init_lock(a) ({ })
-#define __locks_copy_lock(a, b) ({ })
-#define locks_copy_lock(a, b) ({ })
-#define locks_remove_posix(a, b) ({ })
-#define locks_remove_flock(a) ({ })
-#define posix_test_lock(a, b) ({ 0; })
-#define posix_lock_file(a, b, c) ({ -ENOLCK; })
-#define posix_lock_file_wait(a, b) ({ -ENOLCK; })
-#define posix_unblock_lock(a, b) (-ENOENT)
-#define vfs_test_lock(a, b) ({ 0; })
-#define vfs_lock_file(a, b, c, d) (-ENOLCK)
-#define vfs_cancel_lock(a, b) ({ 0; })
-#define flock_lock_file_wait(a, b) ({ -ENOLCK; })
-#define __break_lease(a, b) ({ 0; })
-#define lease_get_mtime(a, b) ({ })
-#define generic_setlease(a, b, c) ({ -EINVAL; })
-#define vfs_setlease(a, b, c) ({ -EINVAL; })
-#define lease_modify(a, b) ({ -EINVAL; })
-#define lock_may_read(a, b, c) ({ 1; })
-#define lock_may_write(a, b, c) ({ 1; })
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+{
+ return 0;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+ return 0;
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+ return;
+}
+
+static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+ return;
+}
+
+static inline void locks_remove_flock(struct file *filp)
+{
+ return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return -ENOLCK;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return -ENOLCK;
+}
+
+static inline int posix_unblock_lock(struct file *filp,
+ struct file_lock *waiter)
+{
+ return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+ struct file_lock *fl, struct file_lock *conf)
+{
+ return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int flock_lock_file_wait(struct file *filp,
+ struct file_lock *request)
+{
+ return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
+{
+ return;
+}
+
+static inline int generic_setlease(struct file *filp, long arg,
+ struct file_lock **flp)
+{
+ return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, long arg,
+ struct file_lock **lease)
+{
+ return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lock **before, int arg)
+{
+ return -EINVAL;
+}
+
+static inline int lock_may_read(struct inode *inode, loff_t start,
+ unsigned long len)
+{
+ return 1;
+}
+
+static inline int lock_may_write(struct inode *inode, loff_t start,
+ unsigned long len)
+{
+ return 1;
+}
+
#endif /* !CONFIG_FILE_LOCKING */
@@ -1607,7 +1720,7 @@ struct super_block *sget(struct file_system_type *type,
extern int get_sb_pseudo(struct file_system_type *, char *,
const struct super_operations *ops, unsigned long,
struct vfsmount *mnt);
-extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
+extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
@@ -1688,13 +1801,44 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
return 0;
}
#else /* !CONFIG_FILE_LOCKING */
-#define locks_mandatory_locked(a) ({ 0; })
-#define locks_mandatory_area(a, b, c, d, e) ({ 0; })
-#define __mandatory_lock(a) ({ 0; })
-#define mandatory_lock(a) ({ 0; })
-#define locks_verify_locked(a) ({ 0; })
-#define locks_verify_truncate(a, b, c) ({ 0; })
-#define break_lease(a, b) ({ 0; })
+static inline int locks_mandatory_locked(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int locks_mandatory_area(int rw, struct inode *inode,
+ struct file *filp, loff_t offset,
+ size_t count)
+{
+ return 0;
+}
+
+static inline int __mandatory_lock(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int mandatory_lock(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int locks_verify_locked(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
+ size_t size)
+{
+ return 0;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
#endif /* CONFIG_FILE_LOCKING */
/* fs/open.c */
@@ -1731,6 +1875,13 @@ extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern void invalidate_bdev(struct block_device *);
+extern int sync_blockdev(struct block_device *bdev);
+extern struct super_block *freeze_bdev(struct block_device *);
+extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+extern int fsync_bdev(struct block_device *);
+extern int fsync_super(struct super_block *);
+extern int fsync_no_super(struct block_device *);
#else
static inline void bd_forget(struct inode *inode) {}
#endif
@@ -1882,7 +2033,6 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
}
-extern int do_pipe(int *);
extern int do_pipe_flags(int *, int);
extern struct file *create_read_pipe(struct file *f, int flags);
extern struct file *create_write_pipe(int flags);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b1bb817d1427..4b501b48ce86 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -18,6 +18,22 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+/*
+ * DS bit usage
+ *
+ * TA = transmitter address
+ * RA = receiver address
+ * DA = destination address
+ * SA = source address
+ *
+ * ToDS FromDS A1(RA) A2(TA) A3 A4 Use
+ * -----------------------------------------------------------------
+ * 0 0 DA SA BSSID - IBSS/DLS
+ * 0 1 DA BSSID SA - AP -> STA
+ * 1 0 BSSID SA DA - AP <- STA
+ * 1 1 RA TA DA SA unspecified (WDS)
+ */
+
#define FCS_LEN 4
#define IEEE80211_FCTL_VERS 0x0003
@@ -851,6 +867,7 @@ struct ieee80211_ht_info {
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
+#define WLAN_AUTH_FT 2
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 60e16a551dd6..673f2209453d 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -153,7 +153,6 @@ struct frhdr
struct dlci_local
{
- struct net_device_stats stats;
struct net_device *master;
struct net_device *slave;
struct dlci_conf config;
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index f69e66d151cc..30b06c893944 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,7 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
/* linux/fs/ncpfs/dir.c */
extern const struct inode_operations ncp_dir_inode_operations;
extern const struct file_operations ncp_dir_operations;
-extern struct dentry_operations ncp_root_dentry_operations;
+extern const struct dentry_operations ncp_root_dentry_operations;
int ncp_conn_logged_in(struct super_block *);
int ncp_date_dos2unix(__le16 time, __le16 date);
void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index be3ebd7e8ce5..1b55952a17f6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -32,6 +32,7 @@
#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
+#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index adbc50a20ec2..7b1a652066c0 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -437,6 +437,29 @@ extern void xt_free_table_info(struct xt_table_info *info);
extern void xt_table_entry_swap_rcu(struct xt_table_info *old,
struct xt_table_info *new);
+/*
+ * This helper is performance critical and must be inlined
+ */
+static inline unsigned long ifname_compare_aligned(const char *_a,
+ const char *_b,
+ const char *_mask)
+{
+ const unsigned long *a = (const unsigned long *)_a;
+ const unsigned long *b = (const unsigned long *)_b;
+ const unsigned long *mask = (const unsigned long *)_mask;
+ unsigned long ret;
+
+ ret = (a[0] ^ b[0]) & mask[0];
+ if (IFNAMSIZ > sizeof(unsigned long))
+ ret |= (a[1] ^ b[1]) & mask[1];
+ if (IFNAMSIZ > 2 * sizeof(unsigned long))
+ ret |= (a[2] ^ b[2]) & mask[2];
+ if (IFNAMSIZ > 3 * sizeof(unsigned long))
+ ret |= (a[3] ^ b[3]) & mask[3];
+ BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+ return ret;
+}
+
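A small usage fragment (the rule structure is hypothetical and only shows the intended calling convention; a return value of 0 from the helper means the masked names are equal):

struct example_rule {
	char iniface[IFNAMSIZ];
	char iniface_mask[IFNAMSIZ];
};

static bool example_iface_matches(const struct net_device *in,
				  const struct example_rule *rule)
{
	return ifname_compare_aligned(in->name, rule->iniface,
				      rule->iniface_mask) == 0;
}
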
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index db867b04ac3c..8cc8807f77d6 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -415,7 +415,7 @@ extern const struct inode_operations nfs_dir_inode_operations;
extern const struct inode_operations nfs3_dir_inode_operations;
#endif /* CONFIG_NFS_V3 */
extern const struct file_operations nfs_dir_operations;
-extern struct dentry_operations nfs_dentry_operations;
+extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2e5f00066afd..43a713fce11c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -785,7 +785,7 @@ struct nfs_access_entry;
*/
struct nfs_rpc_ops {
u32 version; /* Protocol version */
- struct dentry_operations *dentry_ops;
+ const struct dentry_operations *dentry_ops;
const struct inode_operations *dir_inode_ops;
const struct inode_operations *file_inode_ops;
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index f33aa08dd9b3..cbe8ce3bf486 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -142,6 +142,12 @@
* %NL80211_ATTR_IE. If the command succeeds, the requested data will be
* added to all specified management frames generated by
* kernel/firmware/driver.
+ * Note: This command has been removed and it is only reserved at this
+ * point to avoid re-using an existing command number. The functionality this
+ * command was planned for has been provided with a cleaner design, with the
+ * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN,
+ * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE,
+ * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE.
*
* @NL80211_CMD_GET_SCAN: get scan results
* @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
@@ -161,6 +167,38 @@
* %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on
* to (%NL80211_ATTR_REG_ALPHA2).
*
+ * @NL80211_CMD_AUTHENTICATE: authentication request and notification.
+ * This command is used both as a command (request to authenticate) and
+ * as an event on the "mlme" multicast group indicating completion of the
+ * authentication process.
+ * When used as a command, %NL80211_ATTR_IFINDEX is used to identify the
+ * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and
+ * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
+ * the SSID (mainly for association, but is included in authentication
+ * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used
+ * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE
+ * is used to specify the authentication type. %NL80211_ATTR_IE is used to
+ * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs)
+ * to be added to the frame.
+ * When used as an event, this reports reception of an Authentication
+ * frame in station and IBSS modes when the local MLME processed the
+ * frame, i.e., it was for the local STA and was received in correct
+ * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the
+ * MLME SAP interface (kernel providing MLME, userspace SME). The
+ * included NL80211_ATTR_FRAME attribute contains the management frame
+ * (including both the header and frame body, but not FCS).
+ * @NL80211_CMD_ASSOCIATE: association request and notification; like
+ * NL80211_CMD_AUTHENTICATE but for Association and Reassociation
+ * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request,
+ * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives).
+ * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like
+ * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to
+ * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication
+ * primitives).
+ * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like
+ * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to
+ * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives).
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -206,7 +244,7 @@ enum nl80211_commands {
NL80211_CMD_GET_MESH_PARAMS,
NL80211_CMD_SET_MESH_PARAMS,
- NL80211_CMD_SET_MGMT_EXTRA_IE,
+ NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */,
NL80211_CMD_GET_REG,
@@ -217,6 +255,11 @@ enum nl80211_commands {
NL80211_CMD_REG_CHANGE,
+ NL80211_CMD_AUTHENTICATE,
+ NL80211_CMD_ASSOCIATE,
+ NL80211_CMD_DEAUTHENTICATE,
+ NL80211_CMD_DISASSOCIATE,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -230,8 +273,11 @@ enum nl80211_commands {
*/
#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE
-
#define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE
+#define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE
+#define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE
+#define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE
+#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE
/**
* enum nl80211_attrs - nl80211 netlink attributes
@@ -349,6 +395,19 @@ enum nl80211_commands {
* @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently
* set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*)
*
+ * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies
+ * an array of command numbers (i.e. a mapping from index to command number)
+ * that the driver for the given wiphy supports.
+ *
+ * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header
+ * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and
+ * NL80211_CMD_ASSOCIATE events
+ * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets)
+ * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type,
+ * represented as a u32
+ * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and
+ * %NL80211_CMD_DISASSOCIATE, u16
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -426,6 +485,13 @@ enum nl80211_attrs {
NL80211_ATTR_REG_INITIATOR,
NL80211_ATTR_REG_TYPE,
+ NL80211_ATTR_SUPPORTED_COMMANDS,
+
+ NL80211_ATTR_FRAME,
+ NL80211_ATTR_SSID,
+ NL80211_ATTR_AUTH_TYPE,
+ NL80211_ATTR_REASON_CODE,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -445,6 +511,10 @@ enum nl80211_attrs {
#define NL80211_ATTR_IE NL80211_ATTR_IE
#define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR
#define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE
+#define NL80211_ATTR_FRAME NL80211_ATTR_FRAME
+#define NL80211_ATTR_SSID NL80211_ATTR_SSID
+#define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE
+#define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_REG_RULES 32
@@ -978,4 +1048,18 @@ enum nl80211_bss {
NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1
};
+/**
+ * enum nl80211_auth_type - AuthenticationType
+ *
+ * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication
+ * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only)
+ * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
+ * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
+ */
+enum nl80211_auth_type {
+ NL80211_AUTHTYPE_OPEN_SYSTEM,
+ NL80211_AUTHTYPE_SHARED_KEY,
+ NL80211_AUTHTYPE_FT,
+ NL80211_AUTHTYPE_NETWORK_EAP,
+};
#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 097f410edefa..05dfa7c4fb64 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2271,6 +2271,8 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
+#define PCI_VENDOR_ID_QMI 0x1a32
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d72d5d84fde5..78c48895b12a 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -198,6 +198,7 @@ struct mem_dqblk {
qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */
qsize_t dqb_curspace; /* current used space */
+ qsize_t dqb_rsvspace; /* current reserved space for delalloc */
qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
qsize_t dqb_isoftlimit; /* preferred inode limit */
qsize_t dqb_curinodes; /* current # allocated inodes */
@@ -276,8 +277,6 @@ struct dquot {
struct mem_dqblk dq_dqb; /* Diskquota usage */
};
-#define NODQUOT (struct dquot *)NULL
-
#define QUOTA_OK 0
#define NO_QUOTA 1
@@ -308,6 +307,14 @@ struct dquot_operations {
int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
+ /* reserve quota for delayed block allocation */
+ int (*reserve_space) (struct inode *, qsize_t, int);
+ /* claim reserved quota for delayed alloc */
+ int (*claim_space) (struct inode *, qsize_t);
+ /* release reserved quota for delayed alloc */
+ void (*release_rsv) (struct inode *, qsize_t);
+ /* get reserved quota for delayed alloc */
+ qsize_t (*get_reserved_space) (struct inode *);
};
/* Operations handling requests from userspace */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 0b35b3a1be05..36353d95c8db 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -35,6 +35,11 @@ void dquot_destroy(struct dquot *dquot);
int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
int dquot_alloc_inode(const struct inode *inode, qsize_t number);
+int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
+int dquot_claim_space(struct inode *inode, qsize_t number);
+void dquot_release_reserved_space(struct inode *inode, qsize_t number);
+qsize_t dquot_get_reserved_space(struct inode *inode);
+
int dquot_free_space(struct inode *inode, qsize_t number);
int dquot_free_inode(const struct inode *inode, qsize_t number);
@@ -183,6 +188,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
return ret;
}
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+ if (sb_any_quota_active(inode->i_sb)) {
+ /* Used space is updated in alloc_space() */
+ if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
+ return 1;
+ }
+ return 0;
+}
+
static inline int vfs_dq_alloc_inode(struct inode *inode)
{
if (sb_any_quota_active(inode->i_sb)) {
@@ -193,6 +208,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode)
return 0;
}
+/*
+ * Convert in-memory reserved quotas to real consumed quotas
+ */
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+ if (sb_any_quota_active(inode->i_sb)) {
+ if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
+ return 1;
+ } else
+ inode_add_bytes(inode, nr);
+
+ mark_inode_dirty(inode);
+ return 0;
+}
+
+/*
+ * Release reserved (in-memory) quotas
+ */
+static inline
+void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+ if (sb_any_quota_active(inode->i_sb))
+ inode->i_sb->dq_op->release_rsv(inode, nr);
+}
+
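To make the intended calling sequence concrete, here is a hedged sketch (not from the patch) of how a filesystem using delayed allocation would be expected to drive these helpers; example_allocate_extent() and the error handling are purely illustrative:

static int example_delalloc_write(struct inode *inode, qsize_t bytes)
{
	if (vfs_dq_reserve_space(inode, bytes))
		return -EDQUOT;		/* nothing was reserved */

	if (example_allocate_extent(inode, bytes)) {
		/* allocation failed: give the reservation back */
		vfs_dq_release_reservation_space(inode, bytes);
		return -ENOSPC;
	}

	/* turn the in-memory reservation into real usage */
	return vfs_dq_claim_space(inode, bytes) ? -EDQUOT : 0;
}
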
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb))
@@ -339,6 +379,22 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
return 0;
}
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+ return 0;
+}
+
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+ return vfs_dq_alloc_space(inode, nr);
+}
+
+static inline
+void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+}
+
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
inode_sub_bytes(inode, nr);
@@ -354,67 +410,48 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
{
- return vfs_dq_prealloc_space_nodirty(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
{
- return vfs_dq_prealloc_space(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
{
- return vfs_dq_alloc_space_nodirty(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
{
- return vfs_dq_alloc_space(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
+{
+ return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
+{
+ return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
+}
+
+static inline
+void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
+{
+ vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
}
static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
{
- vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits);
+ vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
{
- vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits);
+ vfs_dq_free_space(inode, nr << inode->i_blkbits);
}
-/*
- * Define uppercase equivalents for compatibility with old function names
- * Can go away when we think all users have been converted (15/04/2008)
- */
-#define DQUOT_INIT(inode) vfs_dq_init(inode)
-#define DQUOT_DROP(inode) vfs_dq_drop(inode)
-#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
- vfs_dq_prealloc_space_nodirty(inode, nr)
-#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
-#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
- vfs_dq_alloc_space_nodirty(inode, nr)
-#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
-#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
- vfs_dq_prealloc_block_nodirty(inode, nr)
-#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
-#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
- vfs_dq_alloc_block_nodirty(inode, nr)
-#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
-#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
-#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
- vfs_dq_free_space_nodirty(inode, nr)
-#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
-#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
- vfs_dq_free_block_nodirty(inode, nr)
-#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
-#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
-#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
-#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
-#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
-#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
-
#endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 50f3fd9ff524..5389afdc1297 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -471,26 +471,6 @@ struct ieee80211_txq_params {
u8 aifs;
};
-/**
- * struct mgmt_extra_ie_params - Extra management frame IE parameters
- *
- * Used to add extra IE(s) into management frames. If the driver cannot add the
- * requested data into all management frames of the specified subtype that are
- * generated in kernel or firmware/hardware, it must reject the configuration
- * call. The IE data buffer is added to the end of the specified management
- * frame body after all other IEs. This addition is not applied to frames that
- * are injected through a monitor interface.
- *
- * @subtype: Management frame subtype
- * @ies: IE data buffer or %NULL to remove previous data
- * @ies_len: Length of @ies in octets
- */
-struct mgmt_extra_ie_params {
- u8 subtype;
- u8 *ies;
- int ies_len;
-};
-
/* from net/wireless.h */
struct wiphy;
@@ -559,6 +539,7 @@ enum cfg80211_signal_type {
* is no guarantee that these are well-formed!)
* @len_information_elements: total length of the information elements
* @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @hold: BSS should not expire
* @free_priv: function pointer to free private data
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
@@ -579,6 +560,105 @@ struct cfg80211_bss {
};
/**
+ * struct cfg80211_auth_request - Authentication request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * authentication.
+ * NOTE: This structure will likely change when more code from mac80211 is
+ * moved into cfg80211 so that non-mac80211 drivers can benefit from it, too.
+ * Before using this in a driver that does not use mac80211, it would be better
+ * to check the status of that work and better yet, volunteer to work on it.
+ *
+ * @chan: The channel to use or %NULL if not specified (auto-select based on
+ * scan results)
+ * @peer_addr: The address of the peer STA (AP BSSID in infrastructure case);
+ * this field is required to be present; if the driver wants to help with
+ * BSS selection, it should use a (yet to be added) MLME event to allow the
+ * user space SME to be notified of a roaming candidate, so that the SME can
+ * then use the authentication request with the recommended BSSID and whatever
+ * other data may be needed for authentication/association
+ * @ssid: SSID or %NULL if not yet available
+ * @ssid_len: Length of ssid in octets
+ * @auth_type: Authentication type (algorithm)
+ * @ie: Extra IEs to add to Authentication frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ */
+struct cfg80211_auth_request {
+ struct ieee80211_channel *chan;
+ u8 *peer_addr;
+ const u8 *ssid;
+ size_t ssid_len;
+ enum nl80211_auth_type auth_type;
+ const u8 *ie;
+ size_t ie_len;
+};
+
+/**
+ * struct cfg80211_assoc_request - (Re)Association request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * (re)association.
+ * NOTE: This structure will likely change when more code from mac80211 is
+ * moved into cfg80211 so that non-mac80211 drivers can benefit from it, too.
+ * Before using this in a driver that does not use mac80211, it would be better
+ * to check the status of that work and better yet, volunteer to work on it.
+ *
+ * @chan: The channel to use or %NULL if not specified (auto-select based on
+ * scan results)
+ * @peer_addr: The address of the peer STA (AP BSSID); this field is required
+ * to be present and the STA must be in State 2 (authenticated) with the
+ * peer STA
+ * @ssid: SSID
+ * @ssid_len: Length of ssid in octets
+ * @ie: Extra IEs to add to (Re)Association Request frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ */
+struct cfg80211_assoc_request {
+ struct ieee80211_channel *chan;
+ u8 *peer_addr;
+ const u8 *ssid;
+ size_t ssid_len;
+ const u8 *ie;
+ size_t ie_len;
+};
+
+/**
+ * struct cfg80211_deauth_request - Deauthentication request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * deauthentication.
+ *
+ * @peer_addr: The address of the peer STA (AP BSSID); this field is required
+ * to be present and the STA must be authenticated with the peer STA
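+ * @reason_code: The reason code to use in the Deauthentication frame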
+ * @ie: Extra IEs to add to Deauthentication frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ */
+struct cfg80211_deauth_request {
+ u8 *peer_addr;
+ u16 reason_code;
+ const u8 *ie;
+ size_t ie_len;
+};
+
+/**
+ * struct cfg80211_disassoc_request - Disassociation request data
+ *
+ * This structure provides information needed to complete IEEE 802.11
+ * disassociation.
+ *
+ * @peer_addr: The address of the peer STA (AP BSSID); this field is required
+ * to be present and the STA must be associated with the peer STA
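+ * @reason_code: The reason code to use in the Disassociation frame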
+ * @ie: Extra IEs to add to Disassociation frame or %NULL
+ * @ie_len: Length of ie buffer in octets
+ */
+struct cfg80211_disassoc_request {
+ u8 *peer_addr;
+ u16 reason_code;
+ const u8 *ie;
+ size_t ie_len;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -644,12 +724,15 @@ struct cfg80211_bss {
*
* @set_channel: Set channel
*
- * @set_mgmt_extra_ie: Set extra IE data for management frames
- *
* @scan: Request to do a scan. If returning zero, the scan request is given
* to the driver, and will be valid until passed to cfg80211_scan_done().
* For scan results, call cfg80211_inform_bss(); you can call this outside
* the scan/scan_done bracket too.
+ *
+ * @auth: Request to authenticate with the specified peer
+ * @assoc: Request to (re)associate with the specified peer
+ * @deauth: Request to deauthenticate from the specified peer
+ * @disassoc: Request to disassociate from the specified peer
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy);
@@ -724,12 +807,17 @@ struct cfg80211_ops {
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type);
- int (*set_mgmt_extra_ie)(struct wiphy *wiphy,
- struct net_device *dev,
- struct mgmt_extra_ie_params *params);
-
int (*scan)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_scan_request *request);
+
+ int (*auth)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_auth_request *req);
+ int (*assoc)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_assoc_request *req);
+ int (*deauth)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_deauth_request *req);
+ int (*disassoc)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_disassoc_request *req);
};
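
As a hedged sketch of what the new callbacks look like from a driver's perspective (not part of the patch; all mydrv_* names are hypothetical):

static int mydrv_auth(struct wiphy *wiphy, struct net_device *dev,
		      struct cfg80211_auth_request *req)
{
	/* hand req->peer_addr, req->auth_type and req->ie to the firmware */
	return 0;
}

static struct cfg80211_ops mydrv_cfg80211_ops = {
	/* ... other callbacks ... */
	.auth = mydrv_auth,
	/* .assoc, .deauth and .disassoc are wired up the same way */
};
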
/* temporary wext handlers */
@@ -807,4 +895,67 @@ void cfg80211_put_bss(struct cfg80211_bss *bss);
*/
void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
+/**
+ * cfg80211_send_rx_auth - notification of processed authentication
+ * @dev: network device
+ * @buf: authentication frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever an authentication has been processed in
+ * station mode.
+ */
+void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
+
+/**
+ * cfg80211_send_rx_assoc - notification of processed association
+ * @dev: network device
+ * @buf: (re)association response frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever a (re)association response has been
+ * processed in station mode.
+ */
+void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len);
+
+/**
+ * cfg80211_send_rx_deauth - notification of processed deauthentication
+ * @dev: network device
+ * @buf: deauthentication frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever deauthentication has been processed in
+ * station mode.
+ */
+void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf,
+ size_t len);
+
+/**
+ * cfg80211_send_rx_disassoc - notification of processed disassociation
+ * @dev: network device
+ * @buf: disassociation response frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever disassociation has been processed in
+ * station mode.
+ */
+void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf,
+ size_t len);
+
+/**
+ * cfg80211_hold_bss - exclude bss from expiration
+ * @bss: bss which should not expire
+ *
+ * In cases where the BSS is not updated but should not expire, this
+ * function can be used to mark the BSS as excluded from expiration.
+ */
+void cfg80211_hold_bss(struct cfg80211_bss *bss);
+
+/**
+ * cfg80211_unhold_bss - remove expiration exception from the BSS
+ * @bss: bss which can expire again
+ *
+ * This function marks the BSS as expirable again.
+ */
+void cfg80211_unhold_bss(struct cfg80211_bss *bss);
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
new file mode 100644
index 000000000000..96f3789b27bc
--- /dev/null
+++ b/include/net/ethoc.h
@@ -0,0 +1,22 @@
+/*
+ * linux/include/net/ethoc.h
+ *
+ * Copyright (C) 2008-2009 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by Thierry Reding <thierry.reding@avionic-design.de>
+ */
+
+#ifndef LINUX_NET_ETHOC_H
+#define LINUX_NET_ETHOC_H 1
+
+struct ethoc_platform_data {
+ u8 hwaddr[IFHWADDRLEN];
+ s8 phy_id;
+};
+
+#endif /* !LINUX_NET_ETHOC_H */
+
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 384698cb773a..23c3f3d97779 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -230,8 +230,10 @@ enum ieee80211_radiotap_type {
* 802.11 header and payload
* (to 32-bit boundary)
*/
+#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* bad FCS */
+
/* For IEEE80211_RADIOTAP_RX_FLAGS */
-#define IEEE80211_RADIOTAP_F_RX_BADFCS 0x0001 /* frame failed crc check */
+#define IEEE80211_RADIOTAP_F_RX_BADPLCP 0x0002 /* frame has bad PLCP */
/* For IEEE80211_RADIOTAP_TX_FLAGS */
#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 12a52efcd0d1..3b83a80e3fe0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -93,12 +93,9 @@ struct ieee80211_ht_bss_info {
* enum ieee80211_max_queues - maximum number of queues
*
* @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
- * @IEEE80211_MAX_AMPDU_QUEUES: Maximum number of queues usable
- * for A-MPDU operation.
*/
enum ieee80211_max_queues {
- IEEE80211_MAX_QUEUES = 16,
- IEEE80211_MAX_AMPDU_QUEUES = 16,
+ IEEE80211_MAX_QUEUES = 4,
};
/**
@@ -245,6 +242,12 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be
* set by rate control algorithms to indicate probe rate, will
* be cleared for fragmented frames (except on the last fragment)
+ * @IEEE80211_TX_INTFL_RCALGO: mac80211 internal flag, do not test or
+ * set this flag in the driver; indicates that the rate control
+ * algorithm was used and should be notified of TX status
+ * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
+ * used to indicate that a pending frame requires TX processing before
+ * it can be sent out.
*/
enum mac80211_tx_control_flags {
IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
@@ -260,6 +263,8 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_STAT_AMPDU = BIT(10),
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
+ IEEE80211_TX_INTFL_RCALGO = BIT(13),
+ IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
};
/**
@@ -520,12 +525,6 @@ enum ieee80211_conf_flags {
IEEE80211_CONF_PS = (1<<1),
};
-/* XXX: remove all this once drivers stop trying to use it */
-static inline int __deprecated __IEEE80211_CONF_SHORT_SLOT_TIME(void)
-{
- return 0;
-}
-#define IEEE80211_CONF_SHORT_SLOT_TIME (__IEEE80211_CONF_SHORT_SLOT_TIME())
/**
* enum ieee80211_conf_changed - denotes which configuration changed
@@ -888,6 +887,10 @@ enum ieee80211_tkip_key_type {
*
* @IEEE80211_HW_MFP_CAPABLE:
* Hardware supports management frame protection (MFP, IEEE 802.11w).
+ *
+ * @IEEE80211_HW_BEACON_FILTER:
+ * Hardware supports dropping of irrelevant beacon frames to
+ * avoid waking up the CPU.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
@@ -903,6 +906,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
IEEE80211_HW_MFP_CAPABLE = 1<<13,
+ IEEE80211_HW_BEACON_FILTER = 1<<14,
};
/**
@@ -945,12 +949,6 @@ enum ieee80211_hw_flags {
* data packets. WMM/QoS requires at least four, these
* queues need to have configurable access parameters.
*
- * @ampdu_queues: number of available hardware transmit queues
- * for A-MPDU packets, these have no access parameters
- * because they're used only for A-MPDU frames. Note that
- * mac80211 will not currently use any of the regular queues
- * for aggregation.
- *
* @rate_control_algorithm: rate control algorithm for this hardware.
* If unset (NULL), the default algorithm will be used. Must be
* set before calling ieee80211_register_hw().
@@ -975,7 +973,6 @@ struct ieee80211_hw {
int vif_data_size;
int sta_data_size;
u16 queues;
- u16 ampdu_queues;
u16 max_listen_interval;
s8 max_signal;
u8 max_rates;
@@ -1017,11 +1014,6 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
}
-static inline int ieee80211_num_regular_queues(struct ieee80211_hw *hw)
-{
- return hw->queues;
-}
-
static inline struct ieee80211_rate *
ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *c)
@@ -1132,6 +1124,24 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
*/
/**
+ * DOC: Beacon filter support
+ *
+ * Some hardware have beacon filter support to reduce host cpu wakeups,
+ * which in turn reduces system power consumption. It usually works so that
+ * the firmware creates a checksum of the beacon but omits all constantly
+ * changing elements (TSF, TIM etc). Whenever the checksum changes, the
+ * beacon is forwarded to the host; otherwise it is just dropped. That
+ * way the host will only receive beacons where some relevant information
+ * (for example ERP protection or WMM settings) has changed.
+ *
+ * Beacon filter support is advertised with the %IEEE80211_HW_BEACON_FILTER flag.
+ * The driver needs to enable beacon filter support whenever power save is
+ * enabled, that is, whenever %IEEE80211_CONF_PS is set. When power save is enabled,
+ * the stack will not check for beacon miss at all and the driver needs to
+ * notify about complete loss of beacons with ieee80211_beacon_loss().
+ */
+
+/**
* DOC: Frame filtering
*
* mac80211 requires to see many management frames for proper
@@ -1220,14 +1230,14 @@ enum ieee80211_filter_flags {
* @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation
* @IEEE80211_AMPDU_TX_START: start Tx aggregation
* @IEEE80211_AMPDU_TX_STOP: stop Tx aggregation
- * @IEEE80211_AMPDU_TX_RESUME: resume TX aggregation
+ * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational
*/
enum ieee80211_ampdu_mlme_action {
IEEE80211_AMPDU_RX_START,
IEEE80211_AMPDU_RX_STOP,
IEEE80211_AMPDU_TX_START,
IEEE80211_AMPDU_TX_STOP,
- IEEE80211_AMPDU_TX_RESUME,
+ IEEE80211_AMPDU_TX_OPERATIONAL,
};
/**
@@ -1318,11 +1328,13 @@ enum ieee80211_ampdu_mlme_action {
*
* @hw_scan: Ask the hardware to service the scan request, no need to start
* the scan state machine in stack. The scan must honour the channel
- * configuration done by the regulatory agent in the wiphy's registered
- * bands. When the scan finishes, ieee80211_scan_completed() must be
- * called; note that it also must be called when the scan cannot finish
- * because the hardware is turned off! Anything else is a bug!
- * Returns a negative error code which will be seen in userspace.
+ * configuration done by the regulatory agent in the wiphy's
+ * registered bands. The hardware (or the driver) needs to make sure
+ * that power save is disabled. When the scan finishes,
+ * ieee80211_scan_completed() must be called; note that it also must
+ * be called when the scan cannot finish because the hardware is
+ * turned off! Anything else is a bug! Returns a negative error code
+ * which will be seen in userspace.
*
* @sw_scan_start: Notifier function that is called just before a software scan
* is started. Can be NULL, if the driver doesn't need this notification.
@@ -1350,8 +1362,8 @@ enum ieee80211_ampdu_mlme_action {
* @get_tx_stats: Get statistics of the current TX queue status. This is used
* to get number of currently queued packets (queue length), maximum queue
* size (limit), and total number of packets sent using each TX queue
- * (count). The 'stats' pointer points to an array that has hw->queues +
- * hw->ampdu_queues items.
+ * (count). The 'stats' pointer points to an array that has hw->queues
+ * items.
*
* @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
* this is only used for IBSS mode BSSID merging and debugging. Is not a
@@ -1979,6 +1991,16 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra,
struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
const u8 *addr);
+/**
+ * ieee80211_beacon_loss - inform that the hardware does not receive beacons
+ *
+ * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ *
+ * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER and
+ * %IEEE80211_CONF_PS is set, the driver needs to use this function to inform
+ * mac80211 whenever the hardware is not receiving beacons.
+ */
+void ieee80211_beacon_loss(struct ieee80211_vif *vif);
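
A hedged driver-side sketch (not part of the patch; the mydrv_* names and the private data layout are hypothetical) of reporting beacon loss from a beacon-miss workqueue item:

struct mydrv_priv {
	struct ieee80211_vif *vif;
	struct work_struct beacon_miss_work;
};

static void mydrv_beacon_miss_work(struct work_struct *work)
{
	struct mydrv_priv *priv =
		container_of(work, struct mydrv_priv, beacon_miss_work);

	ieee80211_beacon_loss(priv->vif);
}
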
/* Rate control API */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 4dfb793c3f15..6c3f964de9e1 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -91,8 +91,7 @@ struct nf_conn_help {
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
-struct nf_conn
-{
+struct nf_conn {
/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
plus 1 for any connection(s) we are `master' for */
struct nf_conntrack ct_general;
@@ -126,7 +125,6 @@ struct nf_conn
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
- struct rcu_head rcu;
};
static inline struct nf_conn *
@@ -190,9 +188,13 @@ static inline void nf_ct_put(struct nf_conn *ct)
extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
extern void nf_ct_l3proto_module_put(unsigned short l3proto);
-extern struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced);
-extern void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced,
- unsigned int size);
+/*
+ * Allocate a hashtable of hlist_head (if nulls == 0),
+ * or hlist_nulls_head (if nulls == 1)
+ */
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+
+extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple);
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 66d65a7caa39..ee2a4b369a04 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -14,6 +14,8 @@
struct module;
+#define NF_CT_HELPER_NAME_LEN 16
+
struct nf_conntrack_helper
{
struct hlist_node hnode; /* Internal use. */
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 0378676c3dd8..9f99d36d5de9 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -53,10 +53,17 @@ struct nf_conntrack_l3proto
int (*tuple_to_nlattr)(struct sk_buff *skb,
const struct nf_conntrack_tuple *t);
+ /*
+ * Calculate size of tuple nlattr
+ */
+ int (*nlattr_tuple_size)(void);
+
int (*nlattr_to_tuple)(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
+ size_t nla_size;
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header *ctl_table_header;
struct ctl_path *ctl_table_path;
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index b01070bf2f84..ba32ed7bdabe 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -64,16 +64,22 @@ struct nf_conntrack_l4proto
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
const struct nf_conn *ct);
+ /* Calculate protoinfo nlattr size */
+ int (*nlattr_size)(void);
/* convert nfnetlink attributes to protoinfo */
int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
int (*tuple_to_nlattr)(struct sk_buff *skb,
const struct nf_conntrack_tuple *t);
+ /* Calculate tuple nlattr size */
+ int (*nlattr_tuple_size)(void);
int (*nlattr_to_tuple)(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
+ size_t nla_size;
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header **ctl_table_header;
struct ctl_table *ctl_table;
@@ -107,6 +113,7 @@ extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple);
extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
+extern int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
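[Editorial sketch, not part of the patch: the new nlattr_size()/nlattr_tuple_size() callbacks and the cached nla_size fields let ctnetlink estimate attribute payloads up front instead of over-allocating. A rough illustration under that assumption follows; the helper name is made up, it presumes ->nla_size was filled in at protocol registration, and the real ctnetlink sizing also reserves room for fixed attributes such as status and timeout.]

static size_t example_ctnetlink_msg_size(const struct nf_conn *ct)
{
	const struct nf_conntrack_l3proto *l3proto;
	const struct nf_conntrack_l4proto *l4proto;

	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));

	/* nfnetlink header plus per-family and per-protocol attributes */
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + l3proto->nla_size
	       + l4proto->nla_size;
}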
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index f2f6aa73dc10..2628c154d40e 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -12,6 +12,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/list_nulls.h>
/* A `tuple' is a structure containing the information to uniquely
identify a connection. ie. if two packets have the same tuple, they
@@ -146,9 +147,8 @@ static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
((enum ip_conntrack_dir)(h)->tuple.dst.dir)
/* Connections have two entries in the hash table: one for each way */
-struct nf_conntrack_tuple_hash
-{
- struct hlist_node hnode;
+struct nf_conntrack_tuple_hash {
+ struct hlist_nulls_node hnnode;
struct nf_conntrack_tuple tuple;
};
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 8a6150a3f4c7..eddb50289d6d 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -230,6 +230,7 @@ extern int nla_validate(struct nlattr *head, int len, int maxtype,
extern int nla_parse(struct nlattr *tb[], int maxtype,
struct nlattr *head, int len,
const struct nla_policy *policy);
+extern int nla_policy_len(const struct nla_policy *, int);
extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype);
extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
size_t dstsize);
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index f4498a62881b..9dc58402bc09 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -2,6 +2,7 @@
#define __NETNS_CONNTRACK_H
#include <linux/list.h>
+#include <linux/list_nulls.h>
#include <asm/atomic.h>
struct ctl_table_header;
@@ -10,9 +11,9 @@ struct nf_conntrack_ecache;
struct netns_ct {
atomic_t count;
unsigned int expect_count;
- struct hlist_head *hash;
+ struct hlist_nulls_head *hash;
struct hlist_head *expect_hash;
- struct hlist_head unconfirmed;
+ struct hlist_nulls_head unconfirmed;
struct ip_conntrack_stat *stat;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_ecache *ecache;
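[Editorial sketch, not part of the patch: switching the conntrack hash and unconfirmed list to hlist_nulls_head (and dropping the per-conntrack rcu_head earlier in this diff) enables SLAB_DESTROY_BY_RCU-style lockless lookups. The end-of-chain "nulls" value encodes the bucket, so a reader that raced with an entry being freed and reused can detect that it wandered into another chain and restart. The sketch below shows that pattern; the real lookup lives in nf_conntrack_core.c and the "example_" names are illustrative. It must run under rcu_read_lock().]

static struct nf_conntrack_tuple_hash *
example_find(struct net *net, unsigned int bucket,
	     const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;

begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple))
			return h;
	}
	/*
	 * The nulls value at the end of the chain encodes the bucket;
	 * if it differs, the entry we followed was moved to another
	 * chain while we walked it, so restart the walk.
	 */
	if (get_nulls_value(n) != bucket)
		goto begin;
	return NULL;
}

[Because entries can be recycled under SLAB_DESTROY_BY_RCU, callers additionally pin the result with atomic_inc_not_zero(), as the /proc seq_file code further down in this diff now does before printing.]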
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9edb5c4b79b4..c500ca7239b2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1071,7 +1071,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
mutex_unlock(&cgroup_mutex);
}
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
free_cg_links:
free_cg_links(&tmp_cg_links);
@@ -1627,7 +1628,7 @@ static struct inode_operations cgroup_dir_inode_operations = {
static int cgroup_create_file(struct dentry *dentry, int mode,
struct super_block *sb)
{
- static struct dentry_operations cgroup_dops = {
+ static const struct dentry_operations cgroup_dops = {
.d_iput = cgroup_diput,
};
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 80009a24e21d..c4706eb98d3d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -133,6 +133,32 @@ errout:
}
/**
+ * nla_policy_len - Determine the max. length of a policy
+ * @policy: policy to use
+ * @n: number of policies
+ *
+ * Determines the max. length of the policy. It is currently used
+ * to allocate Netlink buffers roughly the size of the actual
+ * message.
+ *
+ * Returns the max. total length of the attributes described by the policy.
+ */
+int
+nla_policy_len(const struct nla_policy *p, int n)
+{
+ int i, len = 0;
+
+ for (i = 0; i < n; i++, p++) {
+ if (p->len)
+ len += nla_total_size(p->len);
+ else if (nla_attr_minlen[p->type])
+ len += nla_total_size(nla_attr_minlen[p->type]);
+ }
+
+ return len;
+}
+
+/**
* nla_parse - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
@@ -467,6 +493,7 @@ EXPORT_SYMBOL(nla_append);
#endif
EXPORT_SYMBOL(nla_validate);
+EXPORT_SYMBOL(nla_policy_len);
EXPORT_SYMBOL(nla_parse);
EXPORT_SYMBOL(nla_find);
EXPORT_SYMBOL(nla_strlcpy);
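[Editorial sketch, not part of the patch: nla_policy_len() walks a policy array and sums the worst-case attribute sizes; the icmp/ipv4/ipv6 nlattr_tuple_size() helpers added further down use it exactly this way on their CTA_* policies. The self-contained illustration below uses a made-up EX_ATTR_* policy.]

#include <net/netlink.h>

enum {
	EX_ATTR_UNSPEC,
	EX_ATTR_COOKIE,		/* u32 */
	EX_ATTR_NAME,		/* bounded string */
	__EX_ATTR_MAX
};
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_COOKIE] = { .type = NLA_U32 },
	[EX_ATTR_NAME]	 = { .type = NLA_NUL_STRING, .len = 16 },
};

static int ex_attr_size(void)
{
	/* worst-case payload for all attributes in ex_policy */
	return nla_policy_len(ex_policy, EX_ATTR_MAX + 1);
}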
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 3e0671df3a3f..d6a9243641af 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1571,14 +1571,10 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
usat->sat_family != AF_APPLETALK)
return -EINVAL;
- /* netatalk doesn't implement this check */
+ /* netatalk didn't implement this check */
if (usat->sat_addr.s_node == ATADDR_BCAST &&
!sock_flag(sk, SOCK_BROADCAST)) {
- printk(KERN_INFO "SO_BROADCAST: Fix your netatalk as "
- "it will break before 2.2\n");
-#if 0
return -EPERM;
-#endif
}
} else {
if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 7da5ebb84e97..fd9d06f291dc 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1435,11 +1435,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
size_t size;
int lv, err, addr_len = msg->msg_namelen;
- /* AX.25 empty data frame has no meaning : don't send */
- if (len == 0) {
- return (0);
- }
-
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
@@ -1639,13 +1634,6 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
copied = skb->len;
- /* AX.25 empty data frame has no meaning : ignore it */
- if (copied == 0) {
- err = copied;
- skb_free_datagram(sk, skb);
- goto out;
- }
-
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
diff --git a/net/core/dev.c b/net/core/dev.c
index 052dd478d3e1..63ec4bf89b29 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2627,18 +2627,15 @@ static int process_backlog(struct napi_struct *napi, int quota)
local_irq_disable();
skb = __skb_dequeue(&queue->input_pkt_queue);
if (!skb) {
+ __napi_complete(napi);
local_irq_enable();
- napi_complete(napi);
- goto out;
+ break;
}
local_irq_enable();
- napi_gro_receive(napi, skb);
+ netif_receive_skb(skb);
} while (++work < quota && jiffies == start_time);
- napi_gro_flush(napi);
-
-out:
return work;
}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 84b9c179df51..35c5f6a5cb7c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -81,19 +81,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- const unsigned long *a = (const unsigned long *)_a;
- const unsigned long *b = (const unsigned long *)_b;
- const unsigned long *mask = (const unsigned long *)_mask;
- unsigned long ret;
-
- ret = (a[0] ^ b[0]) & mask[0];
- if (IFNAMSIZ > sizeof(unsigned long))
- ret |= (a[1] ^ b[1]) & mask[1];
- if (IFNAMSIZ > 2 * sizeof(unsigned long))
- ret |= (a[2] ^ b[2]) & mask[2];
- if (IFNAMSIZ > 3 * sizeof(unsigned long))
- ret |= (a[3] ^ b[3]) & mask[3];
- BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+ unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
unsigned long ret = 0;
const u16 *a = (const u16 *)_a;
@@ -404,7 +392,9 @@ static int mark_source_chains(struct xt_table_info *newinfo,
&& unconditional(&e->arp)) || visited) {
unsigned int oldpos, size;
- if (t->verdict < -NF_MAX_VERDICT - 1) {
+ if ((strcmp(t->target.u.user.name,
+ ARPT_STANDARD_TARGET) == 0) &&
+ t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e5294aec967d..82ee7c9049ff 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -74,25 +74,6 @@ do { \
Hence the start of any table is given by get_table() below. */
-static unsigned long ifname_compare(const char *_a, const char *_b,
- const unsigned char *_mask)
-{
- const unsigned long *a = (const unsigned long *)_a;
- const unsigned long *b = (const unsigned long *)_b;
- const unsigned long *mask = (const unsigned long *)_mask;
- unsigned long ret;
-
- ret = (a[0] ^ b[0]) & mask[0];
- if (IFNAMSIZ > sizeof(unsigned long))
- ret |= (a[1] ^ b[1]) & mask[1];
- if (IFNAMSIZ > 2 * sizeof(unsigned long))
- ret |= (a[2] ^ b[2]) & mask[2];
- if (IFNAMSIZ > 3 * sizeof(unsigned long))
- ret |= (a[3] ^ b[3]) & mask[3];
- BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
- return ret;
-}
-
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
@@ -121,7 +102,7 @@ ip_packet_match(const struct iphdr *ip,
return false;
}
- ret = ifname_compare(indev, ipinfo->iniface, ipinfo->iniface_mask);
+ ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -130,7 +111,7 @@ ip_packet_match(const struct iphdr *ip,
return false;
}
- ret = ifname_compare(outdev, ipinfo->outiface, ipinfo->outiface_mask);
+ ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -507,7 +488,9 @@ mark_source_chains(struct xt_table_info *newinfo,
&& unconditional(&e->ip)) || visited) {
unsigned int oldpos, size;
- if (t->verdict < -NF_MAX_VERDICT - 1) {
+ if ((strcmp(t->target.u.user.name,
+ IPT_STANDARD_TARGET) == 0) &&
+ t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 8b681f24e271..7d2ead7228ac 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -328,6 +328,11 @@ static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
+
+static int ipv4_nlattr_tuple_size(void)
+{
+ return nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1);
+}
#endif
static struct nf_sockopt_ops so_getorigdst = {
@@ -347,6 +352,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
.get_l4proto = ipv4_get_l4proto,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = ipv4_tuple_to_nlattr,
+ .nlattr_tuple_size = ipv4_nlattr_tuple_size,
.nlattr_to_tuple = ipv4_nlattr_to_tuple,
.nla_policy = ipv4_nla_policy,
#endif
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 6ba5c557690c..8668a3defda6 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -25,40 +25,42 @@ struct ct_iter_state {
unsigned int bucket;
};
-static struct hlist_node *ct_get_first(struct seq_file *seq)
+static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
for (st->bucket = 0;
st->bucket < nf_conntrack_htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
- if (n)
+ if (!is_a_nulls(n))
return n;
}
return NULL;
}
-static struct hlist_node *ct_get_next(struct seq_file *seq,
- struct hlist_node *head)
+static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
+ struct hlist_nulls_node *head)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
head = rcu_dereference(head->next);
- while (head == NULL) {
- if (++st->bucket >= nf_conntrack_htable_size)
- return NULL;
+ while (is_a_nulls(head)) {
+ if (likely(get_nulls_value(head) == st->bucket)) {
+ if (++st->bucket >= nf_conntrack_htable_size)
+ return NULL;
+ }
head = rcu_dereference(net->ct.hash[st->bucket].first);
}
return head;
}
-static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
+static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
- struct hlist_node *head = ct_get_first(seq);
+ struct hlist_nulls_node *head = ct_get_first(seq);
if (head)
while (pos && (head = ct_get_next(seq, head)))
@@ -87,69 +89,76 @@ static void ct_seq_stop(struct seq_file *s, void *v)
static int ct_seq_show(struct seq_file *s, void *v)
{
- const struct nf_conntrack_tuple_hash *hash = v;
- const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
+ struct nf_conntrack_tuple_hash *hash = v;
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
+ int ret = 0;
NF_CT_ASSERT(ct);
+ if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ return 0;
+
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
- return 0;
+ goto release;
if (nf_ct_l3num(ct) != AF_INET)
- return 0;
+ goto release;
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
NF_CT_ASSERT(l3proto);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
NF_CT_ASSERT(l4proto);
+ ret = -ENOSPC;
if (seq_printf(s, "%-8s %u %ld ",
l4proto->name, nf_ct_protonum(ct),
timer_pending(&ct->timeout)
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
- return -ENOSPC;
+ goto release;
if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
- return -ENOSPC;
+ goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l3proto, l4proto))
- return -ENOSPC;
+ goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
- return -ENOSPC;
+ goto release;
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
if (seq_printf(s, "[UNREPLIED] "))
- return -ENOSPC;
+ goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
l3proto, l4proto))
- return -ENOSPC;
+ goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
- return -ENOSPC;
+ goto release;
if (test_bit(IPS_ASSURED_BIT, &ct->status))
if (seq_printf(s, "[ASSURED] "))
- return -ENOSPC;
+ goto release;
#ifdef CONFIG_NF_CONNTRACK_MARK
if (seq_printf(s, "mark=%u ", ct->mark))
- return -ENOSPC;
+ goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if (seq_printf(s, "secmark=%u ", ct->secmark))
- return -ENOSPC;
+ goto release;
#endif
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
- return -ENOSPC;
-
- return 0;
+ goto release;
+ ret = 0;
+release:
+ nf_ct_put(ct);
+ return ret;
}
static const struct seq_operations ct_seq_ops = {
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 2a8bee26f43d..23b2c2ee869a 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -262,6 +262,11 @@ static int icmp_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
+
+static int icmp_nlattr_tuple_size(void)
+{
+ return nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1);
+}
#endif
#ifdef CONFIG_SYSCTL
@@ -309,6 +314,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
.me = NULL,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = icmp_tuple_to_nlattr,
+ .nlattr_tuple_size = icmp_nlattr_tuple_size,
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index a65cf692359f..fe65187810f0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -679,7 +679,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
static int __net_init nf_nat_net_init(struct net *net)
{
net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
- &net->ipv4.nat_vmalloced);
+ &net->ipv4.nat_vmalloced, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f171e8dbac91..8f04bd9da274 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -75,8 +75,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
!idev || unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
- rcu_read_unlock();
- goto out;
+ goto drop;
}
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
@@ -147,7 +146,6 @@ err:
drop:
rcu_read_unlock();
kfree_skb(skb);
-out:
return 0;
}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 34af7bb8df5f..e89cfa3a8f25 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -89,25 +89,6 @@ ip6t_ext_hdr(u8 nexthdr)
(nexthdr == IPPROTO_DSTOPTS) );
}
-static unsigned long ifname_compare(const char *_a, const char *_b,
- const unsigned char *_mask)
-{
- const unsigned long *a = (const unsigned long *)_a;
- const unsigned long *b = (const unsigned long *)_b;
- const unsigned long *mask = (const unsigned long *)_mask;
- unsigned long ret;
-
- ret = (a[0] ^ b[0]) & mask[0];
- if (IFNAMSIZ > sizeof(unsigned long))
- ret |= (a[1] ^ b[1]) & mask[1];
- if (IFNAMSIZ > 2 * sizeof(unsigned long))
- ret |= (a[2] ^ b[2]) & mask[2];
- if (IFNAMSIZ > 3 * sizeof(unsigned long))
- ret |= (a[3] ^ b[3]) & mask[3];
- BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
- return ret;
-}
-
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
@@ -138,7 +119,7 @@ ip6_packet_match(const struct sk_buff *skb,
return false;
}
- ret = ifname_compare(indev, ip6info->iniface, ip6info->iniface_mask);
+ ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -147,7 +128,7 @@ ip6_packet_match(const struct sk_buff *skb,
return false;
}
- ret = ifname_compare(outdev, ip6info->outiface, ip6info->outiface_mask);
+ ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -536,7 +517,9 @@ mark_source_chains(struct xt_table_info *newinfo,
&& unconditional(&e->ipv6)) || visited) {
unsigned int oldpos, size;
- if (t->verdict < -NF_MAX_VERDICT - 1) {
+ if ((strcmp(t->target.u.user.name,
+ IP6T_STANDARD_TARGET) == 0) &&
+ t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index e6852f617217..2a15c2d66c69 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -342,6 +342,11 @@ static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
+
+static int ipv6_nlattr_tuple_size(void)
+{
+ return nla_policy_len(ipv6_nla_policy, CTA_IP_MAX + 1);
+}
#endif
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
@@ -353,6 +358,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
.get_l4proto = ipv6_get_l4proto,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = ipv6_tuple_to_nlattr,
+ .nlattr_tuple_size = ipv6_nlattr_tuple_size,
.nlattr_to_tuple = ipv6_nlattr_to_tuple,
.nla_policy = ipv6_nla_policy,
#endif
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 41b8a956e1be..9903227bf37c 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -269,6 +269,11 @@ static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
+
+static int icmpv6_nlattr_tuple_size(void)
+{
+ return nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1);
+}
#endif
#ifdef CONFIG_SYSCTL
@@ -300,6 +305,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
.error = icmpv6_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = icmpv6_tuple_to_nlattr,
+ .nlattr_tuple_size = icmpv6_nlattr_tuple_size,
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a95affc94629..07656d830bc4 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -197,6 +197,14 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
+ if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "Suspend in progress. "
+ "Denying ADDBA request\n");
+#endif
+ goto end_no_lock;
+ }
+
/* sanity check for incoming parameters:
* check if configuration can support the BA policy
* and if buffer size does not exceeds max value */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 1df116d4d6e7..947aaaad35d2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -131,24 +131,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
state = &sta->ampdu_mlme.tid_state_tx[tid];
- if (local->hw.ampdu_queues) {
- if (initiator) {
- /*
- * Stop the AC queue to avoid issues where we send
- * unaggregated frames already before the delba.
- */
- ieee80211_stop_queue_by_reason(&local->hw,
- local->hw.queues + sta->tid_to_tx_q[tid],
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- }
-
- /*
- * Pretend the driver woke the queue, just in case
- * it disabled it before the session was stopped.
- */
- ieee80211_wake_queue(
- &local->hw, local->hw.queues + sta->tid_to_tx_q[tid]);
- }
*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
@@ -158,6 +140,10 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
*state = HT_AGG_STATE_OPERATIONAL;
+ /*
+ * Pending packets may get stuck in this case...
+ * Not bothering with a workaround for now.
+ */
}
return ret;
@@ -212,7 +198,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
struct sta_info *sta;
struct ieee80211_sub_if_data *sdata;
u8 *state;
- int i, qn = -1, ret = 0;
+ int ret = 0;
u16 start_seq_num;
if (WARN_ON(!local->ops->ampdu_action))
@@ -226,13 +212,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- if (hw->ampdu_queues && ieee80211_ac_from_tid(tid) == 0) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "rejecting on voice AC\n");
-#endif
- return -EINVAL;
- }
-
rcu_read_lock();
sta = sta_info_get(local, ra);
@@ -257,7 +236,17 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
goto unlock;
}
+ if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "Suspend in progress. "
+ "Denying BA session request\n");
+#endif
+ ret = -EINVAL;
+ goto unlock;
+ }
+
spin_lock_bh(&sta->lock);
+ spin_lock(&local->ampdu_lock);
sdata = sta->sdata;
@@ -278,41 +267,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
goto err_unlock_sta;
}
- if (hw->ampdu_queues) {
- spin_lock(&local->queue_stop_reason_lock);
- /* reserve a new queue for this session */
- for (i = 0; i < local->hw.ampdu_queues; i++) {
- if (local->ampdu_ac_queue[i] < 0) {
- qn = i;
- local->ampdu_ac_queue[qn] =
- ieee80211_ac_from_tid(tid);
- break;
- }
- }
- spin_unlock(&local->queue_stop_reason_lock);
-
- if (qn < 0) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - "
- "queue unavailable for tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
- ret = -ENOSPC;
- goto err_unlock_sta;
- }
-
- /*
- * If we successfully allocate the session, we can't have
- * anything going on on the queue this TID maps into, so
- * stop it for now. This is a "virtual" stop using the same
- * mechanism that drivers will use.
- *
- * XXX: queue up frames for this session in the sta_info
- * struct instead to avoid hitting all other STAs.
- */
- ieee80211_stop_queue_by_reason(
- &local->hw, hw->queues + qn,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- }
+ /*
+ * While we're asking the driver about the aggregation,
+ * stop the AC queue so that we don't have to worry
+ * about frames that arrive in the meantime; otherwise we would
+ * have to move them to the AC's pending queue afterwards,
+ * which just makes the code more complex.
+ */
+ ieee80211_stop_queue_by_reason(
+ &local->hw, ieee80211_ac_from_tid(tid),
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
/* prepare A-MPDU MLME for Tx aggregation */
sta->ampdu_mlme.tid_tx[tid] =
@@ -324,9 +288,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
tid);
#endif
ret = -ENOMEM;
- goto err_return_queue;
+ goto err_wake_queue;
}
+ skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending);
+
/* Tx timer */
sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
sta_addba_resp_timer_expired;
@@ -351,8 +317,13 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
*state = HT_AGG_STATE_IDLE;
goto err_free;
}
- sta->tid_to_tx_q[tid] = qn;
+ /* Driver vetoed or OKed, but we can take packets again now */
+ ieee80211_wake_queue_by_reason(
+ &local->hw, ieee80211_ac_from_tid(tid),
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+
+ spin_unlock(&local->ampdu_lock);
spin_unlock_bh(&sta->lock);
/* send an addBA request */
@@ -377,17 +348,12 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
err_free:
kfree(sta->ampdu_mlme.tid_tx[tid]);
sta->ampdu_mlme.tid_tx[tid] = NULL;
- err_return_queue:
- if (qn >= 0) {
- /* We failed, so start queue again right away. */
- ieee80211_wake_queue_by_reason(hw, hw->queues + qn,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- /* give queue back to pool */
- spin_lock(&local->queue_stop_reason_lock);
- local->ampdu_ac_queue[qn] = -1;
- spin_unlock(&local->queue_stop_reason_lock);
- }
+ err_wake_queue:
+ ieee80211_wake_queue_by_reason(
+ &local->hw, ieee80211_ac_from_tid(tid),
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
err_unlock_sta:
+ spin_unlock(&local->ampdu_lock);
spin_unlock_bh(&sta->lock);
unlock:
rcu_read_unlock();
@@ -395,6 +361,67 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish and holding
+ * local->ampdu_lock across both calls.
+ */
+static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tid)
+{
+ unsigned long flags;
+ u16 queue = ieee80211_ac_from_tid(tid);
+
+ ieee80211_stop_queue_by_reason(
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+
+ if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* mark queue as pending, it is stopped already */
+ __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
+ &local->queue_stop_reasons[queue]);
+ /* copy over remaining packets */
+ skb_queue_splice_tail_init(
+ &sta->ampdu_mlme.tid_tx[tid]->pending,
+ &local->pending[queue]);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+}
+
+static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tid)
+{
+ u16 queue = ieee80211_ac_from_tid(tid);
+
+ ieee80211_wake_queue_by_reason(
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+}
+
+/* caller must hold sta->lock */
+static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tid)
+{
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
+#endif
+
+ spin_lock(&local->ampdu_lock);
+ ieee80211_agg_splice_packets(local, sta, tid);
+ /*
+ * NB: we rely on sta->lock being taken in the TX
+ * processing here when adding to the pending queue,
+ * otherwise we could only change the state of the
+ * session to OPERATIONAL _here_.
+ */
+ ieee80211_agg_splice_finish(local, sta, tid);
+ spin_unlock(&local->ampdu_lock);
+
+ local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_OPERATIONAL,
+ &sta->sta, tid, NULL);
+}
+
void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -437,20 +464,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
*state |= HT_ADDBA_DRV_READY_MSK;
- if (*state == HT_AGG_STATE_OPERATIONAL) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
-#endif
- if (hw->ampdu_queues) {
- /*
- * Wake up this queue, we stopped it earlier,
- * this will in turn wake the entire AC.
- */
- ieee80211_wake_queue_by_reason(hw,
- hw->queues + sta->tid_to_tx_q[tid],
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- }
- }
+ if (*state == HT_AGG_STATE_OPERATIONAL)
+ ieee80211_agg_tx_operational(local, sta, tid);
out:
spin_unlock_bh(&sta->lock);
@@ -584,22 +599,19 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
spin_lock_bh(&sta->lock);
+ spin_lock(&local->ampdu_lock);
- if (*state & HT_AGG_STATE_INITIATOR_MSK &&
- hw->ampdu_queues) {
- /*
- * Wake up this queue, we stopped it earlier,
- * this will in turn wake the entire AC.
- */
- ieee80211_wake_queue_by_reason(hw,
- hw->queues + sta->tid_to_tx_q[tid],
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- }
+ ieee80211_agg_splice_packets(local, sta, tid);
*state = HT_AGG_STATE_IDLE;
+ /* from now on packets are no longer put onto sta->pending */
sta->ampdu_mlme.addba_req_num[tid] = 0;
kfree(sta->ampdu_mlme.tid_tx[tid]);
sta->ampdu_mlme.tid_tx[tid] = NULL;
+
+ ieee80211_agg_splice_finish(local, sta, tid);
+
+ spin_unlock(&local->ampdu_lock);
spin_unlock_bh(&sta->lock);
rcu_read_unlock();
@@ -637,9 +649,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
struct ieee80211_mgmt *mgmt,
size_t len)
{
- struct ieee80211_hw *hw = &local->hw;
- u16 capab;
- u16 tid, start_seq_num;
+ u16 capab, tid;
u8 *state;
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
@@ -673,26 +683,10 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
*state |= HT_ADDBA_RECEIVED_MSK;
- if (hw->ampdu_queues && *state != curstate &&
- *state == HT_AGG_STATE_OPERATIONAL) {
- /*
- * Wake up this queue, we stopped it earlier,
- * this will in turn wake the entire AC.
- */
- ieee80211_wake_queue_by_reason(hw,
- hw->queues + sta->tid_to_tx_q[tid],
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- }
- sta->ampdu_mlme.addba_req_num[tid] = 0;
+ if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL)
+ ieee80211_agg_tx_operational(local, sta, tid);
- if (local->ops->ampdu_action) {
- (void)local->ops->ampdu_action(hw,
- IEEE80211_AMPDU_TX_RESUME,
- &sta->sta, tid, &start_seq_num);
- }
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ sta->ampdu_mlme.addba_req_num[tid] = 0;
} else {
sta->ampdu_mlme.addba_req_num[tid]++;
___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 58693e52d458..e677b751d468 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -540,9 +540,6 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_AP)
- return -EINVAL;
-
old = sdata->u.ap.beacon;
if (old)
@@ -559,9 +556,6 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_AP)
- return -EINVAL;
-
old = sdata->u.ap.beacon;
if (!old)
@@ -577,9 +571,6 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_AP)
- return -EINVAL;
-
old = sdata->u.ap.beacon;
if (!old)
@@ -728,10 +719,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
int err;
int layer2_update;
- /* Prevent a race with changing the rate control algorithm */
- if (!netif_running(dev))
- return -ENETDOWN;
-
if (params->vlan) {
sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@@ -860,14 +847,8 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
struct sta_info *sta;
int err;
- if (!netif_running(dev))
- return -ENETDOWN;
-
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
- return -ENOTSUPP;
-
rcu_read_lock();
sta = sta_info_get(local, next_hop);
if (!sta) {
@@ -913,14 +894,8 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
struct mesh_path *mpath;
struct sta_info *sta;
- if (!netif_running(dev))
- return -ENETDOWN;
-
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
- return -ENOTSUPP;
-
rcu_read_lock();
sta = sta_info_get(local, next_hop);
@@ -989,9 +964,6 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
- return -ENOTSUPP;
-
rcu_read_lock();
mpath = mesh_path_lookup(dst, sdata);
if (!mpath) {
@@ -1013,9 +985,6 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
- return -ENOTSUPP;
-
rcu_read_lock();
mpath = mesh_path_lookup_by_idx(idx, sdata);
if (!mpath) {
@@ -1035,8 +1004,6 @@ static int ieee80211_get_mesh_params(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
- return -ENOTSUPP;
memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config));
return 0;
}
@@ -1054,9 +1021,6 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
- return -ENOTSUPP;
-
/* Set the config options which we are interested in setting */
conf = &(sdata->u.mesh.mshcfg);
if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask))
@@ -1104,9 +1068,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_AP)
- return -EINVAL;
-
if (params->use_cts_prot >= 0) {
sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
changed |= BSS_CHANGED_ERP_CTS_PROT;
@@ -1181,91 +1142,6 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
}
-static int set_mgmt_extra_ie_sta(struct ieee80211_sub_if_data *sdata,
- u8 subtype, u8 *ies, size_t ies_len)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-
- switch (subtype) {
- case IEEE80211_STYPE_PROBE_REQ >> 4:
- if (local->ops->hw_scan)
- break;
- kfree(ifmgd->ie_probereq);
- ifmgd->ie_probereq = ies;
- ifmgd->ie_probereq_len = ies_len;
- return 0;
- case IEEE80211_STYPE_PROBE_RESP >> 4:
- kfree(ifmgd->ie_proberesp);
- ifmgd->ie_proberesp = ies;
- ifmgd->ie_proberesp_len = ies_len;
- return 0;
- case IEEE80211_STYPE_AUTH >> 4:
- kfree(ifmgd->ie_auth);
- ifmgd->ie_auth = ies;
- ifmgd->ie_auth_len = ies_len;
- return 0;
- case IEEE80211_STYPE_ASSOC_REQ >> 4:
- kfree(ifmgd->ie_assocreq);
- ifmgd->ie_assocreq = ies;
- ifmgd->ie_assocreq_len = ies_len;
- return 0;
- case IEEE80211_STYPE_REASSOC_REQ >> 4:
- kfree(ifmgd->ie_reassocreq);
- ifmgd->ie_reassocreq = ies;
- ifmgd->ie_reassocreq_len = ies_len;
- return 0;
- case IEEE80211_STYPE_DEAUTH >> 4:
- kfree(ifmgd->ie_deauth);
- ifmgd->ie_deauth = ies;
- ifmgd->ie_deauth_len = ies_len;
- return 0;
- case IEEE80211_STYPE_DISASSOC >> 4:
- kfree(ifmgd->ie_disassoc);
- ifmgd->ie_disassoc = ies;
- ifmgd->ie_disassoc_len = ies_len;
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int ieee80211_set_mgmt_extra_ie(struct wiphy *wiphy,
- struct net_device *dev,
- struct mgmt_extra_ie_params *params)
-{
- struct ieee80211_sub_if_data *sdata;
- u8 *ies;
- size_t ies_len;
- int ret = -EOPNOTSUPP;
-
- if (params->ies) {
- ies = kmemdup(params->ies, params->ies_len, GFP_KERNEL);
- if (ies == NULL)
- return -ENOMEM;
- ies_len = params->ies_len;
- } else {
- ies = NULL;
- ies_len = 0;
- }
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_STATION:
- ret = set_mgmt_extra_ie_sta(sdata, params->subtype,
- ies, ies_len);
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret)
- kfree(ies);
- return ret;
-}
-
#ifdef CONFIG_PM
static int ieee80211_suspend(struct wiphy *wiphy)
{
@@ -1287,9 +1163,6 @@ static int ieee80211_scan(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata;
- if (!netif_running(dev))
- return -ENETDOWN;
-
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
@@ -1300,6 +1173,119 @@ static int ieee80211_scan(struct wiphy *wiphy,
return ieee80211_request_scan(sdata, req);
}
+static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_auth_request *req)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ switch (req->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_OPEN;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_SHARED_KEY;
+ break;
+ case NL80211_AUTHTYPE_FT:
+ sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_FT;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_LEAP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ memcpy(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN);
+ sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
+ sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
+
+ /* TODO: req->chan */
+ sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
+
+ if (req->ssid) {
+ sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
+ memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
+ sdata->u.mgd.ssid_len = req->ssid_len;
+ sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
+ }
+
+ kfree(sdata->u.mgd.sme_auth_ie);
+ sdata->u.mgd.sme_auth_ie = NULL;
+ sdata->u.mgd.sme_auth_ie_len = 0;
+ if (req->ie) {
+ sdata->u.mgd.sme_auth_ie = kmalloc(req->ie_len, GFP_KERNEL);
+ if (sdata->u.mgd.sme_auth_ie == NULL)
+ return -ENOMEM;
+ memcpy(sdata->u.mgd.sme_auth_ie, req->ie, req->ie_len);
+ sdata->u.mgd.sme_auth_ie_len = req->ie_len;
+ }
+
+ sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
+ sdata->u.mgd.state = IEEE80211_STA_MLME_DIRECT_PROBE;
+ ieee80211_sta_req_auth(sdata);
+ return 0;
+}
+
+static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_assoc_request *req)
+{
+ struct ieee80211_sub_if_data *sdata;
+ int ret;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ if (memcmp(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN) != 0 ||
+ !(sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED))
+ return -ENOLINK; /* not authenticated */
+
+ sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
+ sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
+
+ /* TODO: req->chan */
+ sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
+
+ if (req->ssid) {
+ sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
+ memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
+ sdata->u.mgd.ssid_len = req->ssid_len;
+ sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
+ } else
+ sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL;
+
+ ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len);
+ if (ret)
+ return ret;
+
+ sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
+ sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE;
+ ieee80211_sta_req_auth(sdata);
+ return 0;
+}
+
+static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_deauth_request *req)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ /* TODO: req->ie */
+ return ieee80211_sta_deauthenticate(sdata, req->reason_code);
+}
+
+static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_disassoc_request *req)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ /* TODO: req->ie */
+ return ieee80211_sta_disassociate(sdata, req->reason_code);
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -1329,8 +1315,11 @@ struct cfg80211_ops mac80211_config_ops = {
.change_bss = ieee80211_change_bss,
.set_txq_params = ieee80211_set_txq_params,
.set_channel = ieee80211_set_channel,
- .set_mgmt_extra_ie = ieee80211_set_mgmt_extra_ie,
.suspend = ieee80211_suspend,
.resume = ieee80211_resume,
.scan = ieee80211_scan,
+ .auth = ieee80211_auth,
+ .assoc = ieee80211_assoc,
+ .deauth = ieee80211_deauth,
+ .disassoc = ieee80211_disassoc,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e37f557de3f3..210b9b6fecd2 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -40,6 +40,10 @@ static const struct file_operations name## _ops = { \
local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \
local, &name## _ops);
+#define DEBUGFS_ADD_MODE(name, mode) \
+ local->debugfs.name = debugfs_create_file(#name, mode, phyd, \
+ local, &name## _ops);
+
#define DEBUGFS_DEL(name) \
debugfs_remove(local->debugfs.name); \
local->debugfs.name = NULL;
@@ -113,6 +117,24 @@ static const struct file_operations tsf_ops = {
.open = mac80211_open_file_generic
};
+static ssize_t reset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+
+ rtnl_lock();
+ __ieee80211_suspend(&local->hw);
+ __ieee80211_resume(&local->hw);
+ rtnl_unlock();
+
+ return count;
+}
+
+static const struct file_operations reset_ops = {
+ .write = reset_write,
+ .open = mac80211_open_file_generic,
+};
+
/* statistics stuff */
#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
@@ -254,6 +276,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(total_ps_buffered);
DEBUGFS_ADD(wep_iv);
DEBUGFS_ADD(tsf);
+ DEBUGFS_ADD_MODE(reset, 0200);
statsd = debugfs_create_dir("statistics", phyd);
local->debugfs.statistics = statsd;
@@ -308,6 +331,7 @@ void debugfs_hw_del(struct ieee80211_local *local)
DEBUGFS_DEL(total_ps_buffered);
DEBUGFS_DEL(wep_iv);
DEBUGFS_DEL(tsf);
+ DEBUGFS_DEL(reset);
DEBUGFS_STATS_DEL(transmitted_fragment_count);
DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f4becc12904e..3201e1f96365 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -812,8 +812,9 @@ int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata)
ifibss->ibss_join_req = jiffies;
ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
+ set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
- return ieee80211_sta_find_ibss(sdata);
+ return 0;
}
int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index fbb91f1aebb2..e6ed78cb16b3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -149,11 +149,6 @@ struct ieee80211_tx_data {
struct ieee80211_channel *channel;
- /* Extra fragments (in addition to the first fragment
- * in skb) */
- struct sk_buff **extra_frag;
- int num_extra_frag;
-
u16 ethertype;
unsigned int flags;
};
@@ -189,12 +184,6 @@ struct ieee80211_rx_data {
u16 tkip_iv16;
};
-struct ieee80211_tx_stored_packet {
- struct sk_buff *skb;
- struct sk_buff **extra_frag;
- int num_extra_frag;
-};
-
struct beacon_data {
u8 *head, *tail;
int head_len, tail_len;
@@ -247,8 +236,9 @@ struct mesh_preq_queue {
#define IEEE80211_STA_ASSOCIATED BIT(4)
#define IEEE80211_STA_PROBEREQ_POLL BIT(5)
#define IEEE80211_STA_CREATE_IBSS BIT(6)
-#define IEEE80211_STA_MIXED_CELL BIT(7)
+/* hole at 7, please re-use */
#define IEEE80211_STA_WMM_ENABLED BIT(8)
+/* hole at 9, please re-use */
#define IEEE80211_STA_AUTO_SSID_SEL BIT(10)
#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
@@ -256,6 +246,7 @@ struct mesh_preq_queue {
#define IEEE80211_STA_TKIP_WEP_USED BIT(14)
#define IEEE80211_STA_CSA_RECEIVED BIT(15)
#define IEEE80211_STA_MFP_ENABLED BIT(16)
+#define IEEE80211_STA_EXT_SME BIT(17)
/* flags for MLME request */
#define IEEE80211_STA_REQ_SCAN 0
#define IEEE80211_STA_REQ_DIRECT_PROBE 1
@@ -266,12 +257,14 @@ struct mesh_preq_queue {
#define IEEE80211_AUTH_ALG_OPEN BIT(0)
#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
#define IEEE80211_AUTH_ALG_LEAP BIT(2)
+#define IEEE80211_AUTH_ALG_FT BIT(3)
struct ieee80211_if_managed {
struct timer_list timer;
struct timer_list chswitch_timer;
struct work_struct work;
struct work_struct chswitch_work;
+ struct work_struct beacon_loss_work;
u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
@@ -305,6 +298,7 @@ struct ieee80211_if_managed {
unsigned long request;
unsigned long last_probe;
+ unsigned long last_beacon;
unsigned int flags;
@@ -321,20 +315,8 @@ struct ieee80211_if_managed {
int wmm_last_param_set;
/* Extra IE data for management frames */
- u8 *ie_probereq;
- size_t ie_probereq_len;
- u8 *ie_proberesp;
- size_t ie_proberesp_len;
- u8 *ie_auth;
- size_t ie_auth_len;
- u8 *ie_assocreq;
- size_t ie_assocreq_len;
- u8 *ie_reassocreq;
- size_t ie_reassocreq_len;
- u8 *ie_deauth;
- size_t ie_deauth_len;
- u8 *ie_disassoc;
- size_t ie_disassoc_len;
+ u8 *sme_auth_ie;
+ size_t sme_auth_ie_len;
};
enum ieee80211_ibss_flags {
@@ -421,7 +403,6 @@ struct ieee80211_if_mesh {
*
* @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
* @IEEE80211_SDATA_PROMISC: interface is promisc
- * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active
* @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
* @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
* associated stations and deliver multicast frames both
@@ -430,9 +411,8 @@ struct ieee80211_if_mesh {
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
IEEE80211_SDATA_PROMISC = BIT(1),
- IEEE80211_SDATA_USERSPACE_MLME = BIT(2),
- IEEE80211_SDATA_OPERATING_GMODE = BIT(3),
- IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(4),
+ IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
+ IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
};
struct ieee80211_sub_if_data {
@@ -598,6 +578,8 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_PS,
IEEE80211_QUEUE_STOP_REASON_CSA,
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+ IEEE80211_QUEUE_STOP_REASON_PENDING,
};
struct ieee80211_master_priv {
@@ -612,12 +594,7 @@ struct ieee80211_local {
const struct ieee80211_ops *ops;
- /* AC queue corresponding to each AMPDU queue */
- s8 ampdu_ac_queue[IEEE80211_MAX_AMPDU_QUEUES];
- unsigned int amdpu_ac_stop_refcnt[IEEE80211_MAX_AMPDU_QUEUES];
-
- unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES +
- IEEE80211_MAX_AMPDU_QUEUES];
+ unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
/* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
spinlock_t queue_stop_reason_lock;
@@ -654,11 +631,17 @@ struct ieee80211_local {
struct sta_info *sta_hash[STA_HASH_SIZE];
struct timer_list sta_cleanup;
- unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
- unsigned long queues_pending_run[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
- struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
+ struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet;
+ /*
+ * This lock is used to prevent concurrent A-MPDU
+ * session start/stop processing, this thus also
+ * synchronises the ->ampdu_action() callback to
+ * drivers and limits it to one at a time.
+ */
+ spinlock_t ampdu_lock;
+
/* number of interfaces with corresponding IFF_ flags */
atomic_t iff_allmultis, iff_promiscs;
@@ -774,6 +757,7 @@ struct ieee80211_local {
struct dentry *total_ps_buffered;
struct dentry *wep_iv;
struct dentry *tsf;
+ struct dentry *reset;
struct dentry *statistics;
struct local_debugfsdentries_statsdentries {
struct dentry *transmitted_fragment_count;
@@ -969,7 +953,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_rx_status *rx_status);
int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
- char *ie, size_t len);
+ const char *ie, size_t len);
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
void ieee80211_scan_failed(struct ieee80211_local *local);
@@ -1053,8 +1037,19 @@ void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
u8 pwr_constr_elem_len);
/* Suspend/resume */
+#ifdef CONFIG_PM
int __ieee80211_suspend(struct ieee80211_hw *hw);
int __ieee80211_resume(struct ieee80211_hw *hw);
+#else
+static inline int __ieee80211_suspend(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+static inline int __ieee80211_resume(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+#endif
/* utility functions/constants */
extern void *mac80211_wiphy_privid; /* for wiphy privid */
@@ -1081,6 +1076,9 @@ void ieee80211_dynamic_ps_timer(unsigned long data);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
int powersave);
+void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_hdr *hdr);
+void ieee80211_beacon_loss_work(struct work_struct *work);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
enum queue_stop_reason reason);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f9f27b9cadbe..91e8e1bacaaa 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -261,8 +261,7 @@ static int ieee80211_open(struct net_device *dev)
ieee80211_bss_info_change_notify(sdata, changed);
ieee80211_enable_keys(sdata);
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
@@ -478,6 +477,9 @@ static int ieee80211_stop(struct net_device *dev)
*/
cancel_work_sync(&sdata->u.mgd.work);
cancel_work_sync(&sdata->u.mgd.chswitch_work);
+
+ cancel_work_sync(&sdata->u.mgd.beacon_loss_work);
+
/*
* When we get here, the interface is marked down.
* Call synchronize_rcu() to wait for the RX path
@@ -653,13 +655,7 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
kfree(sdata->u.mgd.extra_ie);
kfree(sdata->u.mgd.assocreq_ies);
kfree(sdata->u.mgd.assocresp_ies);
- kfree(sdata->u.mgd.ie_probereq);
- kfree(sdata->u.mgd.ie_proberesp);
- kfree(sdata->u.mgd.ie_auth);
- kfree(sdata->u.mgd.ie_assocreq);
- kfree(sdata->u.mgd.ie_reassocreq);
- kfree(sdata->u.mgd.ie_deauth);
- kfree(sdata->u.mgd.ie_disassoc);
+ kfree(sdata->u.mgd.sme_auth_ie);
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_AP_VLAN:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f38db4d37e5d..a6f1d8a869bc 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -161,12 +161,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
if (WARN_ON(!netif_running(sdata->dev)))
return 0;
- if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
- return -EINVAL;
-
- if (!local->ops->config_interface)
- return 0;
-
memset(&conf, 0, sizeof(conf));
if (sdata->vif.type == NL80211_IFTYPE_STATION)
@@ -183,6 +177,9 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
return -EINVAL;
}
+ if (!local->ops->config_interface)
+ return 0;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
@@ -224,9 +221,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
}
}
- if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
- return -EINVAL;
-
conf.changed = changed;
return local->ops->config_interface(local_to_hw(local),
@@ -780,13 +774,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
setup_timer(&local->dynamic_ps_timer,
ieee80211_dynamic_ps_timer, (unsigned long) local);
- for (i = 0; i < IEEE80211_MAX_AMPDU_QUEUES; i++)
- local->ampdu_ac_queue[i] = -1;
- /* using an s8 won't work with more than that */
- BUILD_BUG_ON(IEEE80211_MAX_AMPDU_QUEUES > 127);
-
sta_info_init(local);
+ for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+ skb_queue_head_init(&local->pending[i]);
tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
(unsigned long)local);
tasklet_disable(&local->tx_pending_tasklet);
@@ -799,6 +790,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
+ spin_lock_init(&local->ampdu_lock);
+
return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -876,10 +869,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
*/
if (hw->queues > IEEE80211_MAX_QUEUES)
hw->queues = IEEE80211_MAX_QUEUES;
- if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
- hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
- if (hw->queues < 4)
- hw->ampdu_queues = 0;
mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
"wmaster%d", ieee80211_master_setup,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 841b8450b3de..7ecda9d59d8a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,7 +30,7 @@
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
#define IEEE80211_ASSOC_MAX_TRIES 3
#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
-#define IEEE80211_PROBE_INTERVAL (60 * HZ)
+#define IEEE80211_PROBE_IDLE_TIME (60 * HZ)
#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
/* utils */
@@ -82,38 +82,23 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
/* frame sending functions */
-static void add_extra_ies(struct sk_buff *skb, u8 *ies, size_t ies_len)
-{
- if (ies)
- memcpy(skb_put(skb, ies_len), ies, ies_len);
-}
-
static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos, *ies, *ht_ie, *e_ies;
+ u8 *pos, *ies, *ht_ie;
int i, len, count, rates_len, supp_rates_len;
u16 capab;
struct ieee80211_bss *bss;
int wmm = 0;
struct ieee80211_supported_band *sband;
u32 rates = 0;
- size_t e_ies_len;
-
- if (ifmgd->flags & IEEE80211_IBSS_PREV_BSSID_SET) {
- e_ies = sdata->u.mgd.ie_reassocreq;
- e_ies_len = sdata->u.mgd.ie_reassocreq_len;
- } else {
- e_ies = sdata->u.mgd.ie_assocreq;
- e_ies_len = sdata->u.mgd.ie_assocreq_len;
- }
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + 200 + ifmgd->extra_ie_len +
- ifmgd->ssid_len + e_ies_len);
+ ifmgd->ssid_len);
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
"frame\n", sdata->dev->name);
@@ -304,8 +289,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
}
- add_extra_ies(skb, e_ies, e_ies_len);
-
kfree(ifmgd->assocreq_ies);
ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies;
ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL);
@@ -323,19 +306,8 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *ies;
- size_t ies_len;
-
- if (stype == IEEE80211_STYPE_DEAUTH) {
- ies = sdata->u.mgd.ie_deauth;
- ies_len = sdata->u.mgd.ie_deauth_len;
- } else {
- ies = sdata->u.mgd.ie_disassoc;
- ies_len = sdata->u.mgd.ie_disassoc_len;
- }
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) +
- ies_len);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for "
"deauth/disassoc frame\n", sdata->dev->name);
@@ -353,8 +325,6 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
/* u.deauth.reason_code == u.disassoc.reason_code */
mgmt->u.deauth.reason_code = cpu_to_le16(reason);
- add_extra_ies(skb, ies, ies_len);
-
ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
}
@@ -640,6 +610,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
bss->cbss.capability, bss->has_erp_value, bss->erp_value);
+ cfg80211_hold_bss(&bss->cbss);
+
ieee80211_rx_bss_put(local, bss);
}
@@ -682,6 +654,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
ifmgd->direct_probe_tries++;
if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -697,6 +670,13 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
sdata->local->hw.conf.channel->center_freq,
ifmgd->ssid, ifmgd->ssid_len);
+
+ /*
+ * We might have a pending scan which had no chance to run yet
+ * due to state == IEEE80211_STA_MLME_DIRECT_PROBE.
+ * Hence, queue the STA's work again.
+ */
+ queue_work(local->hw.workqueue, &ifmgd->work);
return;
}
@@ -721,6 +701,9 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
+ u8 *ies;
+ size_t ies_len;
ifmgd->auth_tries++;
if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -732,6 +715,13 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
sdata->local->hw.conf.channel->center_freq,
ifmgd->ssid, ifmgd->ssid_len);
+
+ /*
+ * We might have a pending scan which had no chance to run yet
+ * due to state == IEEE80211_STA_MLME_AUTHENTICATE.
+ * Hence, queue the STA's work again.
+ */
+ queue_work(local->hw.workqueue, &ifmgd->work);
return;
}
@@ -739,7 +729,14 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
printk(KERN_DEBUG "%s: authenticate with AP %pM\n",
sdata->dev->name, ifmgd->bssid);
- ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, NULL, 0,
+ if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
+ ies = ifmgd->sme_auth_ie;
+ ies_len = ifmgd->sme_auth_ie_len;
+ } else {
+ ies = NULL;
+ ies_len = 0;
+ }
+ ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, ies, ies_len,
ifmgd->bssid, 0);
ifmgd->auth_transaction = 2;
@@ -756,6 +753,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_conf *conf = &local_to_hw(local)->conf;
+ struct ieee80211_bss *bss;
struct sta_info *sta;
u32 changed = 0, config_changed = 0;
@@ -779,6 +778,15 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_tear_down_BA_sessions(sta);
+ bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
+ conf->channel->center_freq,
+ ifmgd->ssid, ifmgd->ssid_len);
+
+ if (bss) {
+ cfg80211_unhold_bss(&bss->cbss);
+ ieee80211_rx_bss_put(local, bss);
+ }
+
if (self_disconnected) {
if (deauth)
ieee80211_send_deauth_disassoc(sdata,
@@ -854,7 +862,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata)
int wep_privacy;
int privacy_invoked;
- if (!ifmgd || (ifmgd->flags & IEEE80211_STA_MIXED_CELL))
+ if (!ifmgd || (ifmgd->flags & IEEE80211_STA_EXT_SME))
return 0;
bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
@@ -878,6 +886,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata)
static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
ifmgd->assoc_tries++;
if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
@@ -889,6 +898,12 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
sdata->local->hw.conf.channel->center_freq,
ifmgd->ssid, ifmgd->ssid_len);
+ /*
+ * We might have a pending scan which had no chance to run yet
+ * due to state == IEEE80211_STA_MLME_ASSOCIATE.
+ * Hence, queue the STA's work again.
+ */
+ queue_work(local->hw.workqueue, &ifmgd->work);
return;
}
@@ -907,13 +922,55 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
}
+void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_hdr *hdr)
+{
+ /*
+ * We can postpone the mgd.timer whenever receiving unicast frames
+ * from the AP because we know that the connection is working both
+ * ways at that time. But multicast frames (and hence also beacons)
+ * must be ignored here, because we need to trigger the timer during
+ * data idle periods for sending the periodic probe request to
+ * the AP.
+ */
+ if (!is_multicast_ether_addr(hdr->addr1))
+ mod_timer(&sdata->u.mgd.timer,
+ jiffies + IEEE80211_MONITORING_INTERVAL);
+}
+
+void ieee80211_beacon_loss_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.beacon_loss_work);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
+ "- sending probe request\n", sdata->dev->name,
+ sdata->u.mgd.bssid);
+
+ ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
+ ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
+ ifmgd->ssid_len, NULL, 0);
+
+ mod_timer(&ifmgd->timer, jiffies + IEEE80211_MONITORING_INTERVAL);
+}
+
+void ieee80211_beacon_loss(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ queue_work(sdata->local->hw.workqueue,
+ &sdata->u.mgd.beacon_loss_work);
+}
+EXPORT_SYMBOL(ieee80211_beacon_loss);
static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- int disassoc;
+ bool disassoc = false;
/* TODO: start monitoring current AP signal quality and number of
* missed beacons. Scan other channels every now and then and search
@@ -928,36 +985,45 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
if (!sta) {
printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n",
sdata->dev->name, ifmgd->bssid);
- disassoc = 1;
- } else {
- disassoc = 0;
- if (time_after(jiffies,
- sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
- if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
- printk(KERN_DEBUG "%s: No ProbeResp from "
- "current AP %pM - assume out of "
- "range\n",
- sdata->dev->name, ifmgd->bssid);
- disassoc = 1;
- } else
- ieee80211_send_probe_req(sdata, ifmgd->bssid,
- ifmgd->ssid,
- ifmgd->ssid_len,
- NULL, 0);
- ifmgd->flags ^= IEEE80211_STA_PROBEREQ_POLL;
- } else {
- ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
- if (time_after(jiffies, ifmgd->last_probe +
- IEEE80211_PROBE_INTERVAL)) {
- ifmgd->last_probe = jiffies;
- ieee80211_send_probe_req(sdata, ifmgd->bssid,
- ifmgd->ssid,
- ifmgd->ssid_len,
- NULL, 0);
- }
- }
+ disassoc = true;
+ goto unlock;
}
+ if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) &&
+ time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
+ printk(KERN_DEBUG "%s: no probe response from AP %pM "
+ "- disassociating\n",
+ sdata->dev->name, ifmgd->bssid);
+ disassoc = true;
+ ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
+ goto unlock;
+ }
+
+ /*
+ * Beacon filtering is only enabled with power save and then the
+ * stack should not check for beacon loss.
+ */
+ if (!((local->hw.flags & IEEE80211_HW_BEACON_FILTER) &&
+ (local->hw.conf.flags & IEEE80211_CONF_PS)) &&
+ time_after(jiffies,
+ ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
+ printk(KERN_DEBUG "%s: beacon loss from AP %pM "
+ "- sending probe request\n",
+ sdata->dev->name, ifmgd->bssid);
+ ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
+ ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
+ ifmgd->ssid_len, NULL, 0);
+ goto unlock;
+
+ }
+
+ if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) {
+ ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
+ ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
+ ifmgd->ssid_len, NULL, 0);
+ }
+
+ unlock:
rcu_read_unlock();
if (disassoc)
@@ -975,7 +1041,11 @@ static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata)
printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
ifmgd->flags |= IEEE80211_STA_AUTHENTICATED;
- ieee80211_associate(sdata);
+ if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
+ /* Wait for SME to request association */
+ ifmgd->state = IEEE80211_STA_MLME_DISABLED;
+ } else
+ ieee80211_associate(sdata);
}
@@ -1061,12 +1131,15 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
switch (ifmgd->auth_alg) {
case WLAN_AUTH_OPEN:
case WLAN_AUTH_LEAP:
+ case WLAN_AUTH_FT:
ieee80211_auth_completed(sdata);
+ cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
break;
case WLAN_AUTH_SHARED_KEY:
- if (ifmgd->auth_transaction == 4)
+ if (ifmgd->auth_transaction == 4) {
ieee80211_auth_completed(sdata);
- else
+ cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
+ } else
ieee80211_auth_challenge(sdata, mgmt, len);
break;
}
@@ -1092,9 +1165,10 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n",
sdata->dev->name, reason_code);
- if (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
- ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE ||
- ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) {
+ if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
+ (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
+ ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE ||
+ ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)) {
ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
mod_timer(&ifmgd->timer, jiffies +
IEEE80211_RETRY_AUTH_INTERVAL);
@@ -1102,6 +1176,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, true, false, 0);
ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED;
+ cfg80211_send_rx_deauth(sdata->dev, (u8 *) mgmt, len);
}
@@ -1124,13 +1199,15 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n",
sdata->dev->name, reason_code);
- if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) {
+ if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
+ ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) {
ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE;
mod_timer(&ifmgd->timer, jiffies +
IEEE80211_RETRY_AUTH_INTERVAL);
}
ieee80211_set_disassoc(sdata, false, false, reason_code);
+ cfg80211_send_rx_disassoc(sdata->dev, (u8 *) mgmt, len);
}
@@ -1346,7 +1423,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
bss_conf->assoc_capability = capab_info;
ieee80211_set_associated(sdata, changed);
+ /*
+ * initialise the time of last beacon to be the association time,
+ * otherwise the beacon loss check will trigger immediately
+ */
+ ifmgd->last_beacon = jiffies;
+
ieee80211_associated(sdata);
+ cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len);
}
@@ -1393,9 +1477,12 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
size_t len,
struct ieee80211_rx_status *rx_status)
{
+ struct ieee80211_if_managed *ifmgd;
size_t baselen;
struct ieee802_11_elems elems;
+ ifmgd = &sdata->u.mgd;
+
if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
return; /* ignore ProbeResp to foreign address */
@@ -1410,11 +1497,14 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
/* direct probe may be part of the association flow */
if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE,
- &sdata->u.mgd.request)) {
+ &ifmgd->request)) {
printk(KERN_DEBUG "%s direct probe responded\n",
sdata->dev->name);
ieee80211_authenticate(sdata);
}
+
+ if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL)
+ ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
}
static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
@@ -1636,6 +1726,8 @@ static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata)
ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY;
else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP)
ifmgd->auth_alg = WLAN_AUTH_LEAP;
+ else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_FT)
+ ifmgd->auth_alg = WLAN_AUTH_FT;
else
ifmgd->auth_alg = WLAN_AUTH_OPEN;
ifmgd->auth_transaction = -1;
@@ -1659,7 +1751,8 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata)
u16 capa_val = WLAN_CAPABILITY_ESS;
struct ieee80211_channel *chan = local->oper_channel;
- if (ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL |
+ if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
+ ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL |
IEEE80211_STA_AUTO_BSSID_SEL |
IEEE80211_STA_AUTO_CHANNEL_SEL)) {
capa_mask |= WLAN_CAPABILITY_PRIVACY;
@@ -1822,6 +1915,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ifmgd = &sdata->u.mgd;
INIT_WORK(&ifmgd->work, ieee80211_sta_work);
INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
+ INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work);
setup_timer(&ifmgd->timer, ieee80211_sta_timer,
(unsigned long) sdata);
setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
@@ -1834,7 +1928,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ifmgd->flags |= IEEE80211_STA_CREATE_IBSS |
IEEE80211_STA_AUTO_BSSID_SEL |
IEEE80211_STA_AUTO_CHANNEL_SEL;
- if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
+ if (sdata->local->hw.queues >= 4)
ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
}
@@ -1856,7 +1950,11 @@ void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata)
ieee80211_set_disassoc(sdata, true, true,
WLAN_REASON_DEAUTH_LEAVING);
- set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
+ if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) ||
+ ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE)
+ set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
+ else if (ifmgd->flags & IEEE80211_STA_EXT_SME)
+ set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
queue_work(local->hw.workqueue, &ifmgd->work);
}
}
@@ -1865,8 +1963,6 @@ int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
-
if (ifmgd->ssid_len)
ifmgd->flags |= IEEE80211_STA_SSID_SET;
else
@@ -1885,6 +1981,10 @@ int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size
ifmgd = &sdata->u.mgd;
if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) {
+ /*
+ * Do not use reassociation if the SSID is changed (different ESS).
+ */
+ ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid));
memcpy(ifmgd->ssid, ssid, len);
ifmgd->ssid_len = len;
@@ -1923,7 +2023,8 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
return ieee80211_sta_commit(sdata);
}
-int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len)
+int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
+ const char *ie, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 44525f517077..027302326498 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,6 +10,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
struct ieee80211_sub_if_data *sdata;
struct ieee80211_if_init_conf conf;
struct sta_info *sta;
+ unsigned long flags;
+
+ ieee80211_stop_queues_by_reason(hw,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND);
flush_workqueue(local->hw.workqueue);
@@ -17,10 +21,23 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
list_for_each_entry(sdata, &local->interfaces, list)
ieee80211_disable_keys(sdata);
- /* remove STAs */
- list_for_each_entry(sta, &local->sta_list, list) {
+ /* Tear down aggregation sessions */
+
+ rcu_read_lock();
+
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ set_sta_flags(sta, WLAN_STA_SUSPEND);
+ ieee80211_sta_tear_down_BA_sessions(sta);
+ }
+ }
- if (local->ops->sta_notify) {
+ rcu_read_unlock();
+
+ /* remove STAs */
+ if (local->ops->sta_notify) {
+ spin_lock_irqsave(&local->sta_lock, flags);
+ list_for_each_entry(sta, &local->sta_list, list) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
@@ -29,11 +46,11 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
local->ops->sta_notify(hw, &sdata->vif,
STA_NOTIFY_REMOVE, &sta->sta);
}
+ spin_unlock_irqrestore(&local->sta_lock, flags);
}
/* remove all interfaces */
list_for_each_entry(sdata, &local->interfaces, list) {
-
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
netif_running(sdata->dev)) {
@@ -61,6 +78,7 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
struct ieee80211_sub_if_data *sdata;
struct ieee80211_if_init_conf conf;
struct sta_info *sta;
+ unsigned long flags;
int res;
/* restart hardware */
@@ -72,7 +90,6 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
/* add interfaces */
list_for_each_entry(sdata, &local->interfaces, list) {
-
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
netif_running(sdata->dev)) {
@@ -84,9 +101,9 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
}
/* add STAs back */
- list_for_each_entry(sta, &local->sta_list, list) {
-
- if (local->ops->sta_notify) {
+ if (local->ops->sta_notify) {
+ spin_lock_irqsave(&local->sta_lock, flags);
+ list_for_each_entry(sta, &local->sta_list, list) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
@@ -95,8 +112,21 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
local->ops->sta_notify(hw, &sdata->vif,
STA_NOTIFY_ADD, &sta->sta);
}
+ spin_unlock_irqrestore(&local->sta_lock, flags);
}
+ /* Clear Suspend state so that ADDBA requests can be processed */
+
+ rcu_read_lock();
+
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ clear_sta_flags(sta, WLAN_STA_SUSPEND);
+ }
+ }
+
+ rcu_read_unlock();
+
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
if (netif_running(sdata->dev))
@@ -113,5 +143,37 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
ieee80211_configure_filter(local);
netif_addr_unlock_bh(local->mdev);
+ /* Finally also reconfigure all the BSS information */
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ u32 changed = ~0;
+ if (!netif_running(sdata->dev))
+ continue;
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ /* disable beacon change bits */
+ changed &= ~IEEE80211_IFCC_BEACON;
+ /* fall through */
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ WARN_ON(ieee80211_if_config(sdata, changed));
+ ieee80211_bss_info_change_notify(sdata, ~0);
+ break;
+ case NL80211_IFTYPE_WDS:
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MONITOR:
+ /* ignore virtual */
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case __NL80211_IFTYPE_AFTER_LAST:
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ ieee80211_wake_queues_by_reason(hw,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+
return 0;
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3fa7ab285066..4641f00a1e5c 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -219,10 +219,12 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
info->control.rates[i].count = 1;
}
- if (sta && sdata->force_unicast_rateidx > -1)
+ if (sta && sdata->force_unicast_rateidx > -1) {
info->control.rates[0].idx = sdata->force_unicast_rateidx;
- else
+ } else {
ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
+ info->flags |= IEEE80211_TX_INTFL_RCALGO;
+ }
/*
* try to enforce the maximum rate the user wanted
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index b9164c9a9563..2ab5ad9e71ce 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -44,8 +44,10 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
+ if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO))
+ ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 66f7ecf51b92..64ebe664effc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -142,6 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_FLAGS */
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
*pos |= IEEE80211_RADIOTAP_F_FCS;
+ if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
+ *pos |= IEEE80211_RADIOTAP_F_BADFCS;
if (status->flag & RX_FLAG_SHORTPRE)
*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
pos++;
@@ -204,9 +206,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* ensure 2 byte alignment for the 2 byte field as required */
if ((pos - (unsigned char *)rthdr) & 1)
pos++;
- /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
- if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
- *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
+ if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
+ *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
pos += 2;
}
@@ -849,12 +850,19 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Mesh beacons will update last_rx when if they are found to
* match the current local configuration when processed.
*/
- sta->last_rx = jiffies;
+ if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
+ ieee80211_is_beacon(hdr->frame_control)) {
+ rx->sdata->u.mgd.last_beacon = jiffies;
+ } else
+ sta->last_rx = jiffies;
}
if (!(rx->flags & IEEE80211_RX_RA_MATCH))
return RX_CONTINUE;
+ if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
+ ieee80211_sta_rx_notify(rx->sdata, hdr);
+
sta->rx_fragments++;
sta->rx_bytes += rx->skb->len;
sta->last_signal = rx->status->signal;
@@ -1876,18 +1884,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
if (ieee80211_vif_is_mesh(&sdata->vif))
return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
- if (sdata->vif.type != NL80211_IFTYPE_STATION &&
- sdata->vif.type != NL80211_IFTYPE_ADHOC)
- return RX_DROP_MONITOR;
-
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
- return RX_DROP_MONITOR;
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
- }
- return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
+ return RX_DROP_MONITOR;
}
static void ieee80211_rx_michael_mic_report(struct net_device *dev,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5030a3c87509..3bf9839f5916 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -214,6 +214,66 @@ void ieee80211_scan_failed(struct ieee80211_local *local)
local->scan_req = NULL;
}
+/*
+ * inform the AP that we will go to sleep so that it will buffer the frames
+ * while we scan
+ */
+static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ bool ps = false;
+
+ /* FIXME: what to do when local->pspolling is true? */
+
+ del_timer_sync(&local->dynamic_ps_timer);
+ cancel_work_sync(&local->dynamic_ps_enable_work);
+
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ ps = true;
+ local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+
+ if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ /*
+ * If power save was enabled, no need to send a nullfunc
+ * frame because the AP knows that we are sleeping. But if the
+ * hardware is creating the nullfunc frame for power save
+ * status (i.e. IEEE80211_HW_PS_NULLFUNC_STACK is not
+ * enabled) and power save was enabled, the firmware just
+ * sent a null frame with power save disabled. So we need
+ * to send a new nullfunc frame to inform the AP that we
+ * are again sleeping.
+ */
+ ieee80211_send_nullfunc(local, sdata, 1);
+}
+
+/* inform the AP that we are awake again, unless power save is enabled */
+static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (!local->powersave)
+ ieee80211_send_nullfunc(local, sdata, 0);
+ else {
+ /*
+ * In the !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
+ * will send a nullfunc frame with the powersave bit set
+ * even though the AP already knows that we are sleeping.
+ * This could be avoided by sending a null frame with the power
+ * save bit disabled before enabling power save, but
+ * this doesn't gain anything.
+ *
+ * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
+ * to send a nullfunc frame because the AP already knows that
+ * we are sleeping; let's just enable power save mode in
+ * hardware.
+ */
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+}
+
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -268,7 +328,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
/* Tell AP we're back */
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
- ieee80211_send_nullfunc(local, sdata, 0);
+ ieee80211_scan_ps_disable(sdata);
netif_tx_wake_all_queues(sdata->dev);
}
} else
@@ -409,6 +469,19 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
return 0;
}
+ /*
+ * Hardware/driver doesn't support hw_scan, so use software
+ * scanning instead. First send a nullfunc frame with the power save
+ * bit on so that the AP will buffer the frames for us while we are
+ * not listening, then send probe requests on each channel and wait
+ * for the responses. After all channels are scanned, tune back to
+ * the original channel and send a nullfunc frame with the power
+ * save bit off to trigger the AP to send us all the buffered frames.
+ *
+ * Note that while local->sw_scanning is true, everything but
+ * nullfunc frames and probe requests will be dropped in
+ * ieee80211_tx_h_check_assoc().
+ */
local->sw_scanning = true;
if (local->ops->sw_scan_start)
local->ops->sw_scan_start(local_to_hw(local));
@@ -428,7 +501,7 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
netif_tx_stop_all_queues(sdata->dev);
- ieee80211_send_nullfunc(local, sdata, 1);
+ ieee80211_scan_ps_enable(sdata);
}
} else
netif_tx_stop_all_queues(sdata->dev);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4ba3c540fcf3..c5f14e6bbde2 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -203,17 +203,6 @@ void sta_info_destroy(struct sta_info *sta)
if (tid_rx)
tid_rx->shutdown = true;
- /*
- * The stop callback cannot find this station any more, but
- * it didn't complete its work -- start the queue if necessary
- */
- if (sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_INITIATOR_MSK &&
- sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_REQ_STOP_BA_MSK &&
- local->hw.ampdu_queues)
- ieee80211_wake_queue_by_reason(&local->hw,
- local->hw.queues + sta->tid_to_tx_q[i],
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
spin_unlock_bh(&sta->lock);
/*
@@ -239,6 +228,11 @@ void sta_info_destroy(struct sta_info *sta)
tid_tx = sta->ampdu_mlme.tid_tx[i];
if (tid_tx) {
del_timer_sync(&tid_tx->addba_resp_timer);
+ /*
+ * STA removed while an aggregation session was being
+ * started? Bit odd, but purge frames anyway.
+ */
+ skb_queue_purge(&tid_tx->pending);
kfree(tid_tx);
}
}
@@ -287,7 +281,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
* enable session_timer's data differentiation. refer to
* sta_rx_agg_session_timer_expired for usage */
sta->timer_to_tid[i] = i;
- sta->tid_to_tx_q[i] = -1;
/* rx */
sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
sta->ampdu_mlme.tid_rx[i] = NULL;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 1f45573c580c..5534d489f506 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -35,6 +35,8 @@
* IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
* frame to this station is transmitted.
* @WLAN_STA_MFP: Management frame protection is used with this STA.
+ * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
+ * Used to deny ADDBA requests (both TX and RX).
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH = 1<<0,
@@ -48,6 +50,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_PSPOLL = 1<<8,
WLAN_STA_CLEAR_PS_FILT = 1<<9,
WLAN_STA_MFP = 1<<10,
+ WLAN_STA_SUSPEND = 1<<11
};
#define STA_TID_NUM 16
@@ -70,11 +73,13 @@ enum ieee80211_sta_info_flags {
* struct tid_ampdu_tx - TID aggregation information (Tx).
*
* @addba_resp_timer: timer for peer's response to addba request
+ * @pending: pending frames queue -- use sta's spinlock to protect
* @ssn: Starting Sequence Number expected to be aggregated.
* @dialog_token: dialog token for aggregation session
*/
struct tid_ampdu_tx {
struct timer_list addba_resp_timer;
+ struct sk_buff_head pending;
u16 ssn;
u8 dialog_token;
};
@@ -201,7 +206,6 @@ struct sta_ampdu_mlme {
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
- * @tid_to_tx_q: map tid to tx queue (invalid == negative values)
* @llid: Local link ID
* @plid: Peer link ID
* @reason: Cancel reason on PLINK_HOLDING state
@@ -276,7 +280,6 @@ struct sta_info {
*/
struct sta_ampdu_mlme ampdu_mlme;
u8 timer_to_tid[STA_TID_NUM];
- s8 tid_to_tx_q[STA_TID_NUM];
#ifdef CONFIG_MAC80211_MESH
/*
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 457238a2f3fc..3fb04a86444d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -34,8 +34,7 @@
#define IEEE80211_TX_OK 0
#define IEEE80211_TX_AGAIN 1
-#define IEEE80211_TX_FRAG_AGAIN 2
-#define IEEE80211_TX_PENDING 3
+#define IEEE80211_TX_PENDING 2
/* misc utils */
@@ -193,7 +192,19 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
if (unlikely(tx->local->sw_scanning) &&
- !ieee80211_is_probe_req(hdr->frame_control))
+ !ieee80211_is_probe_req(hdr->frame_control) &&
+ !ieee80211_is_nullfunc(hdr->frame_control))
+ /*
+ * When software scanning, only nullfunc frames (to notify
+ * the sleep state to the AP) and probe requests (for the
+ * active scan) are allowed; all other frames should not be
+ * sent and we should not get here, but if we do
+ * nonetheless, drop them to avoid sending them
+ * off-channel. See the link below and
+ * ieee80211_start_scan() for more.
+ *
+ * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
+ */
return TX_DROP;
if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
@@ -690,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
+static int ieee80211_fragment(struct ieee80211_local *local,
+ struct sk_buff *skb, int hdrlen,
+ int frag_threshold)
+{
+ struct sk_buff *tail = skb, *tmp;
+ int per_fragm = frag_threshold - hdrlen - FCS_LEN;
+ int pos = hdrlen + per_fragm;
+ int rem = skb->len - hdrlen - per_fragm;
+
+ if (WARN_ON(rem < 0))
+ return -EINVAL;
+
+ while (rem) {
+ int fraglen = per_fragm;
+
+ if (fraglen > rem)
+ fraglen = rem;
+ rem -= fraglen;
+ tmp = dev_alloc_skb(local->tx_headroom +
+ frag_threshold +
+ IEEE80211_ENCRYPT_HEADROOM +
+ IEEE80211_ENCRYPT_TAILROOM);
+ if (!tmp)
+ return -ENOMEM;
+ tail->next = tmp;
+ tail = tmp;
+ skb_reserve(tmp, local->tx_headroom +
+ IEEE80211_ENCRYPT_HEADROOM);
+ /* copy control information */
+ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+ skb_copy_queue_mapping(tmp, skb);
+ tmp->priority = skb->priority;
+ tmp->do_not_encrypt = skb->do_not_encrypt;
+ tmp->dev = skb->dev;
+ tmp->iif = skb->iif;
+
+ /* copy header and data */
+ memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
+ memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
+
+ pos += fraglen;
+ }
+
+ skb->len = hdrlen + per_fragm;
+ return 0;
+}
+
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
- size_t hdrlen, per_fragm, num_fragm, payload_len, left;
- struct sk_buff **frags, *first, *frag;
- int i;
- u16 seq;
- u8 *pos;
+ struct sk_buff *skb = tx->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
int frag_threshold = tx->local->fragmentation_threshold;
+ int hdrlen;
+ int fragnum;
if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
return TX_CONTINUE;
@@ -713,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
return TX_DROP;
- first = tx->skb;
-
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- payload_len = first->len - hdrlen;
- per_fragm = frag_threshold - hdrlen - FCS_LEN;
- num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
-
- frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
- if (!frags)
- goto fail;
-
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
- seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
- pos = first->data + hdrlen + per_fragm;
- left = payload_len - per_fragm;
- for (i = 0; i < num_fragm - 1; i++) {
- struct ieee80211_hdr *fhdr;
- size_t copylen;
-
- if (left <= 0)
- goto fail;
- /* reserve enough extra head and tail room for possible
- * encryption */
- frag = frags[i] =
- dev_alloc_skb(tx->local->tx_headroom +
- frag_threshold +
- IEEE80211_ENCRYPT_HEADROOM +
- IEEE80211_ENCRYPT_TAILROOM);
- if (!frag)
- goto fail;
-
- /* Make sure that all fragments use the same priority so
- * that they end up using the same TX queue */
- frag->priority = first->priority;
+ /* internal error, why is TX_FRAGMENTED set? */
+ if (WARN_ON(skb->len <= frag_threshold))
+ return TX_DROP;
- skb_reserve(frag, tx->local->tx_headroom +
- IEEE80211_ENCRYPT_HEADROOM);
+ /*
+ * Now fragment the frame. This will allocate all the fragments and
+ * chain them (using skb as the first fragment) to skb->next.
+ * During transmission, we will remove the successfully transmitted
+ * fragments from this list. When the low-level driver rejects one
+ * of the fragments, we will simply pretend to accept the skb
+ * but store it away as pending.
+ */
+ if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+ return TX_DROP;
- /* copy TX information */
- info = IEEE80211_SKB_CB(frag);
- memcpy(info, first->cb, sizeof(frag->cb));
+ /* update duration/seq/flags of fragments */
+ fragnum = 0;
+ do {
+ int next_len;
+ const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
- /* copy/fill in 802.11 header */
- fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
- memcpy(fhdr, first->data, hdrlen);
- fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
+ hdr = (void *)skb->data;
+ info = IEEE80211_SKB_CB(skb);
- if (i == num_fragm - 2) {
- /* clear MOREFRAGS bit for the last fragment */
- fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
- } else {
+ if (skb->next) {
+ hdr->frame_control |= morefrags;
+ next_len = skb->next->len;
/*
* No multi-rate retries for fragmented frames, that
* would completely throw off the NAV at other STAs.
@@ -775,37 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
info->control.rates[4].idx = -1;
BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ } else {
+ hdr->frame_control &= ~morefrags;
+ next_len = 0;
}
-
- /* copy data */
- copylen = left > per_fragm ? per_fragm : left;
- memcpy(skb_put(frag, copylen), pos, copylen);
-
- skb_copy_queue_mapping(frag, first);
-
- frag->do_not_encrypt = first->do_not_encrypt;
- frag->dev = first->dev;
- frag->iif = first->iif;
-
- pos += copylen;
- left -= copylen;
- }
- skb_trim(first, hdrlen + per_fragm);
-
- tx->num_extra_frag = num_fragm - 1;
- tx->extra_frag = frags;
+ hdr->duration_id = ieee80211_duration(tx, 0, next_len);
+ hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
+ fragnum++;
+ } while ((skb = skb->next));
return TX_CONTINUE;
-
- fail:
- if (frags) {
- for (i = 0; i < num_fragm - 1; i++)
- if (frags[i])
- dev_kfree_skb(frags[i]);
- kfree(frags);
- }
- I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
- return TX_DROP;
}
static ieee80211_tx_result debug_noinline
@@ -833,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
- int next_len, i;
- int group_addr = is_multicast_ether_addr(hdr->addr1);
-
- if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
- hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
- return TX_CONTINUE;
- }
+ struct sk_buff *skb = tx->skb;
+ struct ieee80211_hdr *hdr;
+ int next_len;
+ bool group_addr;
- hdr->duration_id = ieee80211_duration(tx, group_addr,
- tx->extra_frag[0]->len);
+ do {
+ hdr = (void *) skb->data;
+ next_len = skb->next ? skb->next->len : 0;
+ group_addr = is_multicast_ether_addr(hdr->addr1);
- for (i = 0; i < tx->num_extra_frag; i++) {
- if (i + 1 < tx->num_extra_frag)
- next_len = tx->extra_frag[i + 1]->len;
- else
- next_len = 0;
-
- hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
- hdr->duration_id = ieee80211_duration(tx, 0, next_len);
- }
+ hdr->duration_id =
+ ieee80211_duration(tx, group_addr, next_len);
+ } while ((skb = skb->next));
return TX_CONTINUE;
}
@@ -861,19 +865,16 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
- int i;
+ struct sk_buff *skb = tx->skb;
if (!tx->sta)
return TX_CONTINUE;
tx->sta->tx_packets++;
- tx->sta->tx_fragments++;
- tx->sta->tx_bytes += tx->skb->len;
- if (tx->extra_frag) {
- tx->sta->tx_fragments += tx->num_extra_frag;
- for (i = 0; i < tx->num_extra_frag; i++)
- tx->sta->tx_bytes += tx->extra_frag[i]->len;
- }
+ do {
+ tx->sta->tx_fragments++;
+ tx->sta->tx_bytes += skb->len;
+ } while ((skb = skb->next));
return TX_CONTINUE;
}
@@ -983,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
struct ieee80211_hdr *hdr;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
int hdrlen, tid;
u8 *qc, *state;
+ bool queued = false;
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
@@ -1012,25 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
*/
}
+ /*
+ * If this flag is set to true anywhere, and we get here,
+ * we are doing the needed processing, so remove the flag
+ * now.
+ */
+ info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+
hdr = (struct ieee80211_hdr *) skb->data;
tx->sta = sta_info_get(local, hdr->addr1);
- if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) {
+ if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
+ (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
unsigned long flags;
+ struct tid_ampdu_tx *tid_tx;
+
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
spin_lock_irqsave(&tx->sta->lock, flags);
+ /*
+ * XXX: This spinlock could be fairly expensive, but see the
+ * comment in agg-tx.c:ieee80211_agg_tx_operational().
+ * One way to solve this would be to do something RCU-like
+ * for managing the tid_tx struct and to use atomic bitops
+ * for the actual state -- introducing an actual
+ * 'operational' bit would make that possible. It would
+ * require changing ieee80211_agg_tx_operational() to
+ * set that bit, and changing the way tid_tx is managed
+ * everywhere, including races between that bit and
+ * tid_tx going away (tid_tx being added can be easily
+ * committed to memory before the 'operational' bit).
+ */
+ tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
if (*state == HT_AGG_STATE_OPERATIONAL) {
info->flags |= IEEE80211_TX_CTL_AMPDU;
- if (local->hw.ampdu_queues)
- skb_set_queue_mapping(
- skb, tx->local->hw.queues +
- tx->sta->tid_to_tx_q[tid]);
+ } else if (*state != HT_AGG_STATE_IDLE) {
+ /* in progress */
+ queued = true;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ __skb_queue_tail(&tid_tx->pending, skb);
}
spin_unlock_irqrestore(&tx->sta->lock, flags);
+
+ if (unlikely(queued))
+ return TX_QUEUED;
}
if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1081,51 +1110,55 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
}
if (unlikely(!dev))
return -ENODEV;
- /* initialises tx with control */
+ /*
+ * initialises tx with control
+ *
+ * return value is safe to ignore here because this function
+ * can only be invoked for multicast frames
+ *
+ * XXX: clean up
+ */
__ieee80211_tx_prepare(tx, skb, dev);
dev_put(dev);
return 0;
}
-static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_tx_data *tx)
+static int __ieee80211_tx(struct ieee80211_local *local,
+ struct sk_buff **skbp,
+ struct sta_info *sta)
{
+ struct sk_buff *skb = *skbp, *next;
struct ieee80211_tx_info *info;
- int ret, i;
+ int ret, len;
+ bool fragm = false;
- if (skb) {
+ local->mdev->trans_start = jiffies;
+
+ while (skb) {
if (ieee80211_queue_stopped(&local->hw,
skb_get_queue_mapping(skb)))
return IEEE80211_TX_PENDING;
- ret = local->ops->tx(local_to_hw(local), skb);
- if (ret)
- return IEEE80211_TX_AGAIN;
- local->mdev->trans_start = jiffies;
- ieee80211_led_tx(local, 1);
- }
- if (tx->extra_frag) {
- for (i = 0; i < tx->num_extra_frag; i++) {
- if (!tx->extra_frag[i])
- continue;
- info = IEEE80211_SKB_CB(tx->extra_frag[i]);
+ info = IEEE80211_SKB_CB(skb);
+
+ if (fragm)
info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_FIRST_FRAGMENT);
- if (ieee80211_queue_stopped(&local->hw,
- skb_get_queue_mapping(tx->extra_frag[i])))
- return IEEE80211_TX_FRAG_AGAIN;
-
- ret = local->ops->tx(local_to_hw(local),
- tx->extra_frag[i]);
- if (ret)
- return IEEE80211_TX_FRAG_AGAIN;
- local->mdev->trans_start = jiffies;
- ieee80211_led_tx(local, 1);
- tx->extra_frag[i] = NULL;
+
+ next = skb->next;
+ len = skb->len;
+ ret = local->ops->tx(local_to_hw(local), skb);
+ if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
+ dev_kfree_skb(skb);
+ ret = NETDEV_TX_OK;
}
- kfree(tx->extra_frag);
- tx->extra_frag = NULL;
+ if (ret != NETDEV_TX_OK)
+ return IEEE80211_TX_AGAIN;
+ *skbp = skb = next;
+ ieee80211_led_tx(local, 1);
+ fragm = true;
}
+
return IEEE80211_TX_OK;
}
@@ -1137,7 +1170,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb = tx->skb;
ieee80211_tx_result res = TX_DROP;
- int i;
#define CALL_TXH(txh) \
res = txh(tx); \
@@ -1161,11 +1193,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
txh_done:
if (unlikely(res == TX_DROP)) {
I802_DEBUG_INC(tx->local->tx_handlers_drop);
- dev_kfree_skb(skb);
- for (i = 0; i < tx->num_extra_frag; i++)
- if (tx->extra_frag[i])
- dev_kfree_skb(tx->extra_frag[i]);
- kfree(tx->extra_frag);
+ while (skb) {
+ struct sk_buff *next;
+
+ next = skb->next;
+ dev_kfree_skb(skb);
+ skb = next;
+ }
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1175,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
return 0;
}
-static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
+static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
+ bool txpending)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct sta_info *sta;
struct ieee80211_tx_data tx;
ieee80211_tx_result res_prepare;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int ret, i;
+ struct sk_buff *next;
+ unsigned long flags;
+ int ret, retries;
u16 queue;
queue = skb_get_queue_mapping(skb);
- WARN_ON(test_bit(queue, local->queues_pending));
+ WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
if (unlikely(skb->len < 10)) {
dev_kfree_skb(skb);
- return 0;
+ return;
}
rcu_read_lock();
@@ -1199,10 +1236,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
/* initialises tx */
res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
- if (res_prepare == TX_DROP) {
+ if (unlikely(res_prepare == TX_DROP)) {
dev_kfree_skb(skb);
rcu_read_unlock();
- return 0;
+ return;
+ } else if (unlikely(res_prepare == TX_QUEUED)) {
+ rcu_read_unlock();
+ return;
}
sta = tx.sta;
@@ -1212,59 +1252,71 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
if (invoke_tx_handlers(&tx))
goto out;
-retry:
- ret = __ieee80211_tx(local, skb, &tx);
- if (ret) {
- struct ieee80211_tx_stored_packet *store;
-
+ retries = 0;
+ retry:
+ ret = __ieee80211_tx(local, &tx.skb, tx.sta);
+ switch (ret) {
+ case IEEE80211_TX_OK:
+ break;
+ case IEEE80211_TX_AGAIN:
/*
* Since there are no fragmented frames on A-MPDU
* queues, there's no reason for a driver to reject
* a frame there, warn and drop it.
*/
- if (ret != IEEE80211_TX_PENDING)
- if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
- goto drop;
+ if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
+ goto drop;
+ /* fall through */
+ case IEEE80211_TX_PENDING:
+ skb = tx.skb;
+
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+ if (__netif_subqueue_stopped(local->mdev, queue)) {
+ do {
+ next = skb->next;
+ skb->next = NULL;
+ if (unlikely(txpending))
+ skb_queue_head(&local->pending[queue],
+ skb);
+ else
+ skb_queue_tail(&local->pending[queue],
+ skb);
+ } while ((skb = next));
- store = &local->pending_packet[queue];
+ /*
+ * Make sure nobody will enable the queue on us
+ * (without going through the tasklet) nor disable the
+ * netdev queue underneath the pending handling code.
+ */
+ __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
+ &local->queue_stop_reasons[queue]);
- if (ret == IEEE80211_TX_FRAG_AGAIN)
- skb = NULL;
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
+ } else {
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
- set_bit(queue, local->queues_pending);
- smp_mb();
- /*
- * When the driver gets out of buffers during sending of
- * fragments and calls ieee80211_stop_queue, the netif
- * subqueue is stopped. There is, however, a small window
- * in which the PENDING bit is not yet set. If a buffer
- * gets available in that window (i.e. driver calls
- * ieee80211_wake_queue), we would end up with ieee80211_tx
- * called with the PENDING bit still set. Prevent this by
- * continuing transmitting here when that situation is
- * possible to have happened.
- */
- if (!__netif_subqueue_stopped(local->mdev, queue)) {
- clear_bit(queue, local->queues_pending);
+ retries++;
+ if (WARN(retries > 10, "tx refused but queue active"))
+ goto drop;
goto retry;
}
- store->skb = skb;
- store->extra_frag = tx.extra_frag;
- store->num_extra_frag = tx.num_extra_frag;
}
out:
rcu_read_unlock();
- return 0;
+ return;
drop:
- if (skb)
- dev_kfree_skb(skb);
- for (i = 0; i < tx.num_extra_frag; i++)
- if (tx.extra_frag[i])
- dev_kfree_skb(tx.extra_frag[i]);
- kfree(tx.extra_frag);
rcu_read_unlock();
- return 0;
+
+ skb = tx.skb;
+ while (skb) {
+ next = skb->next;
+ dev_kfree_skb(skb);
+ skb = next;
+ }
}
/* device xmit handlers */
@@ -1323,7 +1375,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
FOUND_SDATA,
UNKNOWN_ADDRESS,
} monitor_iface = NOT_MONITOR;
- int ret;
if (skb->iif)
odev = dev_get_by_index(&init_net, skb->iif);
@@ -1337,7 +1388,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
"originating device\n", dev->name);
#endif
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
@@ -1366,7 +1417,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
else
if (mesh_nexthop_lookup(skb, osdata)) {
dev_put(odev);
- return 0;
+ return NETDEV_TX_OK;
}
if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
@@ -1428,7 +1479,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
dev_kfree_skb(skb);
dev_put(odev);
- return 0;
+ return NETDEV_TX_OK;
}
if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1437,10 +1488,11 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
u.ap);
if (likely(monitor_iface != UNKNOWN_ADDRESS))
info->control.vif = &osdata->vif;
- ret = ieee80211_tx(odev, skb);
+
+ ieee80211_tx(odev, skb, false);
dev_put(odev);
- return ret;
+ return NETDEV_TX_OK;
}
int ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1666,8 +1718,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
}
/* receiver and we are QoS enabled, use a QoS type frame */
- if (sta_flags & WLAN_STA_WME &&
- ieee80211_num_regular_queues(&local->hw) >= 4) {
+ if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
@@ -1799,19 +1850,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
*/
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
- int i, j;
- struct ieee80211_tx_stored_packet *store;
+ int i;
- for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
- if (!test_bit(i, local->queues_pending))
- continue;
- store = &local->pending_packet[i];
- kfree_skb(store->skb);
- for (j = 0; j < store->num_extra_frag; j++)
- kfree_skb(store->extra_frag[j]);
- kfree(store->extra_frag);
- clear_bit(i, local->queues_pending);
+ for (i = 0; i < local->hw.queues; i++)
+ skb_queue_purge(&local->pending[i]);
+}
+
+static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta;
+ struct ieee80211_hdr *hdr;
+ struct net_device *dev;
+ int ret;
+ bool result = true;
+
+ /* does interface still exist? */
+ dev = dev_get_by_index(&init_net, skb->iif);
+ if (!dev) {
+ dev_kfree_skb(skb);
+ return true;
}
+
+ /* validate info->control.vif against skb->iif */
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+
+ if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
+ dev_kfree_skb(skb);
+ result = true;
+ goto out;
+ }
+
+ if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
+ ieee80211_tx(dev, skb, true);
+ } else {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ sta = sta_info_get(local, hdr->addr1);
+
+ ret = __ieee80211_tx(local, &skb, sta);
+ if (ret != IEEE80211_TX_OK)
+ result = false;
+ }
+
+ out:
+ dev_put(dev);
+
+ return result;
}
/*
@@ -1822,40 +1912,53 @@ void ieee80211_tx_pending(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *)data;
struct net_device *dev = local->mdev;
- struct ieee80211_tx_stored_packet *store;
- struct ieee80211_tx_data tx;
- int i, ret;
+ unsigned long flags;
+ int i;
+ bool next;
+ rcu_read_lock();
netif_tx_lock_bh(dev);
- for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
- /* Check that this queue is ok */
- if (__netif_subqueue_stopped(local->mdev, i) &&
- !test_bit(i, local->queues_pending_run))
- continue;
- if (!test_bit(i, local->queues_pending)) {
- clear_bit(i, local->queues_pending_run);
- ieee80211_wake_queue(&local->hw, i);
+ for (i = 0; i < local->hw.queues; i++) {
+ /*
+ * If the queue is stopped for some reason other than pending
+ * frames, or we have no pending frames, proceed to the next queue.
+ */
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ next = false;
+ if (local->queue_stop_reasons[i] !=
+ BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
+ skb_queue_empty(&local->pending[i]))
+ next = true;
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+ if (next)
continue;
- }
- clear_bit(i, local->queues_pending_run);
+ /*
+ * start the queue now to allow processing our packets;
+ * we're under the tx lock here anyway so nothing will
+ * happen as a result of this
+ */
netif_start_subqueue(local->mdev, i);
- store = &local->pending_packet[i];
- tx.extra_frag = store->extra_frag;
- tx.num_extra_frag = store->num_extra_frag;
- tx.flags = 0;
- ret = __ieee80211_tx(local, store->skb, &tx);
- if (ret) {
- if (ret == IEEE80211_TX_FRAG_AGAIN)
- store->skb = NULL;
- } else {
- clear_bit(i, local->queues_pending);
- ieee80211_wake_queue(&local->hw, i);
+ while (!skb_queue_empty(&local->pending[i])) {
+ struct sk_buff *skb = skb_dequeue(&local->pending[i]);
+
+ if (!ieee80211_tx_pending_skb(local, skb)) {
+ skb_queue_head(&local->pending[i], skb);
+ break;
+ }
}
+
+ /* Start regular packet processing again. */
+ if (skb_queue_empty(&local->pending[i]))
+ ieee80211_wake_queue_by_reason(&local->hw, i,
+ IEEE80211_QUEUE_STOP_REASON_PENDING);
}
+
netif_tx_unlock_bh(dev);
+ rcu_read_unlock();
}
/* functions for drivers to get certain frames */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e0431a1d218b..fdf432f14554 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -166,18 +166,13 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
-
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- if (tx->extra_frag) {
- struct ieee80211_hdr *fhdr;
- int i;
- for (i = 0; i < tx->num_extra_frag; i++) {
- fhdr = (struct ieee80211_hdr *)
- tx->extra_frag[i]->data;
- fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- }
- }
+ struct sk_buff *skb = tx->skb;
+ struct ieee80211_hdr *hdr;
+
+ do {
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ } while ((skb = skb->next));
}
int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
@@ -344,42 +339,21 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
{
struct ieee80211_local *local = hw_to_local(hw);
- if (queue >= hw->queues) {
- if (local->ampdu_ac_queue[queue - hw->queues] < 0)
- return;
-
- /*
- * for virtual aggregation queues, we need to refcount the
- * internal mac80211 disable (multiple times!), keep track of
- * driver disable _and_ make sure the regular queue is
- * actually enabled.
- */
- if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
- local->amdpu_ac_stop_refcnt[queue - hw->queues]--;
- else
- __clear_bit(reason, &local->queue_stop_reasons[queue]);
-
- if (local->queue_stop_reasons[queue] ||
- local->amdpu_ac_stop_refcnt[queue - hw->queues])
- return;
-
- /* now go on to treat the corresponding regular queue */
- queue = local->ampdu_ac_queue[queue - hw->queues];
- reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
- }
+ if (WARN_ON(queue >= hw->queues))
+ return;
__clear_bit(reason, &local->queue_stop_reasons[queue]);
+ if (!skb_queue_empty(&local->pending[queue]) &&
+ local->queue_stop_reasons[queue] ==
+ BIT(IEEE80211_QUEUE_STOP_REASON_PENDING))
+ tasklet_schedule(&local->tx_pending_tasklet);
+
if (local->queue_stop_reasons[queue] != 0)
/* someone still has this queue stopped */
return;
- if (test_bit(queue, local->queues_pending)) {
- set_bit(queue, local->queues_pending_run);
- tasklet_schedule(&local->tx_pending_tasklet);
- } else {
- netif_wake_subqueue(local->mdev, queue);
- }
+ netif_wake_subqueue(local->mdev, queue);
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -405,29 +379,18 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
{
struct ieee80211_local *local = hw_to_local(hw);
- if (queue >= hw->queues) {
- if (local->ampdu_ac_queue[queue - hw->queues] < 0)
- return;
-
- /*
- * for virtual aggregation queues, we need to refcount the
- * internal mac80211 disable (multiple times!), keep track of
- * driver disable _and_ make sure the regular queue is
- * actually enabled.
- */
- if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
- local->amdpu_ac_stop_refcnt[queue - hw->queues]++;
- else
- __set_bit(reason, &local->queue_stop_reasons[queue]);
+ if (WARN_ON(queue >= hw->queues))
+ return;
- /* now go on to treat the corresponding regular queue */
- queue = local->ampdu_ac_queue[queue - hw->queues];
- reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
- }
+ /*
+ * Only stop if it was previously running; this is necessary
+ * for correct pending packet handling because there we may
+ * start (but not wake) the queue and rely on that.
+ */
+ if (!local->queue_stop_reasons[queue])
+ netif_stop_subqueue(local->mdev, queue);
__set_bit(reason, &local->queue_stop_reasons[queue]);
-
- netif_stop_subqueue(local->mdev, queue);
}
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -473,15 +436,9 @@ EXPORT_SYMBOL(ieee80211_stop_queues);
int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
{
struct ieee80211_local *local = hw_to_local(hw);
- unsigned long flags;
- if (queue >= hw->queues) {
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- queue = local->ampdu_ac_queue[queue - hw->queues];
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- if (queue < 0)
- return true;
- }
+ if (WARN_ON(queue >= hw->queues))
+ return true;
return __netif_subqueue_stopped(local->mdev, queue);
}
@@ -496,7 +453,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- for (i = 0; i < hw->queues + hw->ampdu_queues; i++)
+ for (i = 0; i < hw->queues; i++)
__ieee80211_wake_queue(hw, i, reason);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -846,16 +803,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- const u8 *ie_auth = NULL;
- int ie_auth_len = 0;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- ie_auth_len = sdata->u.mgd.ie_auth_len;
- ie_auth = sdata->u.mgd.ie_auth;
- }
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
- sizeof(*mgmt) + 6 + extra_len + ie_auth_len);
+ sizeof(*mgmt) + 6 + extra_len);
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
"frame\n", sdata->dev->name);
@@ -877,8 +827,6 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
mgmt->u.auth.status_code = cpu_to_le16(0);
if (extra)
memcpy(skb_put(skb, extra_len), extra, extra_len);
- if (ie_auth)
- memcpy(skb_put(skb, ie_auth_len), ie_auth, ie_auth_len);
ieee80211_tx_skb(sdata, skb, encrypt);
}
@@ -891,20 +839,11 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
struct ieee80211_supported_band *sband;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos, *supp_rates, *esupp_rates = NULL, *extra_preq_ie = NULL;
- int i, extra_preq_ie_len = 0;
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_STATION:
- extra_preq_ie_len = sdata->u.mgd.ie_probereq_len;
- extra_preq_ie = sdata->u.mgd.ie_probereq;
- break;
- default:
- break;
- }
+ u8 *pos, *supp_rates, *esupp_rates = NULL;
+ int i;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
- ie_len + extra_preq_ie_len);
+ ie_len);
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
"request\n", sdata->dev->name);
@@ -953,9 +892,6 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
if (ie)
memcpy(skb_put(skb, ie_len), ie, ie_len);
- if (extra_preq_ie)
- memcpy(skb_put(skb, extra_preq_ie_len), extra_preq_ie,
- extra_preq_ie_len);
ieee80211_tx_skb(sdata, skb, 0);
}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 7043ddc75498..ef73105b3061 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -329,24 +329,17 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
ieee80211_tx_result
ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
{
- int i;
+ struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
- if (wep_encrypt_skb(tx, tx->skb) < 0) {
- I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
- return TX_DROP;
- }
-
- if (tx->extra_frag) {
- for (i = 0; i < tx->num_extra_frag; i++) {
- if (wep_encrypt_skb(tx, tx->extra_frag[i])) {
- I802_DEBUG_INC(tx->local->
- tx_handlers_drop_wep);
- return TX_DROP;
- }
+ skb = tx->skb;
+ do {
+ if (wep_encrypt_skb(tx, skb) < 0) {
+ I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
+ return TX_DROP;
}
- }
+ } while ((skb = skb->next));
return TX_CONTINUE;
}
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 935c63ed3dfa..deb4ecec122a 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -129,14 +129,12 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
- return -EOPNOTSUPP;
-
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
if (ret)
return ret;
sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
+ sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
ieee80211_sta_req_auth(sdata);
return 0;
}
@@ -207,14 +205,6 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
- if (len > IEEE80211_MAX_SSID_LEN)
- return -EINVAL;
- memcpy(sdata->u.mgd.ssid, ssid, len);
- sdata->u.mgd.ssid_len = len;
- return 0;
- }
-
if (data->flags)
sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
else
@@ -224,6 +214,7 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
if (ret)
return ret;
+ sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
ieee80211_sta_req_auth(sdata);
return 0;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
@@ -272,11 +263,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
int ret;
- if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
- memcpy(sdata->u.mgd.bssid, (u8 *) &ap_addr->sa_data,
- ETH_ALEN);
- return 0;
- }
+
if (is_zero_ether_addr((u8 *) &ap_addr->sa_data))
sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL |
IEEE80211_STA_AUTO_CHANNEL_SEL;
@@ -287,6 +274,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
if (ret)
return ret;
+ sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
ieee80211_sta_req_auth(sdata);
return 0;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
@@ -630,7 +618,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
struct ieee80211_sub_if_data *sdata;
int idx, i, alg = ALG_WEP;
u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- int remove = 0;
+ int remove = 0, ret;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -656,11 +644,20 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
return 0;
}
- return ieee80211_set_encryption(
+ ret = ieee80211_set_encryption(
sdata, bcaddr,
idx, alg, remove,
!sdata->default_key,
keybuf, erq->length);
+
+ if (!ret) {
+ if (remove)
+ sdata->u.mgd.flags &= ~IEEE80211_STA_TKIP_WEP_USED;
+ else
+ sdata->u.mgd.flags |= IEEE80211_STA_TKIP_WEP_USED;
+ }
+
+ return ret;
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9101b48ec2ae..4f8bfea278f2 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -196,19 +196,13 @@ ieee80211_tx_result
ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb = tx->skb;
- int i;
ieee80211_tx_set_protected(tx);
- if (tkip_encrypt_skb(tx, skb) < 0)
- return TX_DROP;
-
- if (tx->extra_frag) {
- for (i = 0; i < tx->num_extra_frag; i++) {
- if (tkip_encrypt_skb(tx, tx->extra_frag[i]))
- return TX_DROP;
- }
- }
+ do {
+ if (tkip_encrypt_skb(tx, skb) < 0)
+ return TX_DROP;
+ } while ((skb = skb->next));
return TX_CONTINUE;
}
@@ -428,19 +422,13 @@ ieee80211_tx_result
ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb = tx->skb;
- int i;
ieee80211_tx_set_protected(tx);
- if (ccmp_encrypt_skb(tx, skb) < 0)
- return TX_DROP;
-
- if (tx->extra_frag) {
- for (i = 0; i < tx->num_extra_frag; i++) {
- if (ccmp_encrypt_skb(tx, tx->extra_frag[i]))
- return TX_DROP;
- }
- }
+ do {
+ if (ccmp_encrypt_skb(tx, skb) < 0)
+ return TX_DROP;
+ } while ((skb = skb->next));
return TX_CONTINUE;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 2562d05dbaf5..2c967e4f706c 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -374,7 +374,7 @@ config NETFILTER_XT_TARGET_HL
config NETFILTER_XT_TARGET_LED
tristate '"LED" target support'
- depends on LEDS_CLASS
+ depends on LEDS_CLASS && LED_TRIGGERS
depends on NETFILTER_ADVANCED
help
This option adds a `LED' target, which allows you to blink LEDs in
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index dfb447b584da..8020db6274b8 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -29,6 +29,7 @@
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
+#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
@@ -163,8 +164,8 @@ static void
clean_from_lists(struct nf_conn *ct)
{
pr_debug("clean_from_lists(%p)\n", ct);
- hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
- hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
/* Destroy all pending expectations */
nf_ct_remove_expectations(ct);
@@ -204,8 +205,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
/* We overload first tuple to link into unconfirmed list. */
if (!nf_ct_is_confirmed(ct)) {
- BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
- hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+ BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
}
NF_CT_STAT_INC(net, delete);
@@ -242,18 +243,26 @@ static void death_by_timeout(unsigned long ul_conntrack)
nf_ct_put(ct);
}
+/*
+ * Warning:
+ * - Caller must take a reference on returned object
+ * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
+ * OR
+ * - Caller must lock nf_conntrack_lock before calling this function
+ */
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
unsigned int hash = hash_conntrack(tuple);
/* Disable BHs the entire time since we normally need to disable them
* at least once for the stats anyway.
*/
local_bh_disable();
- hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
+begin:
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
if (nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(net, found);
local_bh_enable();
@@ -261,6 +270,13 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
}
NF_CT_STAT_INC(net, searched);
}
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(n) != hash)
+ goto begin;
local_bh_enable();
return NULL;
@@ -275,11 +291,18 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
struct nf_conn *ct;
rcu_read_lock();
+begin:
h = __nf_conntrack_find(net, tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
+ else {
+ if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
+ nf_ct_put(ct);
+ goto begin;
+ }
+ }
}
rcu_read_unlock();
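With nf_conntrack_cachep switching to SLAB_DESTROY_BY_RCU further down, an entry returned by __nf_conntrack_find() may be freed and recycled for a different tuple while the RCU read side is still running, which is what the warning comment above it and the new tuple re-check here guard against. A hedged sketch of the caller-side contract (take a reference with atomic_inc_not_zero(), re-verify the tuple, retry on mismatch), using only the helpers visible in this patch:

#include <linux/rcupdate.h>
#include <net/netfilter/nf_conntrack.h>

static struct nf_conn *lookup_ct_safely(struct net *net,
					const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
retry:
	h = __nf_conntrack_find(net, tuple);
	if (!h) {
		rcu_read_unlock();
		return NULL;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);
	if (!atomic_inc_not_zero(&ct->ct_general.use)) {
		rcu_read_unlock();	/* entry is being destroyed */
		return NULL;
	}
	if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
		nf_ct_put(ct);		/* slab object was reused meanwhile */
		goto retry;
	}
	rcu_read_unlock();
	return ct;			/* caller now owns a reference */
}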
@@ -293,9 +316,9 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
{
struct net *net = nf_ct_net(ct);
- hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&net->ct.hash[hash]);
- hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
+ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&net->ct.hash[repl_hash]);
}
@@ -318,7 +341,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
@@ -350,17 +373,17 @@ __nf_conntrack_confirm(struct sk_buff *skb)
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
- hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple))
goto out;
- hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple))
goto out;
/* Remove from unconfirmed list */
- hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
/* Timer relative to confirmation time, not original
@@ -399,14 +422,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
{
struct net *net = nf_ct_net(ignored_conntrack);
struct nf_conntrack_tuple_hash *h;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
unsigned int hash = hash_conntrack(tuple);
/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
*/
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(net, found);
@@ -430,14 +453,14 @@ static noinline int early_drop(struct net *net, unsigned int hash)
/* Use oldest entry, which is roughly LRU */
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct = NULL, *tmp;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
unsigned int i, cnt = 0;
int dropped = 0;
rcu_read_lock();
for (i = 0; i < nf_conntrack_htable_size; i++) {
- hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
- hnode) {
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
+ hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
ct = tmp;
@@ -508,27 +531,19 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
#ifdef CONFIG_NET_NS
ct->ct_net = net;
#endif
- INIT_RCU_HEAD(&ct->rcu);
return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
-static void nf_conntrack_free_rcu(struct rcu_head *head)
-{
- struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
-
- nf_ct_ext_free(ct);
- kmem_cache_free(nf_conntrack_cachep, ct);
-}
-
void nf_conntrack_free(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
nf_ct_ext_destroy(ct);
atomic_dec(&net->ct.count);
- call_rcu(&ct->rcu, nf_conntrack_free_rcu);
+ nf_ct_ext_free(ct);
+ kmem_cache_free(nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
@@ -594,7 +609,7 @@ init_conntrack(struct net *net,
}
/* Overload tuple linked list to put us in unconfirmed list. */
- hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&net->ct.unconfirmed);
spin_unlock_bh(&nf_conntrack_lock);
@@ -906,6 +921,12 @@ int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
+
+int nf_ct_port_nlattr_tuple_size(void)
+{
+ return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
@@ -934,17 +955,17 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
spin_lock_bh(&nf_conntrack_lock);
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
- hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
goto found;
}
}
- hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
+ hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
set_bit(IPS_DYING_BIT, &ct->status);
@@ -992,7 +1013,7 @@ static int kill_all(struct nf_conn *i, void *data)
return 1;
}
-void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
{
if (vmalloced)
vfree(hash);
@@ -1060,26 +1081,28 @@ void nf_conntrack_cleanup(struct net *net)
}
}
-struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
{
- struct hlist_head *hash;
- unsigned int size, i;
+ struct hlist_nulls_head *hash;
+ unsigned int nr_slots, i;
+ size_t sz;
*vmalloced = 0;
- size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
- hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
- get_order(sizeof(struct hlist_head)
- * size));
+ BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+ nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+ sz = nr_slots * sizeof(struct hlist_nulls_head);
+ hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
+ get_order(sz));
if (!hash) {
*vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
- hash = vmalloc(sizeof(struct hlist_head) * size);
+ hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
}
- if (hash)
- for (i = 0; i < size; i++)
- INIT_HLIST_HEAD(&hash[i]);
+ if (hash && nulls)
+ for (i = 0; i < nr_slots; i++)
+ INIT_HLIST_NULLS_HEAD(&hash[i], i);
return hash;
}
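Each bucket is now an hlist_nulls head whose terminating "null" pointer encodes the bucket index, which is how __nf_conntrack_find() can tell it wandered into another chain and must restart. A small standalone sketch of the encoding; the macro layout is copied from include/linux/list_nulls.h as an assumption for illustration:

#include <stdio.h>

/* A nulls "pointer" has bit 0 set; the remaining bits carry a value,
 * here the index of the bucket the chain belongs to. */
#define NULLS_MARKER(value)	(1UL | (((unsigned long)(value)) << 1))
#define is_a_nulls(ptr)		((unsigned long)(ptr) & 1UL)
#define get_nulls_value(ptr)	(((unsigned long)(ptr)) >> 1)

int main(void)
{
	unsigned long bucket = 42;
	void *end = (void *)NULLS_MARKER(bucket);

	/* A lockless lookup that reaches 'end' knows which bucket it finished
	 * in; if that differs from the bucket it started from, it raced with
	 * an entry being moved and has to restart the walk. */
	printf("is_a_nulls=%lu bucket=%lu\n",
	       (unsigned long)is_a_nulls(end), get_nulls_value(end));
	return 0;
}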
@@ -1090,7 +1113,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
int i, bucket, vmalloced, old_vmalloced;
unsigned int hashsize, old_size;
int rnd;
- struct hlist_head *hash, *old_hash;
+ struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
/* On boot, we can set this without any fancy locking. */
@@ -1101,7 +1124,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
if (!hashsize)
return -EINVAL;
- hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
+ hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
if (!hash)
return -ENOMEM;
@@ -1116,12 +1139,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
*/
spin_lock_bh(&nf_conntrack_lock);
for (i = 0; i < nf_conntrack_htable_size; i++) {
- while (!hlist_empty(&init_net.ct.hash[i])) {
- h = hlist_entry(init_net.ct.hash[i].first,
- struct nf_conntrack_tuple_hash, hnode);
- hlist_del_rcu(&h->hnode);
+ while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
+ h = hlist_nulls_entry(init_net.ct.hash[i].first,
+ struct nf_conntrack_tuple_hash, hnnode);
+ hlist_nulls_del_rcu(&h->hnnode);
bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
- hlist_add_head(&h->hnode, &hash[bucket]);
+ hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
old_size = nf_conntrack_htable_size;
@@ -1172,7 +1195,7 @@ static int nf_conntrack_init_init_net(void)
nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
sizeof(struct nf_conn),
- 0, 0, NULL);
+ 0, SLAB_DESTROY_BY_RCU, NULL);
if (!nf_conntrack_cachep) {
printk(KERN_ERR "Unable to create nf_conn slab cache\n");
ret = -ENOMEM;
@@ -1202,7 +1225,7 @@ static int nf_conntrack_init_net(struct net *net)
int ret;
atomic_set(&net->ct.count, 0);
- INIT_HLIST_HEAD(&net->ct.unconfirmed);
+ INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
if (!net->ct.stat) {
ret = -ENOMEM;
@@ -1212,7 +1235,7 @@ static int nf_conntrack_init_net(struct net *net)
if (ret < 0)
goto err_ecache;
net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
- &net->ct.hash_vmalloc);
+ &net->ct.hash_vmalloc, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 357ba39d4c8d..3940f996a2e4 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -604,7 +604,7 @@ int nf_conntrack_expect_init(struct net *net)
net->ct.expect_count = 0;
net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
- &net->ct.expect_vmalloc);
+ &net->ct.expect_vmalloc, 0);
if (net->ct.expect_hash == NULL)
goto err1;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index a51bdac9f3a0..30b8e9009f99 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -142,6 +142,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
BUG_ON(me->expect_policy == NULL);
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
+ BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
mutex_lock(&nf_ct_helper_mutex);
hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
@@ -158,6 +159,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_expect *exp;
const struct hlist_node *n, *next;
+ const struct hlist_nulls_node *nn;
unsigned int i;
/* Get rid of expectations */
@@ -174,10 +176,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
}
/* Get rid of expecteds, set helpers to NULL. */
- hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode)
+ hlist_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
unhelp(h, me);
for (i = 0; i < nf_conntrack_htable_size; i++) {
- hlist_for_each_entry(h, n, &net->ct.hash[i], hnode)
+ hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
unhelp(h, me);
}
}
@@ -217,7 +219,7 @@ int nf_conntrack_helper_init(void)
nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
- &nf_ct_helper_vmalloc);
+ &nf_ct_helper_vmalloc, 0);
if (!nf_ct_helper_hash)
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7a16bd462f82..c6439c77953c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
+#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
@@ -404,6 +405,78 @@ nla_put_failure:
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+/*
+ * The general structure of a ctnetlink event is
+ *
+ * CTA_TUPLE_ORIG
+ * <l3/l4-proto-attributes>
+ * CTA_TUPLE_REPLY
+ * <l3/l4-proto-attributes>
+ * CTA_ID
+ * ...
+ * CTA_PROTOINFO
+ * <l4-proto-attributes>
+ * CTA_TUPLE_MASTER
+ * <l3/l4-proto-attributes>
+ *
+ * Therefore the formula is
+ *
+ * size = sizeof(headers) + sizeof(generic_nlas) + 3 * sizeof(tuple_nlas)
+ * + sizeof(protoinfo_nlas)
+ */
+static struct sk_buff *
+ctnetlink_alloc_skb(const struct nf_conntrack_tuple *tuple, gfp_t gfp)
+{
+ struct nf_conntrack_l3proto *l3proto;
+ struct nf_conntrack_l4proto *l4proto;
+ int len;
+
+#define NLA_TYPE_SIZE(type) nla_total_size(sizeof(type))
+
+ /* proto independent part */
+ len = NLMSG_SPACE(sizeof(struct nfgenmsg))
+ + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
+ + 3 * NLA_TYPE_SIZE(u_int8_t) /* CTA_PROTO_NUM */
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_ID */
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_STATUS */
+#ifdef CONFIG_NF_CT_ACCT
+ + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
+ + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_PACKETS */
+ + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_BYTES */
+#endif
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_TIMEOUT */
+ + nla_total_size(0) /* CTA_PROTOINFO */
+ + nla_total_size(0) /* CTA_HELP */
+ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_SECMARK */
+#endif
+#ifdef CONFIG_NF_NAT_NEEDED
+ + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_POS */
+ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_BEFORE */
+ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_AFTER */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ + NLA_TYPE_SIZE(u_int32_t) /* CTA_MARK */
+#endif
+ ;
+
+#undef NLA_TYPE_SIZE
+
+ rcu_read_lock();
+ l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
+ len += l3proto->nla_size;
+
+ l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
+ len += l4proto->nla_size;
+ rcu_read_unlock();
+
+ return alloc_skb(len, gfp);
+}
+
static int ctnetlink_conntrack_event(struct notifier_block *this,
unsigned long events, void *ptr)
{
@@ -437,7 +510,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
if (!item->report && !nfnetlink_has_listeners(group))
return NOTIFY_DONE;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC);
if (!skb)
return NOTIFY_DONE;
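ctnetlink_alloc_skb() replaces the fixed NLMSG_GOODSIZE allocation with a size computed from the attribute layout described in the comment above it: each attribute costs its 4-byte header plus the payload, rounded up to 4 bytes, on top of the netlink message header. A small standalone sketch of that arithmetic; the helper macros are re-derived locally as assumptions rather than taken from a kernel header:

#include <stdio.h>

/* Local stand-ins for the netlink sizing helpers: 4-byte alignment,
 * 4-byte attribute header, 16-byte netlink message header. */
#define ALIGN4(x)		(((x) + 3U) & ~3U)
#define NLA_HDRLEN		4U
#define NLMSG_HDRLEN		16U
#define NLA_TOTAL_SIZE(payload)	ALIGN4(NLA_HDRLEN + (payload))
#define NLMSG_SPACE(payload)	(NLMSG_HDRLEN + ALIGN4(payload))

int main(void)
{
	/* A slice of the protocol-independent part of a conntrack event. */
	unsigned int len = NLMSG_SPACE(4)		/* struct nfgenmsg    */
			 + 3 * NLA_TOTAL_SIZE(0)	/* nested tuple attrs */
			 + 3 * NLA_TOTAL_SIZE(1)	/* CTA_PROTO_NUM (u8) */
			 + 2 * NLA_TOTAL_SIZE(4);	/* CTA_ID, CTA_STATUS */

	printf("minimum event size: %u bytes\n", len);
	return 0;
}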
@@ -536,7 +609,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
@@ -544,27 +617,27 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
- hlist_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
- hnode) {
+ hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
+ hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
ct = nf_ct_tuplehash_to_ctrack(h);
+ if (!atomic_inc_not_zero(&ct->ct_general.use))
+ continue;
/* Dump entries of a given L3 protocol number.
* If it is not specified, ie. l3proto == 0,
* then dump everything. */
if (l3proto && nf_ct_l3num(ct) != l3proto)
- continue;
+ goto releasect;
if (cb->args[1]) {
if (ct != last)
- continue;
+ goto releasect;
cb->args[1] = 0;
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
IPCTNL_MSG_CT_NEW,
1, ct) < 0) {
- if (!atomic_inc_not_zero(&ct->ct_general.use))
- continue;
cb->args[1] = (unsigned long)ct;
goto out;
}
@@ -577,6 +650,8 @@ restart:
if (acct)
memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
}
+releasect:
+ nf_ct_put(ct);
}
if (cb->args[1]) {
cb->args[1] = 0;
@@ -1242,13 +1317,12 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
if (err < 0)
goto err2;
- master_h = __nf_conntrack_find(&init_net, &master);
+ master_h = nf_conntrack_find_get(&init_net, &master);
if (master_h == NULL) {
err = -ENOENT;
goto err2;
}
master_ct = nf_ct_tuplehash_to_ctrack(master_h);
- nf_conntrack_get(&master_ct->ct_general);
__set_bit(IPS_EXPECTED_BIT, &ct->status);
ct->master = master_ct;
}
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 9a62b4efa0e1..1a4568bf7ea5 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -167,6 +167,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
if (proto->l3proto >= AF_MAX)
return -EBUSY;
+ if (proto->tuple_to_nlattr && !proto->nlattr_tuple_size)
+ return -EINVAL;
+
mutex_lock(&nf_ct_proto_mutex);
if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
ret = -EBUSY;
@@ -177,6 +180,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
if (ret < 0)
goto out_unlock;
+ if (proto->nlattr_tuple_size)
+ proto->nla_size = 3 * proto->nlattr_tuple_size();
+
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);
out_unlock:
@@ -263,6 +269,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
if (l4proto->l3proto >= PF_MAX)
return -EBUSY;
+ if ((l4proto->to_nlattr && !l4proto->nlattr_size)
+ || (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
+ return -EINVAL;
+
mutex_lock(&nf_ct_proto_mutex);
if (!nf_ct_protos[l4proto->l3proto]) {
/* l3proto may be loaded later. */
@@ -290,6 +300,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
if (ret < 0)
goto out_unlock;
+ l4proto->nla_size = 0;
+ if (l4proto->nlattr_size)
+ l4proto->nla_size += l4proto->nlattr_size();
+ if (l4proto->nlattr_tuple_size)
+ l4proto->nla_size += 3 * l4proto->nlattr_tuple_size();
+
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
l4proto);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index d3d5a7fd73ce..50dac8dbe7d8 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -669,6 +669,12 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
write_unlock_bh(&dccp_lock);
return 0;
}
+
+static int dccp_nlattr_size(void)
+{
+ return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
+ + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
+}
#endif
#ifdef CONFIG_SYSCTL
@@ -749,8 +755,10 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.print_conntrack = dccp_print_conntrack,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nlattr = dccp_to_nlattr,
+ .nlattr_size = dccp_nlattr_size,
.from_nlattr = nlattr_to_dccp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
@@ -771,6 +779,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.to_nlattr = dccp_to_nlattr,
.from_nlattr = nlattr_to_dccp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 1b279f9d6bf3..117b80112fcb 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -293,6 +293,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.me = THIS_MODULE,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 74e037901199..101b4ad9e817 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -537,6 +537,12 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
return 0;
}
+
+static int sctp_nlattr_size(void)
+{
+ return nla_total_size(0) /* CTA_PROTOINFO_SCTP */
+ + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1);
+}
#endif
#ifdef CONFIG_SYSCTL
@@ -668,8 +674,10 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.me = THIS_MODULE,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nlattr = sctp_to_nlattr,
+ .nlattr_size = sctp_nlattr_size,
.from_nlattr = nlattr_to_sctp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
@@ -696,8 +704,10 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
.me = THIS_MODULE,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nlattr = sctp_to_nlattr,
+ .nlattr_size = sctp_nlattr_size,
.from_nlattr = nlattr_to_sctp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 0aeb8b09a1f7..b5ccf2b4b2e7 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1184,6 +1184,17 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
return 0;
}
+
+static int tcp_nlattr_size(void)
+{
+ return nla_total_size(0) /* CTA_PROTOINFO_TCP */
+ + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
+}
+
+static int tcp_nlattr_tuple_size(void)
+{
+ return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+}
#endif
#ifdef CONFIG_SYSCTL
@@ -1399,9 +1410,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.error = tcp_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nlattr = tcp_to_nlattr,
+ .nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
+ .nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
@@ -1429,9 +1442,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.error = tcp_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nlattr = tcp_to_nlattr,
+ .nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
+ .nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index d4021179e24e..70809d117b91 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -195,6 +195,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
@@ -222,6 +223,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4579d8de13b1..4614696c1b88 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -180,6 +180,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.error = udplite_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
+ .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 4da54b0b9233..193515381970 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -44,40 +44,42 @@ struct ct_iter_state {
unsigned int bucket;
};
-static struct hlist_node *ct_get_first(struct seq_file *seq)
+static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- struct hlist_node *n;
+ struct hlist_nulls_node *n;
for (st->bucket = 0;
st->bucket < nf_conntrack_htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
- if (n)
+ if (!is_a_nulls(n))
return n;
}
return NULL;
}
-static struct hlist_node *ct_get_next(struct seq_file *seq,
- struct hlist_node *head)
+static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
+ struct hlist_nulls_node *head)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
head = rcu_dereference(head->next);
- while (head == NULL) {
- if (++st->bucket >= nf_conntrack_htable_size)
- return NULL;
+ while (is_a_nulls(head)) {
+ if (likely(get_nulls_value(head) == st->bucket)) {
+ if (++st->bucket >= nf_conntrack_htable_size)
+ return NULL;
+ }
head = rcu_dereference(net->ct.hash[st->bucket].first);
}
return head;
}
-static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
+static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
- struct hlist_node *head = ct_get_first(seq);
+ struct hlist_nulls_node *head = ct_get_first(seq);
if (head)
while (pos && (head = ct_get_next(seq, head)))
@@ -107,67 +109,74 @@ static void ct_seq_stop(struct seq_file *s, void *v)
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
- const struct nf_conntrack_tuple_hash *hash = v;
- const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
+ struct nf_conntrack_tuple_hash *hash = v;
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
+ int ret = 0;
NF_CT_ASSERT(ct);
+ if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ return 0;
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
- return 0;
+ goto release;
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
NF_CT_ASSERT(l3proto);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
NF_CT_ASSERT(l4proto);
+ ret = -ENOSPC;
if (seq_printf(s, "%-8s %u %-8s %u %ld ",
l3proto->name, nf_ct_l3num(ct),
l4proto->name, nf_ct_protonum(ct),
timer_pending(&ct->timeout)
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
- return -ENOSPC;
+ goto release;
if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
- return -ENOSPC;
+ goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l3proto, l4proto))
- return -ENOSPC;
+ goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
- return -ENOSPC;
+ goto release;
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
if (seq_printf(s, "[UNREPLIED] "))
- return -ENOSPC;
+ goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
l3proto, l4proto))
- return -ENOSPC;
+ goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
- return -ENOSPC;
+ goto release;
if (test_bit(IPS_ASSURED_BIT, &ct->status))
if (seq_printf(s, "[ASSURED] "))
- return -ENOSPC;
+ goto release;
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (seq_printf(s, "mark=%u ", ct->mark))
- return -ENOSPC;
+ goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if (seq_printf(s, "secmark=%u ", ct->secmark))
- return -ENOSPC;
+ goto release;
#endif
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
- return -ENOSPC;
+ goto release;
+ ret = 0;
+release:
+ nf_ct_put(ct);
return 0;
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 7f404cc64c83..680980954395 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -108,7 +108,7 @@ static int count_them(struct xt_connlimit_data *data,
const struct nf_conntrack_tuple_hash *found;
struct xt_connlimit_conn *conn;
struct xt_connlimit_conn *tmp;
- const struct nf_conn *found_ct;
+ struct nf_conn *found_ct;
struct list_head *hash;
bool addit = true;
int matches = 0;
@@ -123,7 +123,7 @@ static int count_them(struct xt_connlimit_data *data,
/* check the saved connections */
list_for_each_entry_safe(conn, tmp, hash, list) {
- found = __nf_conntrack_find(&init_net, &conn->tuple);
+ found = nf_conntrack_find_get(&init_net, &conn->tuple);
found_ct = NULL;
if (found != NULL)
@@ -151,6 +151,7 @@ static int count_them(struct xt_connlimit_data *data,
* we do not care about connections which are
* closed already -> ditch it
*/
+ nf_ct_put(found_ct);
list_del(&conn->list);
kfree(conn);
continue;
@@ -160,6 +161,7 @@ static int count_them(struct xt_connlimit_data *data,
match->family))
/* same source network -> be counted! */
++matches;
+ nf_ct_put(found_ct);
}
rcu_read_unlock();
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 44a234ef4439..8d28ca5848bc 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -20,23 +20,6 @@ MODULE_DESCRIPTION("Xtables: Bridge physical device match");
MODULE_ALIAS("ipt_physdev");
MODULE_ALIAS("ip6t_physdev");
-static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
-{
- const unsigned long *a = (const unsigned long *)_a;
- const unsigned long *b = (const unsigned long *)_b;
- const unsigned long *mask = (const unsigned long *)_mask;
- unsigned long ret;
-
- ret = (a[0] ^ b[0]) & mask[0];
- if (IFNAMSIZ > sizeof(unsigned long))
- ret |= (a[1] ^ b[1]) & mask[1];
- if (IFNAMSIZ > 2 * sizeof(unsigned long))
- ret |= (a[2] ^ b[2]) & mask[2];
- if (IFNAMSIZ > 3 * sizeof(unsigned long))
- ret |= (a[3] ^ b[3]) & mask[3];
- BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
- return ret;
-}
static bool
physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par)
@@ -85,7 +68,7 @@ physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (!(info->bitmask & XT_PHYSDEV_OP_IN))
goto match_outdev;
indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
- ret = ifname_compare(indev, info->physindev, info->in_mask);
+ ret = ifname_compare_aligned(indev, info->physindev, info->in_mask);
if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
return false;
@@ -95,7 +78,7 @@ match_outdev:
return true;
outdev = nf_bridge->physoutdev ?
nf_bridge->physoutdev->name : nulldevname;
- ret = ifname_compare(outdev, info->physoutdev, info->out_mask);
+ ret = ifname_compare_aligned(outdev, info->physoutdev, info->out_mask);
return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
}
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 6d9c58ec56ac..4e705f87969f 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1037,10 +1037,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
unsigned char *asmptr;
int size;
- /* Netrom empty data frame has no meaning : don't send */
- if (len == 0)
- return 0;
-
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
@@ -1086,7 +1082,11 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
- /* Build a packet */
+ /* Build a packet - the conventional user limit is 236 bytes. We can
+ do ludicrously large NetROM frames but must not overflow */
+ if (len > 65536)
+ return -EMSGSIZE;
+
SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
@@ -1171,11 +1171,6 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
copied = skb->len;
- /* NetRom empty data frame has no meaning : ignore it */
- if (copied == 0) {
- goto out;
- }
-
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
@@ -1191,7 +1186,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_namelen = sizeof(*sax);
-out: skb_free_datagram(sk, skb);
+ skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 650139626581..0f36e8d59b29 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1124,6 +1124,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Build a packet */
SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
+ /* Sanity check the packet size */
+ if (len > 65535)
+ return -EMSGSIZE;
+
size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
diff --git a/net/socket.c b/net/socket.c
index af0205ff56f2..0b14b79c03af 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -328,7 +328,7 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
dentry->d_inode->i_ino);
}
-static struct dentry_operations sockfs_dentry_operations = {
+static const struct dentry_operations sockfs_dentry_operations = {
.d_delete = sockfs_delete_dentry,
.d_dname = sockfs_dname,
};
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 577385a4a5dc..9ced0628d69c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -480,7 +480,7 @@ static int rpc_delete_dentry(struct dentry *dentry)
return 1;
}
-static struct dentry_operations rpc_dentry_operations = {
+static const struct dentry_operations rpc_dentry_operations = {
.d_delete = rpc_delete_dentry,
};
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 092ae6faccca..3c3bc9e579ed 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -10,51 +10,19 @@ config CFG80211_REG_DEBUG
If unsure, say N.
-config NL80211
- bool "nl80211 new netlink interface support"
- depends on CFG80211
- default y
- ---help---
- This option turns on the new netlink interface
- (nl80211) support in cfg80211.
-
- If =n, drivers using mac80211 will be configured via
- wireless extension support provided by that subsystem.
-
- If unsure, say Y.
-
config WIRELESS_OLD_REGULATORY
bool "Old wireless static regulatory definitions"
- default y
+ default n
---help---
This option enables the old static regulatory information
- and uses it within the new framework. This is available
- temporarily as an option to help prevent immediate issues
- due to the switch to the new regulatory framework which
- does require a new userspace application which has the
- database of regulatory information (CRDA) and another for
- setting regulatory domains (iw).
-
- For more information see:
-
- http://wireless.kernel.org/en/developers/Regulatory/CRDA
- http://wireless.kernel.org/en/users/Documentation/iw
-
- It is important to note though that if you *do* have CRDA present
- and if this option is enabled CRDA *will* be called to update the
- regulatory domain (for US and JP only). Support for letting the user
- set the regulatory domain through iw is also supported. This option
- mainly exists to leave around for a kernel release some old static
- regulatory domains that were defined and to keep around the old
- ieee80211_regdom module parameter. This is being phased out and you
- should stop using them ASAP.
-
- Note: You will need CRDA if you want 802.11d support
-
- Say Y unless you have installed a new userspace application.
- Also say Y if have one currently depending on the ieee80211_regdom
- module parameter and cannot port it to use the new userspace
- interfaces.
+ and uses it within the new framework. This option is available
+ for historical reasons and it is advised to leave it off.
+
+ For details see:
+
+ http://wireless.kernel.org/en/developers/Regulatory
+
+ Say N and if you say Y, please tell us why. The default is N.
config WIRELESS_EXT
bool "Wireless extensions"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index dad43c24f695..6d1e7b27b752 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -5,8 +5,7 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
-cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o
+cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o
cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o
-cfg80211-$(CONFIG_NL80211) += nl80211.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 17fe39049740..d1f556535f6d 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -87,7 +87,7 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
}
/* requires cfg80211_mutex to be held! */
-static struct cfg80211_registered_device *
+struct cfg80211_registered_device *
__cfg80211_drv_from_info(struct genl_info *info)
{
int ifindex;
@@ -176,13 +176,14 @@ void cfg80211_put_dev(struct cfg80211_registered_device *drv)
mutex_unlock(&drv->mtx);
}
+/* requires cfg80211_mutex to be held */
int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
char *newname)
{
struct cfg80211_registered_device *drv;
int wiphy_idx, taken = -1, result, digits;
- mutex_lock(&cfg80211_mutex);
+ assert_cfg80211_lock();
/* prohibit calling the thing phy%d when %d is not its number */
sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
@@ -195,30 +196,23 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
* deny the name if it is phy<idx> where <idx> is printed
* without leading zeroes. taken == strlen(newname) here
*/
- result = -EINVAL;
if (taken == strlen(PHY_NAME) + digits)
- goto out_unlock;
+ return -EINVAL;
}
/* Ignore nop renames */
- result = 0;
if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0)
- goto out_unlock;
+ return 0;
/* Ensure another device does not already have this name. */
- list_for_each_entry(drv, &cfg80211_drv_list, list) {
- result = -EINVAL;
+ list_for_each_entry(drv, &cfg80211_drv_list, list)
if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0)
- goto out_unlock;
- }
+ return -EINVAL;
- /* this will only check for collisions in sysfs
- * which is not even always compiled in.
- */
result = device_rename(&rdev->wiphy.dev, newname);
if (result)
- goto out_unlock;
+ return result;
if (rdev->wiphy.debugfsdir &&
!debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
@@ -228,13 +222,9 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n",
newname);
- result = 0;
-out_unlock:
- mutex_unlock(&cfg80211_mutex);
- if (result == 0)
- nl80211_notify_dev_rename(rdev);
+ nl80211_notify_dev_rename(rdev);
- return result;
+ return 0;
}
/* exported functions */
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 6acd483a61f8..d43daa236ef9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -90,6 +90,8 @@ struct cfg80211_internal_bss {
struct rb_node rbn;
unsigned long ts;
struct kref ref;
+ bool hold;
+
/* must be last because of priv member */
struct cfg80211_bss pub;
};
@@ -97,6 +99,9 @@ struct cfg80211_internal_bss {
struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx);
int get_wiphy_idx(struct wiphy *wiphy);
+struct cfg80211_registered_device *
+__cfg80211_drv_from_info(struct genl_info *info);
+
/*
* This function returns a pointer to the driver
* that the genl_info item that is passed refers to.
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
new file mode 100644
index 000000000000..bec5721b6f99
--- /dev/null
+++ b/net/wireless/mlme.c
@@ -0,0 +1,46 @@
+/*
+ * cfg80211 MLME SAP interface
+ *
+ * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/nl80211.h>
+#include <net/cfg80211.h>
+#include "core.h"
+#include "nl80211.h"
+
+void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ nl80211_send_rx_auth(rdev, dev, buf, len);
+}
+EXPORT_SYMBOL(cfg80211_send_rx_auth);
+
+void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ nl80211_send_rx_assoc(rdev, dev, buf, len);
+}
+EXPORT_SYMBOL(cfg80211_send_rx_assoc);
+
+void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf, size_t len)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ nl80211_send_rx_deauth(rdev, dev, buf, len);
+}
+EXPORT_SYMBOL(cfg80211_send_rx_deauth);
+
+void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf,
+ size_t len)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ nl80211_send_rx_disassoc(rdev, dev, buf, len);
+}
+EXPORT_SYMBOL(cfg80211_send_rx_disassoc);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ab9d8f14e151..353e1a4ece83 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -111,6 +111,11 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED },
+
+ [NL80211_ATTR_SSID] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_SSID_LEN },
+ [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 },
+ [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 },
};
/* message building helper */
@@ -131,6 +136,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct nlattr *nl_freqs, *nl_freq;
struct nlattr *nl_rates, *nl_rate;
struct nlattr *nl_modes;
+ struct nlattr *nl_cmds;
enum ieee80211_band band;
struct ieee80211_channel *chan;
struct ieee80211_rate *rate;
@@ -242,6 +248,35 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
}
nla_nest_end(msg, nl_bands);
+ nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS);
+ if (!nl_cmds)
+ goto nla_put_failure;
+
+ i = 0;
+#define CMD(op, n) \
+ do { \
+ if (dev->ops->op) { \
+ i++; \
+ NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \
+ } \
+ } while (0)
+
+ CMD(add_virtual_intf, NEW_INTERFACE);
+ CMD(change_virtual_intf, SET_INTERFACE);
+ CMD(add_key, NEW_KEY);
+ CMD(add_beacon, NEW_BEACON);
+ CMD(add_station, NEW_STATION);
+ CMD(add_mpath, NEW_MPATH);
+ CMD(set_mesh_params, SET_MESH_PARAMS);
+ CMD(change_bss, SET_BSS);
+ CMD(auth, AUTHENTICATE);
+ CMD(assoc, ASSOCIATE);
+ CMD(deauth, DEAUTHENTICATE);
+ CMD(disassoc, DISASSOCIATE);
+
+#undef CMD
+ nla_nest_end(msg, nl_cmds);
+
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -331,16 +366,26 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
int result = 0, rem_txq_params = 0;
struct nlattr *nl_txq_params;
- rdev = cfg80211_get_dev_from_info(info);
- if (IS_ERR(rdev))
- return PTR_ERR(rdev);
+ rtnl_lock();
+
+ mutex_lock(&cfg80211_mutex);
- if (info->attrs[NL80211_ATTR_WIPHY_NAME]) {
+ rdev = __cfg80211_drv_from_info(info);
+ if (IS_ERR(rdev)) {
+ result = PTR_ERR(rdev);
+ goto unlock;
+ }
+
+ mutex_lock(&rdev->mtx);
+
+ if (info->attrs[NL80211_ATTR_WIPHY_NAME])
result = cfg80211_dev_rename(
rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME]));
- if (result)
- goto bad_res;
- }
+
+ mutex_unlock(&cfg80211_mutex);
+
+ if (result)
+ goto bad_res;
if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) {
struct ieee80211_txq_params txq_params;
@@ -436,7 +481,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
bad_res:
- cfg80211_put_dev(rdev);
+ mutex_unlock(&rdev->mtx);
+ unlock:
+ rtnl_unlock();
return result;
}
@@ -572,21 +619,31 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
enum nl80211_iftype type;
struct net_device *dev;
u32 _flags, *flags = NULL;
+ bool change = false;
memset(&params, 0, sizeof(params));
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
+
ifindex = dev->ifindex;
type = dev->ieee80211_ptr->iftype;
dev_put(dev);
- err = -EINVAL;
if (info->attrs[NL80211_ATTR_IFTYPE]) {
- type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
- if (type > NL80211_IFTYPE_MAX)
+ enum nl80211_iftype ntype;
+
+ ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
+ if (type != ntype)
+ change = true;
+ type = ntype;
+ if (type > NL80211_IFTYPE_MAX) {
+ err = -EINVAL;
goto unlock;
+ }
}
if (!drv->ops->change_virtual_intf ||
@@ -602,6 +659,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
}
params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
+ change = true;
}
if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
@@ -611,20 +669,26 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
}
err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS],
&_flags);
- if (!err)
- flags = &_flags;
+ if (err)
+ goto unlock;
+
+ flags = &_flags;
+ change = true;
}
- rtnl_lock();
- err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
- type, flags, &params);
+
+ if (change)
+ err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
+ type, flags, &params);
+ else
+ err = 0;
dev = __dev_get_by_index(&init_net, ifindex);
WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type));
- rtnl_unlock();
-
unlock:
cfg80211_put_dev(drv);
+ unlock_rtnl:
+ rtnl_unlock();
return err;
}
@@ -647,9 +711,13 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ rtnl_lock();
+
drv = cfg80211_get_dev_from_info(info);
- if (IS_ERR(drv))
- return PTR_ERR(drv);
+ if (IS_ERR(drv)) {
+ err = PTR_ERR(drv);
+ goto unlock_rtnl;
+ }
if (!drv->ops->add_virtual_intf ||
!(drv->wiphy.interface_modes & (1 << type))) {
@@ -663,18 +731,17 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
}
- rtnl_lock();
err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
&flags);
err = drv->ops->add_virtual_intf(&drv->wiphy,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
type, err ? NULL : &flags, &params);
- rtnl_unlock();
-
unlock:
cfg80211_put_dev(drv);
+ unlock_rtnl:
+ rtnl_unlock();
return err;
}
@@ -684,9 +751,11 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
int ifindex, err;
struct net_device *dev;
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
ifindex = dev->ifindex;
dev_put(dev);
@@ -695,12 +764,12 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- rtnl_lock();
err = drv->ops->del_virtual_intf(&drv->wiphy, ifindex);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
+ unlock_rtnl:
+ rtnl_unlock();
return err;
}
@@ -752,9 +821,11 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
if (!drv->ops->get_key) {
err = -EOPNOTSUPP;
@@ -782,10 +853,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (mac_addr)
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
- rtnl_lock();
err = drv->ops->get_key(&drv->wiphy, dev, key_idx, mac_addr,
&cookie, get_key_callback);
- rtnl_unlock();
if (err)
goto out;
@@ -803,6 +872,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -831,9 +903,11 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT])
return -EINVAL;
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
if (info->attrs[NL80211_ATTR_KEY_DEFAULT])
func = drv->ops->set_default_key;
@@ -845,13 +919,15 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- rtnl_lock();
err = func(&drv->wiphy, dev, key_idx);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+
+ unlock_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -921,22 +997,25 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
if (!drv->ops->add_key) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
err = drv->ops->add_key(&drv->wiphy, dev, key_idx, mac_addr, &params);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -957,22 +1036,26 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
if (!drv->ops->del_key) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+
+ unlock_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -986,9 +1069,16 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
struct beacon_parameters params;
int haveinfo = 0;
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
switch (info->genlhdr->cmd) {
case NL80211_CMD_NEW_BEACON:
@@ -1049,13 +1139,14 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- rtnl_lock();
err = call(&drv->wiphy, dev, &params);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1065,22 +1156,29 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
int err;
struct net_device *dev;
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto unlock_rtnl;
if (!drv->ops->del_beacon) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
err = drv->ops->del_beacon(&drv->wiphy, dev);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1246,30 +1344,32 @@ static int nl80211_dump_station(struct sk_buff *skb,
return -EINVAL;
}
- netdev = dev_get_by_index(&init_net, ifidx);
- if (!netdev)
- return -ENODEV;
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(&init_net, ifidx);
+ if (!netdev) {
+ err = -ENODEV;
+ goto out_rtnl;
+ }
dev = cfg80211_get_dev_from_ifindex(ifidx);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
- goto out_put_netdev;
+ goto out_rtnl;
}
if (!dev->ops->dump_station) {
- err = -ENOSYS;
+ err = -EOPNOTSUPP;
goto out_err;
}
- rtnl_lock();
-
while (1) {
err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
mac_addr, &sinfo);
if (err == -ENOENT)
break;
if (err)
- goto out_err_rtnl;
+ goto out_err;
if (nl80211_send_station(skb,
NETLINK_CB(cb->skb).pid,
@@ -1285,12 +1385,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
out:
cb->args[1] = sta_idx;
err = skb->len;
- out_err_rtnl:
- rtnl_unlock();
out_err:
cfg80211_put_dev(dev);
- out_put_netdev:
- dev_put(netdev);
+ out_rtnl:
+ rtnl_unlock();
return err;
}
@@ -1311,19 +1409,18 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->get_station) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo);
- rtnl_unlock();
-
if (err)
goto out;
@@ -1340,10 +1437,12 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
out_free:
nlmsg_free(msg);
-
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1411,9 +1510,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
params.plink_action =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
@@ -1424,15 +1525,16 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- rtnl_lock();
err = drv->ops->change_station(&drv->wiphy, dev, mac_addr, &params);
- rtnl_unlock();
out:
if (params.vlan)
dev_put(params.vlan);
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1474,9 +1576,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
&params.station_flags))
return -EINVAL;
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
@@ -1487,15 +1591,21 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- rtnl_lock();
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
err = drv->ops->add_station(&drv->wiphy, dev, mac_addr, &params);
- rtnl_unlock();
out:
if (params.vlan)
dev_put(params.vlan);
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1509,22 +1619,25 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->del_station) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
err = drv->ops->del_station(&drv->wiphy, dev, mac_addr);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1605,22 +1718,29 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
return -EINVAL;
}
- netdev = dev_get_by_index(&init_net, ifidx);
- if (!netdev)
- return -ENODEV;
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(&init_net, ifidx);
+ if (!netdev) {
+ err = -ENODEV;
+ goto out_rtnl;
+ }
dev = cfg80211_get_dev_from_ifindex(ifidx);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
- goto out_put_netdev;
+ goto out_rtnl;
}
if (!dev->ops->dump_mpath) {
- err = -ENOSYS;
+ err = -EOPNOTSUPP;
goto out_err;
}
- rtnl_lock();
+ if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
while (1) {
err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx,
@@ -1628,7 +1748,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
if (err == -ENOENT)
break;
if (err)
- goto out_err_rtnl;
+ goto out_err;
if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1643,12 +1763,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
out:
cb->args[1] = path_idx;
err = skb->len;
- out_err_rtnl:
- rtnl_unlock();
out_err:
cfg80211_put_dev(dev);
- out_put_netdev:
- dev_put(netdev);
+ out_rtnl:
+ rtnl_unlock();
return err;
}
@@ -1670,19 +1788,23 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->get_mpath) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
- err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo);
- rtnl_unlock();
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo);
if (err)
goto out;
@@ -1699,10 +1821,12 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
out_free:
nlmsg_free(msg);
-
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1723,22 +1847,35 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->change_mpath) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
@@ -1758,22 +1895,35 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->add_mpath) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1787,22 +1937,25 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC])
dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->del_mpath) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
err = drv->ops->del_mpath(&drv->wiphy, dev, dst);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1835,22 +1988,30 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
}
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->change_bss) {
err = -EOPNOTSUPP;
goto out;
}
- rtnl_lock();
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
err = drv->ops->change_bss(&drv->wiphy, dev, &params);
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -1945,10 +2106,12 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
struct nlattr *pinfoattr;
struct sk_buff *msg;
+ rtnl_lock();
+
/* Look up our device */
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->get_mesh_params) {
err = -EOPNOTSUPP;
@@ -1956,9 +2119,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
}
/* Get the mesh params */
- rtnl_lock();
err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params);
- rtnl_unlock();
if (err)
goto out;
@@ -2007,13 +2168,16 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
err = genlmsg_unicast(msg, info->snd_pid);
goto out;
-nla_put_failure:
+ nla_put_failure:
genlmsg_cancel(msg, hdr);
err = -EMSGSIZE;
-out:
+ out:
/* Cleanup */
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -2060,9 +2224,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
parent_attr, nl80211_meshconf_params_policy))
return -EINVAL;
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
if (!drv->ops->set_mesh_params) {
err = -EOPNOTSUPP;
@@ -2109,14 +2275,15 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
nla_get_u16);
/* Apply changes */
- rtnl_lock();
err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask);
- rtnl_unlock();
out:
/* cleanup */
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -2262,43 +2429,6 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
-static int nl80211_set_mgmt_extra_ie(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct cfg80211_registered_device *drv;
- int err;
- struct net_device *dev;
- struct mgmt_extra_ie_params params;
-
- memset(&params, 0, sizeof(params));
-
- if (!info->attrs[NL80211_ATTR_MGMT_SUBTYPE])
- return -EINVAL;
- params.subtype = nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]);
- if (params.subtype > 15)
- return -EINVAL; /* FC Subtype field is 4 bits (0..15) */
-
- if (info->attrs[NL80211_ATTR_IE]) {
- params.ies = nla_data(info->attrs[NL80211_ATTR_IE]);
- params.ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
- }
-
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
- if (err)
- return err;
-
- if (drv->ops->set_mgmt_extra_ie) {
- rtnl_lock();
- err = drv->ops->set_mgmt_extra_ie(&drv->wiphy, dev, &params);
- rtnl_unlock();
- } else
- err = -EOPNOTSUPP;
-
- cfg80211_put_dev(drv);
- dev_put(dev);
- return err;
-}
-
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *drv;
@@ -2312,9 +2442,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
enum ieee80211_band band;
size_t ie_len;
+ rtnl_lock();
+
err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
if (err)
- return err;
+ goto out_rtnl;
wiphy = &drv->wiphy;
@@ -2323,11 +2455,14 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- rtnl_lock();
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
if (drv->scan_req) {
err = -EBUSY;
- goto out_unlock;
+ goto out;
}
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
@@ -2335,7 +2470,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
n_channels++;
if (!n_channels) {
err = -EINVAL;
- goto out_unlock;
+ goto out;
}
} else {
for (band = 0; band < IEEE80211_NUM_BANDS; band++)
@@ -2349,7 +2484,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
if (n_ssids > wiphy->max_scan_ssids) {
err = -EINVAL;
- goto out_unlock;
+ goto out;
}
if (info->attrs[NL80211_ATTR_IE])
@@ -2363,7 +2498,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
+ ie_len, GFP_KERNEL);
if (!request) {
err = -ENOMEM;
- goto out_unlock;
+ goto out;
}
request->channels = (void *)((char *)request + sizeof(*request));
@@ -2434,11 +2569,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
drv->scan_req = NULL;
kfree(request);
}
- out_unlock:
- rtnl_unlock();
out:
cfg80211_put_dev(drv);
dev_put(dev);
+ out_rtnl:
+ rtnl_unlock();
+
return err;
}
@@ -2558,6 +2694,288 @@ static int nl80211_dump_scan(struct sk_buff *skb,
return err;
}
+static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
+{
+ return auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM ||
+ auth_type == NL80211_AUTHTYPE_SHARED_KEY ||
+ auth_type == NL80211_AUTHTYPE_FT ||
+ auth_type == NL80211_AUTHTYPE_NETWORK_EAP;
+}
+
+static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *drv;
+ struct net_device *dev;
+ struct cfg80211_auth_request req;
+ struct wiphy *wiphy;
+ int err;
+
+ rtnl_lock();
+
+ err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!drv->ops->auth) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MAC]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ wiphy = &drv->wiphy;
+ memset(&req, 0, sizeof(req));
+
+ req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ req.chan = ieee80211_get_channel(
+ wiphy,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+ if (!req.chan) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (info->attrs[NL80211_ATTR_SSID]) {
+ req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+ }
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+ req.auth_type =
+ nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
+ if (!nl80211_valid_auth_type(req.auth_type)) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = drv->ops->auth(&drv->wiphy, dev, &req);
+
+out:
+ cfg80211_put_dev(drv);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *drv;
+ struct net_device *dev;
+ struct cfg80211_assoc_request req;
+ struct wiphy *wiphy;
+ int err;
+
+ rtnl_lock();
+
+ err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!drv->ops->assoc) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MAC] ||
+ !info->attrs[NL80211_ATTR_SSID]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ wiphy = &drv->wiphy;
+ memset(&req, 0, sizeof(req));
+
+ req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ req.chan = ieee80211_get_channel(
+ wiphy,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+ if (!req.chan) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ err = drv->ops->assoc(&drv->wiphy, dev, &req);
+
+out:
+ cfg80211_put_dev(drv);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *drv;
+ struct net_device *dev;
+ struct cfg80211_deauth_request req;
+ struct wiphy *wiphy;
+ int err;
+
+ rtnl_lock();
+
+ err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!drv->ops->deauth) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MAC]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ wiphy = &drv->wiphy;
+ memset(&req, 0, sizeof(req));
+
+ req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (info->attrs[NL80211_ATTR_REASON_CODE]) {
+ req.reason_code =
+ nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
+ if (req.reason_code == 0) {
+ /* Reason Code 0 is reserved */
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ err = drv->ops->deauth(&drv->wiphy, dev, &req);
+
+out:
+ cfg80211_put_dev(drv);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *drv;
+ struct net_device *dev;
+ struct cfg80211_disassoc_request req;
+ struct wiphy *wiphy;
+ int err;
+
+ rtnl_lock();
+
+ err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!drv->ops->disassoc) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MAC]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ wiphy = &drv->wiphy;
+ memset(&req, 0, sizeof(req));
+
+ req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (info->attrs[NL80211_ATTR_REASON_CODE]) {
+ req.reason_code =
+ nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
+ if (req.reason_code == 0) {
+ /* Reason Code 0 is reserved */
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ err = drv->ops->disassoc(&drv->wiphy, dev, &req);
+
+out:
+ cfg80211_put_dev(drv);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
static struct genl_ops nl80211_ops[] = {
{
.cmd = NL80211_CMD_GET_WIPHY,
@@ -2725,12 +3143,6 @@ static struct genl_ops nl80211_ops[] = {
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = NL80211_CMD_SET_MGMT_EXTRA_IE,
- .doit = nl80211_set_mgmt_extra_ie,
- .policy = nl80211_policy,
- .flags = GENL_ADMIN_PERM,
- },
- {
.cmd = NL80211_CMD_TRIGGER_SCAN,
.doit = nl80211_trigger_scan,
.policy = nl80211_policy,
@@ -2741,6 +3153,33 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.dumpit = nl80211_dump_scan,
},
+ {
+ .cmd = NL80211_CMD_AUTHENTICATE,
+ .doit = nl80211_authenticate,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_ASSOCIATE,
+ .doit = nl80211_associate,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_DEAUTHENTICATE,
+ .doit = nl80211_deauthenticate,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_DISASSOCIATE,
+ .doit = nl80211_disassociate,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
 };
+static struct genl_multicast_group nl80211_mlme_mcgrp = {
+ .name = "mlme",
+};
/* multicast groups */
@@ -2887,6 +3326,71 @@ nla_put_failure:
nlmsg_free(msg);
}
+static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len,
+ enum nl80211_commands cmd)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *buf, size_t len)
+{
+ nl80211_send_mlme_event(rdev, netdev, buf, len,
+ NL80211_CMD_AUTHENTICATE);
+}
+
+void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *buf,
+ size_t len)
+{
+ nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE);
+}
+
+void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *buf,
+ size_t len)
+{
+ nl80211_send_mlme_event(rdev, netdev, buf, len,
+ NL80211_CMD_DEAUTHENTICATE);
+}
+
+void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *buf,
+ size_t len)
+{
+ nl80211_send_mlme_event(rdev, netdev, buf, len,
+ NL80211_CMD_DISASSOCIATE);
+}
+
/* initialisation/exit functions */
int nl80211_init(void)
@@ -2915,6 +3419,10 @@ int nl80211_init(void)
if (err)
goto err_out;
+ err = genl_register_mc_group(&nl80211_fam, &nl80211_mlme_mcgrp);
+ if (err)
+ goto err_out;
+
return 0;
err_out:
genl_unregister_family(&nl80211_fam);
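For context, the NL80211_CMD_AUTHENTICATE/ASSOCIATE/DEAUTHENTICATE/DISASSOCIATE handlers added above read their parameters from the usual nl80211 attributes (NL80211_ATTR_IFINDEX, NL80211_ATTR_MAC, NL80211_ATTR_SSID, NL80211_ATTR_WIPHY_FREQ, NL80211_ATTR_AUTH_TYPE, NL80211_ATTR_REASON_CODE) and dispatch to the driver's auth/assoc/deauth/disassoc ops under the RTNL. A minimal userspace sketch of issuing NL80211_CMD_AUTHENTICATE over generic netlink could look like the following; it is illustrative only, assumes libnl-3, and the interface name, BSSID, SSID and frequency are placeholders rather than anything taken from this patch.

/* Hypothetical userspace sketch (not part of this patch); libnl-3 assumed. */
#include <net/if.h>
#include <string.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

int main(void)
{
	static const unsigned char bssid[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	static const char ssid[] = "example-ssid";
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	if (!msg)
		return 1;

	/* Build NL80211_CMD_AUTHENTICATE with the attributes the new doit expects. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_AUTHENTICATE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
	nla_put(msg, NL80211_ATTR_MAC, sizeof(bssid), bssid);
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 2412);
	nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
	nla_put_u32(msg, NL80211_ATTR_AUTH_TYPE, NL80211_AUTHTYPE_OPEN_SYSTEM);

	err = nl_send_auto(sk, msg);		/* queue the request */
	if (err >= 0)
		err = nl_wait_for_ack(sk);	/* ack carries the handler's error code */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0;
}

The actual MLME outcome is then reported asynchronously: nl80211_send_rx_auth() and friends (added below) multicast the received frame on the new "mlme" group as an NL80211_CMD_AUTHENTICATE/ASSOCIATE/DEAUTHENTICATE/DISASSOCIATE event.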
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index e65a3c38c52f..b77af4ab80be 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -3,7 +3,6 @@
#include "core.h"
-#ifdef CONFIG_NL80211
extern int nl80211_init(void);
extern void nl80211_exit(void);
extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
@@ -12,30 +11,17 @@ extern void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
struct net_device *netdev);
extern void nl80211_send_reg_change_event(struct regulatory_request *request);
-#else
-static inline int nl80211_init(void)
-{
- return 0;
-}
-static inline void nl80211_exit(void)
-{
-}
-static inline void nl80211_notify_dev_rename(
- struct cfg80211_registered_device *rdev)
-{
-}
-static inline void
-nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
-{}
-static inline void nl80211_send_scan_aborted(
- struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
-{}
-static inline void
-nl80211_send_reg_change_event(struct regulatory_request *request)
-{
-}
-#endif /* CONFIG_NL80211 */
+extern void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len);
+extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len);
+extern void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len);
+extern void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len);
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index eb8b8ed16155..6327e1617acb 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -122,9 +122,14 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom =
#ifdef CONFIG_WIRELESS_OLD_REGULATORY
static char *ieee80211_regdom = "US";
+#else
+static char *ieee80211_regdom = "00";
+#endif
+
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
+#ifdef CONFIG_WIRELESS_OLD_REGULATORY
/*
* We assume 40 MHz bandwidth for the old regulatory work.
* We make emphasis we are using the exact same frequencies
@@ -1415,16 +1420,6 @@ new_request:
return r;
}
- /*
- * Note: When CONFIG_WIRELESS_OLD_REGULATORY is enabled
- * AND if CRDA is NOT present nothing will happen, if someone
- * wants to bother with 11d with OLD_REG you can add a timer.
- * If after x amount of time nothing happens you can call:
- *
- * return set_regdom(country_ie_regdomain);
- *
- * to intersect with the static rd
- */
return call_crda(last_request->alpha2);
}
@@ -1601,6 +1596,10 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
assert_cfg80211_lock();
+ if (unlikely(last_request->initiator !=
+ NL80211_REGDOM_SET_BY_COUNTRY_IE))
+ return false;
+
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (!request_wiphy)
@@ -1663,7 +1662,9 @@ void regulatory_hint_11d(struct wiphy *wiphy,
* we optimize an early check to exit out early if we don't have to
* do anything
*/
- if (likely(wiphy_idx_valid(last_request->wiphy_idx))) {
+ if (likely(last_request->initiator ==
+ NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ wiphy_idx_valid(last_request->wiphy_idx))) {
struct cfg80211_registered_device *drv_last_ie;
drv_last_ie =
@@ -2022,28 +2023,21 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
*/
BUG_ON(!country_ie_regdomain);
+ BUG_ON(rd == country_ie_regdomain);
- if (rd != country_ie_regdomain) {
- /*
- * Intersect what CRDA returned and our what we
- * had built from the Country IE received
- */
+ /*
+ * Intersect what CRDA returned and what we
+ * had built from the Country IE received
+ */
- intersected_rd = regdom_intersect(rd, country_ie_regdomain);
+ intersected_rd = regdom_intersect(rd, country_ie_regdomain);
- reg_country_ie_process_debug(rd, country_ie_regdomain,
- intersected_rd);
+ reg_country_ie_process_debug(rd,
+ country_ie_regdomain,
+ intersected_rd);
- kfree(country_ie_regdomain);
- country_ie_regdomain = NULL;
- } else {
- /*
- * This would happen when CRDA was not present and
- * OLD_REGULATORY was enabled. We intersect our Country
- * IE rd and what was set on cfg80211 originally
- */
- intersected_rd = regdom_intersect(rd, cfg80211_regdomain);
- }
+ kfree(country_ie_regdomain);
+ country_ie_regdomain = NULL;
if (!intersected_rd)
return -EINVAL;
@@ -2135,15 +2129,18 @@ int regulatory_init(void)
/*
* The old code still requests for a new regdomain and if
* you have CRDA you get it updated, otherwise you get
- * stuck with the static values. We ignore "EU" code as
- * that is not a valid ISO / IEC 3166 alpha2
+ * stuck with the static values. Since "EU" is not a valid
+ * ISO / IEC 3166 alpha2 code, we can't expect userspace to
+ * give us a regulatory domain for it. We need last_request
+ * initialized though, so let's just send a request which we
+ * know will be ignored... this crap will be removed once
+ * OLD_REG dies.
*/
- if (ieee80211_regdom[0] != 'E' || ieee80211_regdom[1] != 'U')
- err = regulatory_hint_core(ieee80211_regdom);
+ err = regulatory_hint_core(ieee80211_regdom);
#else
cfg80211_regdomain = cfg80211_world_regdom;
- err = regulatory_hint_core("00");
+ err = regulatory_hint_core(ieee80211_regdom);
#endif
if (err) {
if (err == -ENOMEM)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 280dbcd02c15..2a00e362f5fe 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -80,7 +80,8 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
bool expired = false;
list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
- if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
+ if (bss->hold ||
+ !time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
continue;
list_del(&bss->list);
rb_erase(&bss->rbn, &dev->bss_tree);
@@ -471,6 +472,30 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
}
EXPORT_SYMBOL(cfg80211_unlink_bss);
+void cfg80211_hold_bss(struct cfg80211_bss *pub)
+{
+ struct cfg80211_internal_bss *bss;
+
+ if (!pub)
+ return;
+
+ bss = container_of(pub, struct cfg80211_internal_bss, pub);
+ bss->hold = true;
+}
+EXPORT_SYMBOL(cfg80211_hold_bss);
+
+void cfg80211_unhold_bss(struct cfg80211_bss *pub)
+{
+ struct cfg80211_internal_bss *bss;
+
+ if (!pub)
+ return;
+
+ bss = container_of(pub, struct cfg80211_internal_bss, pub);
+ bss->hold = false;
+}
+EXPORT_SYMBOL(cfg80211_unhold_bss);
+
#ifdef CONFIG_WIRELESS_EXT
int cfg80211_wext_siwscan(struct net_device *dev,
struct iw_request_info *info,
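The cfg80211_hold_bss()/cfg80211_unhold_bss() exports added above only toggle the internal hold flag that cfg80211_bss_expire() now checks, so a held entry is skipped by the IEEE80211_SCAN_RESULT_EXPIRE aging. A hedged sketch of how a caller might pin the BSS it is currently using follows; the cfg80211_get_bss()/cfg80211_put_bss() lookup and the capability filter are assumptions about the surrounding cfg80211 API of this era, not something introduced by this patch.

/* Hypothetical sketch (not part of this patch): keep the BSS in use alive
 * across scans by setting its hold flag. Assumes cfg80211_get_bss() and
 * cfg80211_put_bss() from the cfg80211 API of this era, and that the new
 * hold helpers are declared in <net/cfg80211.h>. */
#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static struct cfg80211_bss *example_pin_bss(struct wiphy *wiphy,
					    struct ieee80211_channel *chan,
					    const u8 *bssid,
					    const u8 *ssid, size_t ssid_len)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
			       WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
	if (!bss)
		return NULL;

	cfg80211_hold_bss(bss);		/* set the hold flag: skipped by expiry */
	return bss;
}

static void example_unpin_bss(struct cfg80211_bss *bss)
{
	if (!bss)
		return;

	cfg80211_unhold_bss(bss);	/* clear the hold flag again */
	cfg80211_put_bss(bss);		/* drop the reference taken by get_bss */
}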
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index b84a9b4fe96a..0fd1db6e95bb 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -66,6 +66,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
struct cfg80211_registered_device *rdev;
struct vif_params vifparams;
enum nl80211_iftype type;
+ int ret;
if (!wdev)
return -EOPNOTSUPP;
@@ -96,10 +97,16 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
return -EINVAL;
}
+ if (type == wdev->iftype)
+ return 0;
+
memset(&vifparams, 0, sizeof(vifparams));
- return rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type,
- NULL, &vifparams);
+ ret = rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type,
+ NULL, &vifparams);
+ WARN_ON(!ret && wdev->iftype != type);
+
+ return ret;
}
EXPORT_SYMBOL(cfg80211_wext_siwmode);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 9ca17b1ce52e..ed80af8ca5fb 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1035,6 +1035,12 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
sx25.sx25_addr = x25->dest_addr;
}
+ /* Sanity check the packet size */
+ if (len > 65535) {
+ rc = -EMSGSIZE;
+ goto out;
+ }
+
SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
/* Build a packet */
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 62a5425cc6aa..82271720d970 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1615,7 +1615,7 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk)
spin_lock_bh(&xfrm_state_lock);
list_del(&walk->all);
- spin_lock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);