default x2apic cluster mode on platforms
supporting x2apic.
- x86_mrst_timer= [X86-32,APBT]
- Choose timer option for x86 Moorestown MID platform.
+ x86_intel_mid_timer= [X86-32,APBT]
+ Choose timer option for x86 Intel MID platform.
Two valid options are apbt timer only and lapic timer
plus one apbt timer for broadcast timer.
- x86_mrst_timer=apbt_only | lapic_and_apbt
+ x86_intel_mid_timer=apbt_only | lapic_and_apbt
xen_emul_unplug= [HW,X86,XEN]
Unplug Xen emulated devices
if X86_WANT_INTEL_MID
config X86_INTEL_MID
- bool
-
-config X86_MDFLD
- bool "Medfield MID platform"
+ bool "Intel MID platform"
depends on PCI
depends on PCI_GOANY
depends on X86_IO_APIC
- select X86_INTEL_MID
select SFI
+ select INTEL_SCU_IPC
+ select X86_PLATFORM_DEVICES
+ select ARCH_HAVE_CUSTOM_GPIO_H
+ ---help---
+ Intel MID is Intel's Low Power Intel Architecture (LPIA) based Mobile
+ Internet Device(MID) platform.
+	  Unlike standard x86 PCs, Intel MID does not have many legacy devices
+	  nor standard legacy replacement devices/features, e.g. it does not
+	  contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+
+config X86_MDFLD
+ bool "Medfield MID platform"
+ depends on X86_INTEL_MID
select DW_APB_TIMER
select APB_TIMER
select I2C
select SPI
- select INTEL_SCU_IPC
- select X86_PLATFORM_DEVICES
select MFD_INTEL_MSIC
---help---
Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin
- Internet Device(MID) platform.
+ Internet Device(MID) platform.
Unlike standard x86 PCs, Medfield does not have many legacy devices
nor standard legacy replacement devices/features. e.g. Medfield does
not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+config ATOM_SOC_POWER
+ bool "Select Atom SOC Power"
+
+choice
+ prompt "Select PMU support"
+ depends on ATOM_SOC_POWER
+ default REMOVEME_INTEL_ATOM_MDFLD_POWER
+
+config REMOVEME_INTEL_ATOM_MDFLD_POWER
+ bool "Medfield"
+
+config REMOVEME_INTEL_ATOM_CLV_POWER
+ bool "Clovertrail"
+
+config REMOVEME_INTEL_ATOM_MRFLD_POWER
+ bool "Merrifield"
+
+endchoice
+
+config INTEL_DEBUG_FEATURE
+ bool "Debug feature interface on Intel MID platform"
+ depends on X86_INTEL_MID
+ ---help---
+ Provides an interface to list the debug features
+ that are enabled on an Intel MID platform. The
+ enabling of the debug features depends on the mode
+ the device is in (e.g. manufacturing, production,
+ end user, etc...).
+
endif
config X86_INTEL_LPSS
as it is off-chip. APB timers are always running regardless of CPU
C states, they are used as per CPU clockevent device when possible.
+config ARCH_NR_GPIO
+ int
+ depends on ARCH_HAVE_CUSTOM_GPIO_H
+ default 512 if X86_INTEL_MID
+ default 0
+ help
+ Maximum number of GPIOs in the system.
+
+ If unsure, leave the default value.
+
# Mark as expert because too many people got it wrong.
# The code disables itself when not needed.
config DMI
accordingly optimized code. Use a recent GCC with specific Atom
support in order to fully benefit from selecting this option.
+config MSLM
+ bool "Intel Silvermont (Atom)"
+ ---help---
+
+	  Select this for the Intel Silvermont (Atom) platform. Unlike
+	  earlier in-order Atom CPUs, Silvermont has an out-of-order
+	  pipeline and benefits from code tuned specifically for it. Use a
+	  recent GCC with Silvermont (slm) support in order to fully benefit
+	  from selecting this option.
+
config GENERIC_CPU
bool "Generic-x86-64"
depends on X86_64
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || MPSC
- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MSLM || MVIAC7 || X86_GENERIC || GENERIC_CPU
default "4" if MELAN || M486 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
config X86_USE_PPRO_CHECKSUM
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MSLM
config X86_USE_3DNOW
def_bool y
config X86_TSC
def_bool y
- depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64
+ depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MSLM) && !X86_NUMAQ) || X86_64
config X86_CMPXCHG64
def_bool y
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
+ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MSLM
# this should be set for all -march=.. options where the compiler
# generates cmov.
config X86_CMOV
def_bool y
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MSLM || MGEODE_LX)
config X86_MINIMUM_CPU_FAMILY
int
$(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+ cflags-$(CONFIG_MSLM) += $(call cc-option,-march=slm) \
+ $(call cc-option,-mtune=slm,$(call cc-option,-mtune=generic))
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
KBUILD_CFLAGS += $(cflags-y)
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+cflags-$(CONFIG_MSLM) += $(call cc-option,-march=slm,$(call cc-option,-march=core2,-march=i686)) \
+ $(call cc-option,-mtune=slm,$(call cc-option,-mtune=generic))
# AMD Elan support
cflags-$(CONFIG_MELAN) += -march=i486
--- /dev/null
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/x86 3.10.17 Kernel Configuration
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION="-poky-edison"
+CONFIG_LOCALVERSION_AUTO=n
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_NET_NS=y
+CONFIG_UIDGID_CONVERTED=y
+# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_EXPERT=y
+# CONFIG_UPTIME_LIMITED_KERNEL is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_KPROBES_ON_FTRACE=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_MODULE_SIG is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_THROTTLING=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+CONFIG_X86_WANT_INTEL_MID=y
+CONFIG_X86_INTEL_MID=y
+# CONFIG_X86_MDFLD is not set
+CONFIG_ATOM_SOC_POWER=y
+# CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER is not set
+# CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER is not set
+CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER=y
+CONFIG_INTEL_DEBUG_FEATURE=y
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+# CONFIG_HYPERVISOR_GUEST is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_MSLM=y
+CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_APB_TIMER is not set
+CONFIG_ARCH_NR_GPIO=512
+CONFIG_DMI=y
+CONFIG_NR_CPUS=2
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+# CONFIG_X86_MCE_AMD is not set
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_HIGHPTE is not set
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+# CONFIG_ARCH_RANDOM is not set
+CONFIG_X86_SMAP=y
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x1200000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=100
+CONFIG_PM_WAKELOCKS_GC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TRACE_RTC is not set
+# CONFIG_ACPI is not set
+CONFIG_SFI=y
+# CONFIG_APM is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+CONFIG_X86_SFI_CPUFREQ=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+# CONFIG_PCIEASPM_DEFAULT is not set
+# CONFIG_PCIEASPM_POWERSAVE is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCIE_PME=y
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_NET_IPVTI is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETLABEL is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_SECMARK is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_HL=y
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+CONFIG_NETFILTER_XT_TARGET_NETMAP=y
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_HL=y
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+CONFIG_IP_NF_RAW=y
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+# CONFIG_IP6_NF_SECURITY is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_L2TP=y
+# CONFIG_L2TP_DEBUGFS is not set
+# CONFIG_L2TP_V3 is not set
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+# CONFIG_BT_HCIUART_BCSP is not set
+# CONFIG_BT_HCIUART_ATH3K is not set
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIUART_3WIRE is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+CONFIG_CFG80211_DEVELOPER_WARNINGS=y
+# CONFIG_CFG80211_REG_DEBUG is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_VIRTIO_BLK is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_INTEL_MID_PTI=y
+CONFIG_INTEL_PTI_STM=y
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_BMP085_SPI is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+CONFIG_EMMC_IPANIC=y
+CONFIG_EMMC_IPANIC_PLABEL="panic"
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+CONFIG_BCM_BT_LPM=m
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+CONFIG_MII=y
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_VXLAN is not set
+CONFIG_NETCONSOLE=y
+# CONFIG_NETCONSOLE_DYNAMIC is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_VIRTIO_NET is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+# CONFIG_ETHERNET is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_EEM is not set
+CONFIG_USB_NET_CDC_NCM=y
+# CONFIG_USB_NET_CDC_MBIM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+CONFIG_WIFI_CONTROL_FUNC=y
+CONFIG_WIFI_PLATFORM_DATA=y
+# CONFIG_ATH_CARDS is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTLWIFI is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_MOUSE_SYNAPTICS_USB is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+CONFIG_INPUT_TABLET=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_HANWANG is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_USB_WACOM is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_INTEL_MID is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+CONFIG_N_GSM=y
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+# CONFIG_STALDRV is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+CONFIG_FIX_EARLYCON_MEM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+CONFIG_SERIAL_MRST_MAX3110=y
+CONFIG_SERIAL_MFD_HSU=y
+CONFIG_SERIAL_MFD_HSU_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_VIRTIO_CONSOLE is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_GEODE=y
+CONFIG_HW_RANDOM_VIA=y
+# CONFIG_HW_RANDOM_VIRTIO is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE_FORK=y
+CONFIG_I2C_DESIGNWARE_PCI_FORK=y
+# CONFIG_I2C_DESIGNWARE_PLATFORM_FORK is not set
+CONFIG_I2C_DW_SPEED_MODE_DEBUG=y
+# CONFIG_I2C_PMIC is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_INTEL_MID_SSP=y
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_TOPCLIFF_PCH is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_PCI=y
+CONFIG_SPI_DW_MID_DMA=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIODEBUG=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_TS5500 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_ICH is not set
+# CONFIG_GPIO_VX855 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_WM8994 is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_AMD8111 is not set
+CONFIG_GPIO_LANGWELL=y
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_GPIO_MSIC is not set
+
+#
+# USB GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_PMIC_CCSM=y
+CONFIG_BQ24261_CHARGER=y
+# CONFIG_PDA_POWER is not set
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_BATTERY_MAX17042=y
+# CONFIG_BATTERY_INTEL_MID is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+CONFIG_INTEL_MCU=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+CONFIG_SENSORS_CORETEMP=y
+CONFIG_SENSORS_CORETEMP_INTERRUPT=y
+# CONFIG_SENSORS_IIO_HWMON is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+CONFIG_MSIC_GPADC=y
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_APPLESMC is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_INTEL_POWERCLAMP is not set
+CONFIG_SENSORS_THERMAL_MRFLD=y
+CONFIG_SOC_THERMAL=y
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+# CONFIG_INTEL_SCU_WATCHDOG is not set
+CONFIG_INTEL_SCU_WATCHDOG_EVO=y
+CONFIG_DISABLE_SCU_WATCHDOG=y
+# CONFIG_ITCO_WDT is not set
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+CONFIG_MFD_INTEL_MSIC=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+CONFIG_MFD_WM8994=y
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+CONFIG_REGULATOR_WM8994=y
+CONFIG_REGULATOR_PMIC_BASIN_COVE=y
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_RC_SUPPORT is not set
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+# CONFIG_VIDEO_V4L2_INT_DEVICE is not set
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GL860 is not set
+# CONFIG_USB_GSPCA_BENQ is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_CPIA1 is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_JEILINJ is not set
+# CONFIG_USB_GSPCA_JL2005BCD is not set
+# CONFIG_USB_GSPCA_KINECT is not set
+# CONFIG_USB_GSPCA_KONICA is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_NW80X is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_OV534_9 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7302 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SE401 is not set
+# CONFIG_USB_GSPCA_SN9C2028 is not set
+# CONFIG_USB_GSPCA_SN9C20X is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SPCA1528 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_SQ930X is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_STV0680 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TOPRO is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+# CONFIG_USB_GSPCA_VICAM is not set
+# CONFIG_USB_GSPCA_XIRLINK_CIT is not set
+# CONFIG_USB_GSPCA_ZC3XX is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_USB_SN9C102 is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_V4L_TEST_DRIVERS is not set
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Encoders, decoders, sensors and other helper chips
+#
+
+#
+# Audio decoders, processors and mixers
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_UDA1342 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_AD9389B is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_VS6624 is not set
+# CONFIG_VIDEO_MT9M032 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_M5MOLS is not set
+# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_VIDEO_S5K4ECGX is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_AS3645A is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_M52790 is not set
+
+#
+# Sensors used on soc_camera driver
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_AU8522_V4L is not set
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_DRM=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_I915 is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_GMA500 is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_HDMI=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+# CONFIG_SND_COMPRESS_OFFLOAD is not set
+# CONFIG_SND_EFFECTS_OFFLOAD is not set
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DMA_SGBUF=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_PCSP is not set
+# CONFIG_SND_DUMMY is not set
+CONFIG_SND_ALOOP=y
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_PCI is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
+# CONFIG_SND_USB_6FIRE is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_MFLD_MACHINE is not set
+CONFIG_SND_INTEL_SST=y
+CONFIG_SND_MRFLD_MACHINE=y
+CONFIG_SND_SST_PLATFORM=y
+CONFIG_SST_MRFLD_DPCM=y
+CONFIG_SND_SST_MACHINE=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_WM_HUBS=y
+CONFIG_SND_SOC_WM8994=y
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SOUND_PRIME is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO_TPKBD is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=y
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_DWC3=y
+# CONFIG_USB_DWC3_HOST is not set
+CONFIG_USB_DWC3_GADGET=y
+# CONFIG_USB_DWC3_DUAL_ROLE is not set
+
+#
+# Platform Glue Driver Support
+#
+# CONFIG_USB_DWC3_PCI is not set
+CONFIG_USB_DWC3_OTG=y
+CONFIG_USB_DWC3_INTEL_MRFL=y
+# CONFIG_USB_DWC3_INTEL_BYT is not set
+CONFIG_USB_DWC3_DEVICE_INTEL=y
+CONFIG_USB_DWC3_HOST_INTEL=y
+
+#
+# Debugging features
+#
+# CONFIG_USB_DWC3_DEBUG is not set
+# CONFIG_USB_CHIPIDEA is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+# CONFIG_USB_SERIAL_CONSOLE is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+CONFIG_USB_SERIAL_CP210X=m
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_F81232 is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_METRO is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=y
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+# CONFIG_USB_SERIAL_ZTE is not set
+# CONFIG_USB_SERIAL_SSU100 is not set
+# CONFIG_USB_SERIAL_QT2 is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+CONFIG_USB_PHY=y
+CONFIG_NOP_USB_XCEIV=y
+# CONFIG_OMAP_CONTROL_USB is not set
+# CONFIG_OMAP_USB3 is not set
+# CONFIG_SAMSUNG_USBPHY is not set
+# CONFIG_SAMSUNG_USB2PHY is not set
+# CONFIG_SAMSUNG_USB3PHY is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_RCAR_PHY is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_LIBCOMPOSITE=m
+CONFIG_USB_F_ACM=m
+CONFIG_USB_U_SERIAL=m
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_ACM_MS is not set
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_RNDIS=y
+CONFIG_USB_G_MULTI_CDC=y
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA9633 is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_OT200 is not set
+# CONFIG_LEDS_BLINKM is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_RX4581 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_VRTC is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+
+#
+# on-CPU RTC drivers
+#
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_INTEL_MID_DMAC=y
+# CONFIG_INTEL_IOATDMA is not set
+# CONFIG_DW_DMAC is not set
+# CONFIG_TIMB_DMA is not set
+# CONFIG_PCH_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_R8187SE is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_DX_SEP is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16204 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_LIS3L02DQ is not set
+# CONFIG_SCA3000 is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606 is not set
+# CONFIG_AD799X is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7816 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7280 is not set
+
+#
+# Analog digital bi-direction converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD5930 is not set
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# CONFIG_AD9850 is not set
+# CONFIG_AD9852 is not set
+# CONFIG_AD9910 is not set
+# CONFIG_AD9951 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16060 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16260 is not set
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843 is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7753 is not set
+# CONFIG_ADE7754 is not set
+# CONFIG_ADE7758 is not set
+# CONFIG_ADE7759 is not set
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# CONFIG_AD2S1210 is not set
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
+# CONFIG_IIO_GPIO_TRIGGER is not set
+CONFIG_IIO_SYSFS_TRIGGER=m
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID is not set
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_CSR_WIFI is not set
+# CONFIG_NET_VENDOR_SILICOM is not set
+# CONFIG_CED1401 is not set
+# CONFIG_DGRP is not set
+# CONFIG_USB_DWC2 is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_CHROMEOS_LAPTOP is not set
+# CONFIG_AMILO_RFKILL is not set
+# CONFIG_SENSORS_HDAPS is not set
+CONFIG_INTEL_SCU_IPC=y
+CONFIG_INTEL_SCU_IPC_INTR_MODE=y
+# CONFIG_INTEL_SCU_IPC_POLL_MODE is not set
+CONFIG_INTEL_SCU_IPC_UTIL=y
+CONFIG_GPIO_INTEL_PMIC=y
+CONFIG_INTEL_MID_POWER_BUTTON=y
+# CONFIG_INTEL_MFLD_THERMAL is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_LAPTOP is not set
+CONFIG_INTEL_SCU_FLIS=y
+CONFIG_INTEL_PSH_IPC=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers
+#
+CONFIG_REMOTEPROC=y
+# CONFIG_STE_MODEM_RPROC is not set
+CONFIG_INTEL_MID_REMOTEPROC=y
+
+#
+# Rpmsg drivers
+#
+CONFIG_RPMSG=y
+CONFIG_RPMSG_IPC=y
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+
+#
+# DEVFREQ Drivers
+#
+CONFIG_EXTCON=y
+
+#
+# Extcon Device Drivers
+#
+# CONFIG_EXTCON_GPIO is not set
+# CONFIG_EXTCON_ADC_JACK is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+CONFIG_IIO_KFIFO_BUF=y
+CONFIG_IIO_TRIGGERED_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+
+#
+# Accelerometers
+#
+# CONFIG_KXSD9 is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7266 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_TI_ADC081C is not set
+CONFIG_TI_ADS7955_ADC=y
+CONFIG_IIO_BASINCOVE_GPADC=y
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+
+#
+# Hid Sensor IIO Common
+#
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD5686 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MCP4725 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_INV_MPU6050_IIO is not set
+
+#
+# Light sensors
+#
+# CONFIG_ADJD_S311 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_VCNL4000 is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_VME_BUS is not set
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+CONFIG_PWM_INTEL_MID=y
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+# CONFIG_ISCSI_IBFT_FIND is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_VFAT_FS_NO_DUALNAMES is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_F2FS_FS is not set
+CONFIG_AUFS_FS=m
+CONFIG_AUFS_BRANCH_MAX_127=y
+# CONFIG_AUFS_BRANCH_MAX_511 is not set
+# CONFIG_AUFS_BRANCH_MAX_1023 is not set
+# CONFIG_AUFS_BRANCH_MAX_32767 is not set
+CONFIG_AUFS_SBILIST=y
+# CONFIG_AUFS_HNOTIFY is not set
+# CONFIG_AUFS_EXPORT is not set
+# CONFIG_AUFS_RDU is not set
+# CONFIG_AUFS_PROC_MAP is not set
+# CONFIG_AUFS_SP_IATTR is not set
+# CONFIG_AUFS_SHWH is not set
+# CONFIG_AUFS_BR_RAMFS is not set
+# CONFIG_AUFS_BR_FUSE is not set
+CONFIG_AUFS_BDEV_LOOP=y
+# CONFIG_AUFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_DEF_FILE_IO_SIZE=4096
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
+# CONFIG_DEBUG_KMEMLEAK_TEST is not set
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_TRACE_IRQFLAGS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_TEST_LIST_SORT=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU_DELAY is not set
+CONFIG_SPARSE_RCU_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+# CONFIG_RCU_TRACE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_INTEL_MID=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_NX_TEST=m
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_ENCRYPTED_KEYS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+CONFIG_LSM_MMAP_MIN_ADDR=65536
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_SELINUX=y
+# CONFIG_DEFAULT_SECURITY_DAC is not set
+CONFIG_DEFAULT_SECURITY="selinux"
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTODEV is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ABLK_HELPER_X86=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_XTS=y
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+CONFIG_CRYPTO_TWOFISH_586=y
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_PUBLIC_KEY_ALGO_RSA=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+# CONFIG_KVM is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+# CONFIG_CRC32_SLICEBY8 is not set
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_BIT=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_MPILIB=y
+CONFIG_OID_REGISTRY=y
--- /dev/null
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/i386 3.10.17 Kernel Configuration
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_UIDGID_CONVERTED=y
+# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_EXPERT=y
+# CONFIG_UPTIME_LIMITED_KERNEL is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_KPROBES_ON_FTRACE=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_ALL=y
+# CONFIG_MODULE_SIG_SHA1 is not set
+# CONFIG_MODULE_SIG_SHA224 is not set
+CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_MODULE_SIG_SHA384 is not set
+# CONFIG_MODULE_SIG_SHA512 is not set
+CONFIG_MODULE_SIG_HASH="sha256"
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_THROTTLING=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+CONFIG_X86_WANT_INTEL_MID=y
+CONFIG_X86_INTEL_MID=y
+# CONFIG_X86_MDFLD is not set
+CONFIG_ATOM_SOC_POWER=y
+# CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER is not set
+# CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER is not set
+CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER=y
+CONFIG_INTEL_DEBUG_FEATURE=y
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+# CONFIG_HYPERVISOR_GUEST is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_MSLM=y
+CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_APB_TIMER is not set
+CONFIG_DMI=y
+CONFIG_NR_CPUS=2
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+# CONFIG_X86_MCE_AMD is not set
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_HIGHPTE is not set
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+# CONFIG_ARCH_RANDOM is not set
+CONFIG_X86_SMAP=y
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x1200000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=100
+CONFIG_PM_WAKELOCKS_GC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TRACE_RTC is not set
+# CONFIG_ACPI is not set
+CONFIG_SFI=y
+# CONFIG_APM is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+CONFIG_X86_SFI_CPUFREQ=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+# CONFIG_PCIEASPM_DEFAULT is not set
+# CONFIG_PCIEASPM_POWERSAVE is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCIE_PME=y
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_NET_IPVTI is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETLABEL is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_SECMARK is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_HL=y
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+CONFIG_NETFILTER_XT_TARGET_NETMAP=y
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_HL=y
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+CONFIG_IP_NF_RAW=y
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+# CONFIG_IP6_NF_SECURITY is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_L2TP=y
+# CONFIG_L2TP_DEBUGFS is not set
+# CONFIG_L2TP_V3 is not set
+# CONFIG_BRIDGE is not set
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+# CONFIG_CFG80211_WEXT is not set
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+CONFIG_NFC=y
+# CONFIG_NFC_NCI is not set
+# CONFIG_NFC_HCI is not set
+
+#
+# Near Field Communication (NFC) devices
+#
+# CONFIG_NFC_PN533 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_VIRTIO_BLK is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_INTEL_MID_PTI=y
+CONFIG_INTEL_PTI_STM=y
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_BMP085_SPI is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=y
+# CONFIG_SATA_AHCI_PLATFORM is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=y
+# CONFIG_SATA_HIGHBANK is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+CONFIG_PATA_AMD=y
+# CONFIG_PATA_ARASAN_CF is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+CONFIG_PATA_OLDPIIX=y
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SC1200 is not set
+CONFIG_PATA_SCH=y
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+CONFIG_PATA_MPIIX=y
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_PATA_RZ1000 is not set
+
+#
+# Generic fallback / legacy drivers
+#
+CONFIG_ATA_GENERIC=y
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+# CONFIG_MD_LINEAR is not set
+# CONFIG_MD_RAID0 is not set
+# CONFIG_MD_RAID1 is not set
+# CONFIG_MD_RAID10 is not set
+# CONFIG_MD_RAID456 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_MD_FAULTY is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=y
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+CONFIG_DM_MIRROR=y
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_LOG_USERSPACE is not set
+CONFIG_DM_ZERO=y
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+CONFIG_MII=y
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_VXLAN is not set
+CONFIG_NETCONSOLE=y
+# CONFIG_NETCONSOLE_DYNAMIC is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_VIRTIO_NET is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_VORTEX is not set
+# CONFIG_TYPHOON is not set
+CONFIG_NET_VENDOR_ADAPTEC=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+CONFIG_NET_VENDOR_ALTEON=y
+# CONFIG_ACENIC is not set
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_PCNET32 is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+CONFIG_NET_CADENCE=y
+# CONFIG_ARM_AT91_ETHER is not set
+# CONFIG_MACB is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+CONFIG_BNX2=y
+# CONFIG_CNIC is not set
+CONFIG_TIGON3=y
+# CONFIG_BNX2X is not set
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_CALXEDA_XGMAC is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_CHELSIO_T4VF is not set
+CONFIG_NET_VENDOR_CISCO=y
+# CONFIG_ENIC is not set
+# CONFIG_DNET is not set
+CONFIG_NET_VENDOR_DEC=y
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+# CONFIG_TULIP is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_ULI526X is not set
+CONFIG_NET_VENDOR_DLINK=y
+# CONFIG_DL2K is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_NET_VENDOR_EMULEX=y
+# CONFIG_BE2NET is not set
+CONFIG_NET_VENDOR_EXAR=y
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+CONFIG_NET_VENDOR_HP=y
+# CONFIG_HP100 is not set
+CONFIG_NET_VENDOR_INTEL=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IXGB is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGBEVF is not set
+CONFIG_NET_VENDOR_I825XX=y
+# CONFIG_IP1000 is not set
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_MARVELL=y
+# CONFIG_MVMDIO is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+CONFIG_NET_VENDOR_MICREL=y
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_KSZ884X_PCI is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+CONFIG_NET_VENDOR_NATSEMI=y
+# CONFIG_NATSEMI is not set
+# CONFIG_NS83820 is not set
+CONFIG_NET_VENDOR_8390=y
+CONFIG_NE2K_PCI=y
+CONFIG_NET_VENDOR_NVIDIA=y
+CONFIG_FORCEDETH=y
+CONFIG_NET_VENDOR_OKI=y
+# CONFIG_PCH_GBE is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_NETXEN_NIC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=y
+CONFIG_NET_VENDOR_RDC=y
+# CONFIG_R6040 is not set
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+# CONFIG_SC92031 is not set
+CONFIG_NET_VENDOR_SIS=y
+# CONFIG_SIS900 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SFC is not set
+CONFIG_NET_VENDOR_SMSC=y
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
+CONFIG_NET_VENDOR_STMICRO=y
+# CONFIG_STMMAC_ETH is not set
+CONFIG_NET_VENDOR_SUN=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NIU is not set
+CONFIG_NET_VENDOR_TEHUTI=y
+# CONFIG_TEHUTI is not set
+CONFIG_NET_VENDOR_TI=y
+# CONFIG_TLAN is not set
+CONFIG_NET_VENDOR_VIA=y
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_NET_VENDOR_WIZNET=y
+# CONFIG_WIZNET_W5100 is not set
+# CONFIG_WIZNET_W5300 is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_EEM is not set
+CONFIG_USB_NET_CDC_NCM=y
+# CONFIG_USB_NET_CDC_MBIM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+CONFIG_WIFI_CONTROL_FUNC=y
+CONFIG_WIFI_PLATFORM_DATA=y
+# CONFIG_ATH_CARDS is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTLWIFI is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_MOUSE_SYNAPTICS_USB is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+CONFIG_INPUT_TABLET=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_HANWANG is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_USB_WACOM is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_INTEL_MID is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+CONFIG_N_GSM=y
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+# CONFIG_STALDRV is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+CONFIG_FIX_EARLYCON_MEM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+CONFIG_SERIAL_MRST_MAX3110=y
+# CONFIG_SERIAL_MFD_HSU is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_VIRTIO_CONSOLE is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_GEODE=y
+CONFIG_HW_RANDOM_VIA=y
+# CONFIG_HW_RANDOM_VIRTIO is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_GPIO=y
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_TOPCLIFF_PCH is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_PCI=y
+CONFIG_SPI_DW_MID_DMA=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIODEBUG=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_TS5500 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_ICH is not set
+# CONFIG_GPIO_VX855 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_WM8994 is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_AMD8111 is not set
+CONFIG_GPIO_LANGWELL=y
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_GPIO_MSIC is not set
+
+#
+# USB GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_BATTERY_MAX17042=y
+# CONFIG_BATTERY_INTEL_MID is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+CONFIG_SENSORS_CORETEMP=y
+CONFIG_SENSORS_CORETEMP_INTERRUPT=y
+# CONFIG_SENSORS_IIO_HWMON is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+CONFIG_MSIC_GPADC=y
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_APPLESMC is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_INTEL_POWERCLAMP is not set
+CONFIG_SENSORS_THERMAL_MRFLD=y
+CONFIG_SOC_THERMAL=y
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+# CONFIG_INTEL_SCU_WATCHDOG is not set
+CONFIG_INTEL_SCU_WATCHDOG_EVO=y
+CONFIG_DISABLE_SCU_WATCHDOG=y
+# CONFIG_ITCO_WDT is not set
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+CONFIG_MFD_INTEL_MSIC=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+CONFIG_MFD_WM8994=y
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+CONFIG_REGULATOR_WM8994=y
+CONFIG_REGULATOR_PMIC_BASIN_COVE=y
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_RC_SUPPORT is not set
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_V4L2_INT_DEVICE is not set
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Media drivers
+#
+# CONFIG_MEDIA_USB_SUPPORT is not set
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_V4L_TEST_DRIVERS is not set
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Encoders, decoders, sensors and other helper chips
+#
+
+#
+# Audio decoders, processors and mixers
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_UDA1342 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_AD9389B is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_VS6624 is not set
+# CONFIG_VIDEO_MT9M032 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_M5MOLS is not set
+# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_VIDEO_S5K4ECGX is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_AS3645A is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+
+#
+# Miscelaneous helper chips
+#
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_M52790 is not set
+
+#
+# Sensors used on soc_camera driver
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_AU8522_V4L is not set
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_DRM=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_I915 is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_GMA500 is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_HDMI=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_COMPRESS_OFFLOAD=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DMA_SGBUF=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_PCSP is not set
+# CONFIG_SND_DUMMY is not set
+CONFIG_SND_ALOOP=y
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_PCI is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
+# CONFIG_SND_USB_6FIRE is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_MFLD_MACHINE is not set
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SOUND_PRIME is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO_TPKBD is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=y
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_DWC3=y
+# CONFIG_USB_DWC3_HOST is not set
+CONFIG_USB_DWC3_GADGET=y
+# CONFIG_USB_DWC3_DUAL_ROLE is not set
+
+#
+# Platform Glue Driver Support
+#
+# CONFIG_USB_DWC3_PCI is not set
+CONFIG_USB_DWC3_OTG=y
+CONFIG_USB_DWC3_INTEL_MRFL=y
+# CONFIG_USB_DWC3_INTEL_BYT is not set
+CONFIG_USB_DWC3_DEVICE_INTEL=y
+CONFIG_USB_DWC3_HOST_INTEL=y
+
+#
+# Debugging features
+#
+# CONFIG_USB_DWC3_DEBUG is not set
+# CONFIG_USB_CHIPIDEA is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+# CONFIG_USB_SERIAL_CONSOLE is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_F81232 is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_METRO is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=y
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+# CONFIG_USB_SERIAL_ZTE is not set
+# CONFIG_USB_SERIAL_SSU100 is not set
+# CONFIG_USB_SERIAL_QT2 is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+CONFIG_USB_PHY=y
+CONFIG_NOP_USB_XCEIV=y
+# CONFIG_OMAP_CONTROL_USB is not set
+# CONFIG_OMAP_USB3 is not set
+# CONFIG_SAMSUNG_USBPHY is not set
+# CONFIG_SAMSUNG_USB2PHY is not set
+# CONFIG_SAMSUNG_USB3PHY is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_RCAR_PHY is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_DUMMY_HCD is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=10
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA9633 is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_OT200 is not set
+# CONFIG_LEDS_BLINKM is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_RX4581 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_VRTC is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+
+#
+# on-CPU RTC drivers
+#
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_INTEL_MID_DMAC=y
+# CONFIG_INTEL_IOATDMA is not set
+# CONFIG_DW_DMAC is not set
+# CONFIG_TIMB_DMA is not set
+# CONFIG_PCH_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_R8187SE is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_IDE_PHISON is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_DX_SEP is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16204 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_LIS3L02DQ is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606 is not set
+# CONFIG_AD799X is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7816 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7280 is not set
+
+#
+# Analog digital bi-direction converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD5930 is not set
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# CONFIG_AD9850 is not set
+# CONFIG_AD9852 is not set
+# CONFIG_AD9910 is not set
+# CONFIG_AD9951 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16060 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16260 is not set
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843 is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7753 is not set
+# CONFIG_ADE7754 is not set
+# CONFIG_ADE7758 is not set
+# CONFIG_ADE7759 is not set
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# CONFIG_AD2S1210 is not set
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_OUTPUT=y
+# CONFIG_ANDROID_TIMED_GPIO is not set
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_NET_VENDOR_SILICOM is not set
+# CONFIG_CED1401 is not set
+# CONFIG_DGRP is not set
+# CONFIG_USB_DWC2 is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_CHROMEOS_LAPTOP is not set
+# CONFIG_AMILO_RFKILL is not set
+# CONFIG_SENSORS_HDAPS is not set
+CONFIG_INTEL_SCU_IPC=y
+CONFIG_INTEL_SCU_IPC_INTR_MODE=y
+# CONFIG_INTEL_SCU_IPC_POLL_MODE is not set
+CONFIG_INTEL_SCU_IPC_UTIL=y
+CONFIG_GPIO_INTEL_PMIC=y
+CONFIG_INTEL_MID_POWER_BUTTON=y
+# CONFIG_INTEL_MFLD_THERMAL is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_LAPTOP is not set
+CONFIG_INTEL_SCU_FLIS=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers
+#
+CONFIG_REMOTEPROC=y
+# CONFIG_STE_MODEM_RPROC is not set
+CONFIG_INTEL_MID_REMOTEPROC=y
+
+#
+# Rpmsg drivers
+#
+CONFIG_RPMSG=y
+CONFIG_RPMSG_IPC=y
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+
+#
+# DEVFREQ Drivers
+#
+CONFIG_EXTCON=y
+
+#
+# Extcon Device Drivers
+#
+# CONFIG_EXTCON_GPIO is not set
+# CONFIG_EXTCON_ADC_JACK is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=y
+# CONFIG_IIO_BUFFER is not set
+# CONFIG_IIO_TRIGGER is not set
+
+#
+# Accelerometers
+#
+# CONFIG_KXSD9 is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7266 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_TI_ADC081C is not set
+CONFIG_IIO_BASINCOVE_GPADC=y
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+
+#
+# Hid Sensor IIO Common
+#
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD5686 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MCP4725 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_INV_MPU6050_IIO is not set
+
+#
+# Light sensors
+#
+# CONFIG_ADJD_S311 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_VCNL4000 is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+# CONFIG_ISCSI_IBFT_FIND is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_VFAT_FS_NO_DUALNAMES is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_AUFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_DEF_FILE_IO_SIZE=4096
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
+# CONFIG_DEBUG_KMEMLEAK_TEST is not set
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_TRACE_IRQFLAGS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_TEST_LIST_SORT=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU_DELAY is not set
+CONFIG_SPARSE_RCU_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+# CONFIG_RCU_TRACE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_INTEL_MID=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_NX_TEST=m
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_ENCRYPTED_KEYS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+CONFIG_LSM_MMAP_MIN_ADDR=65536
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_SELINUX=y
+# CONFIG_DEFAULT_SECURITY_DAC is not set
+CONFIG_DEFAULT_SECURITY="selinux"
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTODEV is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ABLK_HELPER_X86=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_XTS=y
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+CONFIG_CRYPTO_TWOFISH_586=y
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_PUBLIC_KEY_ALGO_RSA=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+# CONFIG_KVM is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+# CONFIG_CRC32_SLICEBY8 is not set
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_BIT=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_MPILIB=y
+CONFIG_OID_REGISTRY=y
static inline unsigned long apbt_quick_calibrate(void) {return 0; }
static inline void apbt_time_init(void) { }
+static inline void apbt_setup_secondary_clock(void) { }
#endif
#endif /* ASM_X86_APBT_H */
--- /dev/null
+/*
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef BCM_BT_LPM_H
+#define BCM_BT_LPM_H
+
+#include <linux/serial_core.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+/* Uart driver must call this every time it begins TX, to ensure
+ * this driver keeps WAKE asserted during TX. Called with uart
+ * spinlock held. */
+extern void bcm_bt_lpm_exit_lpm_locked(struct device *dev,
+ struct hci_dev *hdev);
+
+/* Platform data for the Broadcom Bluetooth low-power-mode (LPM) driver. */
+struct bcm_bt_lpm_platform_data {
+ int gpio_wake; /* CPU -> BCM wakeup gpio */
+ int gpio_host_wake; /* BCM -> CPU wakeup gpio */
+ int int_host_wake; /* BCM -> CPU wakeup irq */
+ int gpio_enable; /* GPIO enable/disable BT/FM */
+
+ int port; /* UART port to use with BT/FM */
+ /*
+ * Callback to request the uart driver to clock off.
+ * Called with uart spinlock held.
+ */
+ void (*uart_disable)(struct device *tty);
+ /*
+ * Callback to request the uart driver to clock on.
+ * Called with uart spinlock held.
+ */
+ void (*uart_enable)(struct device *tty);
+};
+
+#endif /* BCM_BT_LPM_H */
FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef CONFIG_X86_INTEL_MID
FIX_LNW_VRTC,
+ FIX_CLOCK_CTL,
#endif
__end_of_permanent_fixed_addresses,
+#ifndef _ARCH_X86_GPIO_H
+#define _ARCH_X86_GPIO_H
+
+#if CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
+
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#endif
+
+#include <asm-generic/gpio.h>
+
+/* The trivial gpiolib dispatchers */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+#else /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+
#ifndef __LINUX_GPIO_H
#warning Include linux/gpio.h instead of asm/gpio.h
#include <linux/gpio.h>
#endif
+
+#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+
+#endif /* _ARCH_X86_GPIO_H */
--- /dev/null
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <asm/intel_mid_pcihelpers.h>
+
+#ifdef CONFIG_SFI
+extern int get_gpio_by_name(const char *name);
+extern void install_irq_resource(struct platform_device *pdev, int irq);
+#else
+static inline int get_gpio_by_name(const char *name) { return -ENODEV; }
+/* Dummy function to prevent compilation error in byt */
+static inline void install_irq_resource(struct platform_device *pdev, int irq)
+{};
+#endif
+
+extern int intel_mid_pci_init(void);
+extern void *get_oem0_table(void);
+extern void intel_delayed_device_register(void *dev,
+ void (*delayed_callback)(void *dev_desc));
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern struct devs_id *get_device_id(u8 type, char *name);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+extern void *get_oem0_table(void);
+extern void register_rpmsg_service(char *name, int id, u32 addr);
+extern int sdhci_pci_request_regulators(void);
+
+/* Define soft platform ID to comply with the OEMB table format. But SPID is not supported */
+#define INTEL_PLATFORM_SSN_SIZE 32
+struct soft_platform_id {
+ u16 customer_id; /*Defines the final customer for the product */
+ u16 vendor_id; /* Defines who owns the final product delivery */
+ u16 manufacturer_id; /* Defines who build the hardware. This can be
+ * different for the same product */
+ u16 platform_family_id; /* Defined by vendor and defines the family of
+ * the product with the same root components */
+ u16 product_line_id; /* Defined by vendor and defines the name of the
+ * product. This can be used to differentiate the
+ * feature set for the same product family (low
+ * cost vs full feature). */
+ u16 hardware_id; /* Defined by vendor and defines the physical hardware
+ * component set present on the PCB/FAB */
+	u8 fru[SPID_FRU_SIZE]; /* Field Replaceable Unit */
+} __packed;
+
+/* OEMB table */
+struct sfi_table_oemb {
+ struct sfi_table_header header;
+ u32 board_id;
+ u32 board_fab;
+ u8 iafw_major_version;
+ u8 iafw_main_version;
+ u8 val_hooks_major_version;
+ u8 val_hooks_minor_version;
+ u8 ia_suppfw_major_version;
+ u8 ia_suppfw_minor_version;
+ u8 scu_runtime_major_version;
+ u8 scu_runtime_minor_version;
+ u8 ifwi_major_version;
+ u8 ifwi_minor_version;
+ struct soft_platform_id spid;
+ u8 ssn[INTEL_PLATFORM_SSN_SIZE];
+} __packed;
+
+/*
+ * Here defines the array of devices platform data that IAFW would export
+ * through SFI "DEVS" table, we use name and type to match the device and
+ * its platform data.
+ */
+struct devs_id {
+ char name[SFI_NAME_LEN + 1];
+ u8 type;
+ u8 delay;
+ void *(*get_platform_data)(void *info);
+ void (*device_handler)(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev);
+ /* Custom handler for devices */
+ u8 trash_itp;/* true if this driver uses pin muxed with XDB connector */
+};
+
+#define SD_NAME_SIZE 16
+/**
+ * struct sd_board_info - template for device creation
+ * @name: Initializes sdio_device.name; identifies the driver.
+ * @bus_num: board-specific identifier for a given SDIO controller.
+ * @board_ref_clock: Initializes sd_device.board_ref_clock;
+ * @platform_data: Initializes sd_device.platform_data; the particular
+ * data stored there is driver-specific.
+ *
+ */
+struct sd_board_info {
+ char name[SD_NAME_SIZE];
+ int bus_num;
+ unsigned short addr;
+ u32 board_ref_clock;
+ void *platform_data;
+};
+
+
+/*
+ * Medfield is the follow-up of Moorestown; it combines a two-chip solution
+ * into one. Other than that, it also added always-on, constant TSC and LAPIC
+ * timers. Medfield is the platform name, and the chip name is Penwell;
+ * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
+ * identified via MSRs.
+ */
+enum intel_mid_cpu_type {
+ INTEL_CPU_CHIP_NOTMID = 0,
+ /* 1 was Moorestown */
+ INTEL_MID_CPU_CHIP_PENWELL = 2,
+ INTEL_MID_CPU_CHIP_CLOVERVIEW,
+ INTEL_MID_CPU_CHIP_TANGIER,
+ INTEL_MID_CPU_CHIP_VALLEYVIEW2,
+ INTEL_MID_CPU_CHIP_ANNIEDALE,
+ INTEL_MID_CPU_CHIP_CARBONCANYON,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ * structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+ void (*arch_setup)(void);
+};
+
+/* Helper API's for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)[cpuid] = \
+ get_##cpuname##_ops,
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new cpu addition, a weak get_<cpuname>_ops() function needs be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */
+#define INTEL_MID_OPS_INIT {\
+ DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL) \
+ DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW) \
+ DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+};
+
+static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
+{
+#ifdef CONFIG_X86_INTEL_MID
+ return __intel_mid_cpu_chip;
+#else
+ return INTEL_CPU_CHIP_NOTMID;
+#endif
+}
+
+enum intel_mid_timer_options {
+ INTEL_MID_TIMER_DEFAULT,
+ INTEL_MID_TIMER_APBT_ONLY,
+ INTEL_MID_TIMER_LAPIC_APBT,
+};
+
+extern enum intel_mid_timer_options intel_mid_timer_options;
+
+/*
+ * Penwell uses spread spectrum clock, so the freq number is not exactly
+ * the same as reported by MSR based on SDM.
+ */
+#define FSB_FREQ_83SKU 83200
+#define FSB_FREQ_100SKU 99840
+#define FSB_FREQ_133SKU 133000
+
+#define FSB_FREQ_167SKU 167000
+#define FSB_FREQ_200SKU 200000
+#define FSB_FREQ_267SKU 267000
+#define FSB_FREQ_333SKU 333000
+#define FSB_FREQ_400SKU 400000
+
+/* Bus Select SoC Fuse value */
+#define BSEL_SOC_FUSE_MASK 0x7
+#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */
+#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */
+#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */
+
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX 8
+
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_mrfld_console;
+extern void mrfld_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(const char *);
+
+extern struct console early_pti_console;
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+extern void intel_psh_devices_create(void);
+extern void intel_psh_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ (1024)
+/*#define MRST_VRTC_PGOFFSET (0xc00) */
+
+extern void intel_mid_rtc_init(void);
+
+enum intel_mid_sim_type {
+ INTEL_MID_CPU_SIMULATION_NONE = 0,
+ INTEL_MID_CPU_SIMULATION_VP,
+ INTEL_MID_CPU_SIMULATION_SLE,
+ INTEL_MID_CPU_SIMULATION_HVP,
+};
+extern enum intel_mid_sim_type __intel_mid_sim_platform;
+static inline enum intel_mid_sim_type intel_mid_identify_sim(void)
+{
+#ifdef CONFIG_X86_INTEL_MID
+ return __intel_mid_sim_platform;
+#else
+ return INTEL_MID_CPU_SIMULATION_NONE;
+#endif
+}
+
+#define INTEL_MID_IRQ_OFFSET 0x100
+
+extern void pstore_ram_reserve_memory(void);
+
+#endif /* _ASM_X86_INTEL_MID_H */
--- /dev/null
+#ifndef __INTEL_BASINCOVE_GPADC_H__
+#define __INTEL_BASINCOVE_GPADC_H__
+
+#define GPADC_VBAT (1 << 0)
+#define GPADC_BATID (1 << 1)
+#define GPADC_IBAT (1 << 2)
+#define GPADC_PMICTEMP (1 << 3)
+#define GPADC_BATTEMP0 (1 << 4)
+#define GPADC_BATTEMP1 (1 << 5)
+#define GPADC_SYSTEMP0 (1 << 6)
+#define GPADC_SYSTEMP1 (1 << 7)
+#define GPADC_SYSTEMP2 (1 << 8)
+#define GPADC_CH_NUM 9
+
+#define MBATTEMP (1 << 2)
+#define MSYSTEMP (1 << 3)
+#define MBATT (1 << 4)
+#define MVIBATT (1 << 5)
+#define MCCTICK (1 << 7)
+
+#define GPADC_RSL(channel, res) (res->data[ffs(channel)-1])
+
+struct gpadc_regmap_t {
+ char *name;
+ int cntl; /* GPADC Conversion Control Bit indicator */
+ int rslth; /* GPADC Conversion Result Register Addr High */
+ int rsltl; /* GPADC Conversion Result Register Addr Low */
+};
+
+struct gpadc_regs_t {
+ u16 gpadcreq;
+ u16 gpadcreq_irqen;
+ u16 gpadcreq_busy;
+ u16 mirqlvl1;
+ u16 mirqlvl1_adc;
+ u16 adc1cntl;
+ u16 adcirq;
+ u16 madcirq;
+};
+
+struct iio_dev;
+
+struct intel_basincove_gpadc_platform_data {
+ int channel_num;
+ unsigned long intr;
+ u8 intr_mask;
+ struct iio_map *gpadc_iio_maps;
+ struct gpadc_regmap_t *gpadc_regmaps;
+ struct gpadc_regs_t *gpadc_regs;
+ const struct iio_chan_spec *gpadc_channels;
+};
+
+struct gpadc_result {
+ int data[GPADC_CH_NUM];
+};
+
+int iio_basincove_gpadc_sample(struct iio_dev *indio_dev,
+ int ch, struct gpadc_result *res);
+
+int intel_basincove_gpadc_sample(int ch, struct gpadc_result *res);
+#endif
--- /dev/null
+#ifndef __INTEL_BASINCOVE_OCD_H__
+#define __INTEL_BASINCOVE_OCD_H__
+
+#define DRIVER_NAME "bcove_bcu"
+#define DEVICE_NAME "mrfl_pmic_bcu"
+
+/* Generic bit representation macros */
+#define B0 (1 << 0)
+#define B1 (1 << 1)
+#define B2 (1 << 2)
+#define B3 (1 << 3)
+#define B4 (1 << 4)
+#define B5 (1 << 5)
+#define B6 (1 << 6)
+#define B7 (1 << 7)
+
+/* 30 seconds delay macro for VWARN2 interrupt unmask (enable) */
+#define VWARN2_INTR_EN_DELAY (30 * HZ)
+
+/* IRQ registers */
+#define BCUIRQ 0x05
+#define IRQLVL1 0x01
+#define MIRQLVL1 0x0C
+
+/* Status registers */
+#define S_BCUINT 0x3B
+#define S_BCUCTRL 0x49
+
+/* PMIC SRAM address for BCU register */
+#define PMIC_SRAM_BCU_ADDR 0xFFFFF614
+#define IOMAP_LEN 1
+
+#define NUM_VOLT_LEVELS 3
+#define NUM_CURR_LEVELS 2
+
+#define VWARN_EN_MASK B3
+#define ICCMAXVCC_EN_MASK B6
+
+#define MVWARN1_MASK B0
+#define MVWARN2_MASK B1
+#define MVCRIT_MASK B2
+
+#define MVCRIT B2
+#define MVWARN2 B1
+#define MVWARN1 B0
+
+#define ICCMAXVCC_EN (1 << 6)
+#define VWARN_EN (1 << 3)
+#define VCRIT_SHUTDOWN (1 << 4)
+
+#define BCU_ALERT (1 << 3)
+#define VWARN1_IRQ (1 << 0)
+#define VWARN2_IRQ (1 << 1)
+#define VCRIT_IRQ (1 << 2)
+#define GSMPULSE_IRQ (1 << 3)
+#define TXPWRTH_IRQ (1 << 4)
+
+/* Number of configurable thresholds for current and voltage */
+#define NUM_THRESHOLDS 8
+
+/* BCU real time status flags for corresponding input signals */
+#define SVWARN1 (1<<0)
+#define SVWARN2 (1<<1)
+#define SVCRIT (1<<2)
+
+/* S_BCUCTRL register status bits */
+#define S_CAMFLTORCH B3
+#define S_CAMFLDIS B2
+#define S_BCUDISW2 B1
+
+#define S_BCUDISW2_MASK B1
+#define S_CAMFLDIS_MASK B2
+#define S_CAMFLTORCH_MASK B3
+
+/* check whether bit is sticky or not by checking 5th bit */
+#define IS_STICKY(data) (!!(data & 0x10))
+
+/* check whether signal asserted for VW1/VW2/VC */
+#define IS_ASSRT_ON_VW1(data) (!!(data & 0x01))
+#define IS_ASSRT_ON_VW2(data) (!!(data & 0x02))
+#define IS_ASSRT_ON_VC(data) (!!(data & 0x04))
+
+/* Configuration registers that monitor the voltage drop */
+#define VWARN1_CFG 0x3C
+#define VWARN2_CFG 0x3D
+#define VCRIT_CFG 0x3E
+#define ICCMAXVSYS_CFG 0x3F
+#define ICCMAXVCC_CFG 0x40
+#define ICCMAXVNN_CFG 0x41
+
+/* Behaviour registers */
+#define VFLEXSRC_BEH 0x42
+#define VFLEXDIS_BEH 0x43
+#define VIBDIS_BEH 0x44
+#define CAMFLTORCH_BEH 0x45
+#define CAMFLDIS_BEH 0x46
+#define BCUDISW2_BEH 0x47
+#define BCUDISCRIT_BEH 0x48
+
+/*IRQ Mask Register*/
+#define MBCUIRQ 0x10
+
+#define MRFL_SMIP_SRAM_ADDR 0xFFFCE000
+
+/* SMIP offset address from where the BCU related info should be read */
+#define BCU_SMIP_OFFSET 0x3BA
+
+/* No of Bytes we have to read from SMIP from BCU_SMIP_BASE*/
+#define NUM_SMIP_BYTES 14
+
+/* Max length of the register name string */
+#define MAX_REGNAME_LEN 15
+
+/* String to send the uevent along with env info to user space */
+#define EVT_STR "BCUEVT="
+
+/* Macro to get the mode of access for the BCU registers */
+#define MODE(m) (((m != S_BCUINT) && (m != BCUIRQ) && (m != IRQLVL1)) \
+ ? (S_IRUGO | S_IWUSR) : S_IRUGO)
+
+/* Generic macro to assign the parameters (reg name and address) */
+#define reg_info(x) { .name = #x, .addr = x, .mode = MODE(x) }
+
+/* Generic macro to fill the environmental data for bcu uevent */
+#define get_envp(evt) (EVT_STR#evt)
+
+/*
+ * These values are read from SMIP.
+ * SMIP contains these entries - default register configurations.
+ * BCU is programmed to these default values during boot time.
+ */
+
+struct ocd_bcove_config_data {
+ uint8_t vwarn1_cfg;
+ uint8_t vwarn2_cfg;
+ uint8_t vcrit_cfg;
+ uint8_t iccmaxvsys_cfg;
+ uint8_t iccmaxvcc_cfg;
+ uint8_t iccmaxvnn_cfg;
+ uint8_t vflexsrc_beh;
+ uint8_t vflexdis_beh;
+ uint8_t vibdis_beh;
+ uint8_t camfltorch_beh;
+ uint8_t camfldis_beh;
+ uint8_t bcudisw2_beh;
+ uint8_t bcudiscrit_beh;
+ uint8_t mbcuirq;
+} __packed;
+
+struct ocd_platform_data {
+ int (*bcu_config_data) (struct ocd_bcove_config_data *);
+};
+
+struct bcu_reg_info {
+ char name[MAX_REGNAME_LEN]; /* register name */
+ u16 addr; /* offset address */
+ u16 mode; /* permission mode */
+};
+
+#endif
+
--- /dev/null
+#ifndef __INTEL_MID_GPADC_H__
+#define __INTEL_MID_GPADC_H__
+
+struct intel_mid_gpadc_platform_data {
+ unsigned long intr;
+};
+
+#define CH_NEED_VREF (1 << 8)
+#define CH_NEED_VCALIB (1 << 9)
+#define CH_NEED_ICALIB (1 << 10)
+
+int intel_mid_gpadc_gsmpulse_sample(int *vol, int *cur);
+int intel_mid_gpadc_sample(void *handle, int sample_count, ...);
+int get_gpadc_sample(void *handle, int sample_count, int *buffer);
+void intel_mid_gpadc_free(void *handle);
+void *intel_mid_gpadc_alloc(int count, ...);
+void *gpadc_alloc_channels(int count, int *channel_info);
+#endif
+
--- /dev/null
+#ifndef __INTEL_MID_HSU_H__
+#define __INTEL_MID_HSU_H__
+
+#define hsu_port_func_max 4
+
+enum {
+ hsu_port0,
+ hsu_port1,
+ hsu_port2,
+ hsu_port_share,
+ hsu_port_max,
+ hsu_dma,
+};
+
+enum {
+ bt_port,
+ modem_port,
+ gps_port,
+ debug_port,
+};
+
+enum {
+ hsu_intel,
+ hsu_dw,
+};
+
+struct hsu_port_cfg {
+ int type;
+ int hw_ip;
+ int index;
+ char *name;
+ int idle;
+ int has_alt;
+ int alt;
+ int force_suspend;
+ int preamble;
+ int hw_context_save;
+ int hw_ctrl_cts;
+ struct device *dev;
+ int (*hw_init)(struct device *dev, int port);
+ void(*hw_set_alt)(int port);
+ void(*hw_set_rts)(int port, int value);
+ void(*hw_suspend)(int port, struct device *dev, irq_handler_t wake_isr);
+ void(*hw_suspend_post)(int port);
+ void(*hw_resume)(int port, struct device *dev);
+ unsigned int (*hw_get_clk)(void);
+ void (*wake_peer)(struct device *tty);
+ void (*set_clk)(unsigned int m, unsigned int n,
+ void __iomem *addr);
+ void (*hw_reset)(void __iomem *addr);
+};
+
+
+void intel_mid_hsu_suspend(int port, struct device *dev,
+ irq_handler_t wake_isr);
+void intel_mid_hsu_resume(int port, struct device *dev);
+void intel_mid_hsu_rts(int port, int value);
+void intel_mid_hsu_switch(int port);
+int intel_mid_hsu_init(struct device *dev, int port);
+int intel_mid_hsu_func_to_port(unsigned int func);
+unsigned int intel_mid_hsu_get_clk(void);
+int hsu_register_board_info(void *inf);
+void intel_mid_hsu_suspend_post(int port);
+struct device *intel_mid_hsu_set_wake_peer(int port,
+ void (*wake_peer)(struct device *));
+void intel_mid_hsu_reset(void __iomem *addr);
+void intel_mid_hsu_set_clk(unsigned int m, unsigned int n,
+ void __iomem *addr);
+#endif
--- /dev/null
+/*
+ * Access to message bus through three registers
+ * in CUNIT(0:0:0) PCI configuration space.
+ * MSGBUS_CTRL_REG(0xD0):
+ * 31:24 = message bus opcode
+ * 23:16 = message bus port
+ * 15:8 = message bus address, low 8 bits.
+ * 7:4 = message bus byte enables
+ * MSGBUS_CTRL_EXT_REG(0xD8):
+ * 31:8 = message bus address, high 24 bits.
+ * MSGBUS_DATA_REG(0xD4):
+ * hold the data for write or read
+ */
+#define PCI_ROOT_MSGBUS_CTRL_REG 0xD0
+#define PCI_ROOT_MSGBUS_DATA_REG 0xD4
+#define PCI_ROOT_MSGBUS_CTRL_EXT_REG 0xD8
+#define PCI_ROOT_MSGBUS_READ 0x10
+#define PCI_ROOT_MSGBUS_WRITE 0x11
+#define PCI_ROOT_MSGBUS_DWORD_ENABLE 0xf0
+
+u32 intel_mid_msgbus_read32_raw(u32 cmd);
+u32 intel_mid_msgbus_read32(u8 port, u32 addr);
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data);
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data);
+u32 intel_mid_soc_stepping(void);
--- /dev/null
+#ifndef __INTEL_MID_POWERBTN_H__
+#define __INTEL_MID_POWERBTN_H__
+
+struct intel_msic_power_btn_platform_data {
+ u32 pbstat;
+ u16 pb_level;
+ u16 irq_lvl1_mask;
+ int (*irq_ack)(struct intel_msic_power_btn_platform_data *);
+};
+
+#define MSIC_PB_LEN 1
+#define MSIC_PWRBTNM (1 << 0)
+
+#endif
--- /dev/null
+#ifndef __INTEL_MID_PWM_H__
+#define __INTEL_MID_PWM_H__
+
+#define MAX_DUTYCYCLE_PERCENTAGE 100
+
+enum {
+ PWM_LED = 0,
+ PWM_VIBRATOR,
+ PWM_LCD_BACKLIGHT,
+ PWM_NUM,
+};
+
+struct intel_mid_pwm_device_data {
+ u16 reg_clkdiv0;
+ u16 reg_clkdiv1;
+ u16 reg_dutycyc;
+ u8 val_clkdiv0;
+ u8 val_clkdiv1;
+};
+
+struct intel_mid_pwm_platform_data {
+ int pwm_num;
+ struct intel_mid_pwm_device_data *ddata;
+ u16 reg_clksel;
+ u8 val_clksel;
+};
+
+int intel_mid_pwm(int id, int value);
+#endif
+
--- /dev/null
+/*
+ * INTEL MID Remote Processor Header File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ASM_INTEL_MID_REMOTEPROC_H
+#define _ASM_INTEL_MID_REMOTEPROC_H
+
+#define RP_IPC_COMMAND 0xA0
+#define RP_IPC_SIMPLE_COMMAND 0xA1
+#define RP_IPC_RAW_COMMAND 0xA2
+
+#define RP_PMIC_ACCESS 0xFF
+#define RP_DFU_REQUEST 0xFE
+#define RP_SET_WATCHDOG 0xF8
+#define RP_FLIS_ACCESS 0xF5
+#define RP_GET_FW_REVISION 0xF4
+#define RP_COLD_BOOT 0xF3
+#define RP_COLD_RESET 0xF1
+#define RP_COLD_OFF 0x80
+#define RP_MIP_ACCESS 0xEC
+#define RP_GET_HOBADDR 0xE5
+#define RP_OSC_CLK_CTRL 0xE6
+#define RP_S0IX_COUNTER 0xE8
+#define RP_WRITE_OSNIB 0xE4
+#define RP_FW_UPDATE 0xFE
+#define RP_VRTC 0xFA
+#define RP_PMDB 0xE0
+#define RP_INDIRECT_WRITE 0x05
+
+/*
+ * Assigning some temp ids for following devices
+ * TODO: Need to change it to some meaningful
+ * values.
+ */
+#define RP_PMIC_GPIO 0X02
+#define RP_PMIC_AUDIO 0x03
+#define RP_MSIC_GPIO 0x05
+#define RP_MSIC_AUDIO 0x06
+#define RP_MSIC_OCD 0x07
+#define RP_MSIC_BATTERY 0XEF
+#define RP_MSIC_THERMAL 0x09
+#define RP_MSIC_POWER_BTN 0x10
+#define RP_IPC 0X11
+#define RP_IPC_UTIL 0X12
+#define RP_FW_ACCESS 0X13
+#define RP_UMIP_ACCESS 0x14
+#define RP_OSIP_ACCESS 0x15
+#define RP_MSIC_ADC 0x16
+#define RP_BQ24192 0x17
+#define RP_MSIC_CLV_AUDIO 0x18
+#define RP_PMIC_CCSM 0x19
+#define RP_PMIC_I2C 0x20
+#define RP_MSIC_MRFLD_AUDIO 0x21
+#define RP_MSIC_PWM 0x22
+#define RP_MSIC_KPD_LED 0x23
+#define RP_BCOVE_ADC 0x24
+#define RP_BCOVE_THERMAL 0x25
+#define RP_MRFL_OCD 0x26
+#define RP_FW_LOGGING 0x27
+#define RP_PMIC_CHARGER 0x28
+
+enum rproc_type {
+ RPROC_SCU = 0,
+ RPROC_PSH,
+ RPROC_NUM,
+};
+
+struct rproc_ops;
+struct platform_device;
+struct rpmsg_ns_msg;
+
+struct rpmsg_ns_info {
+ enum rproc_type type;
+ char name[RPMSG_NAME_SIZE];
+ u32 addr;
+ u32 flags;
+ struct list_head node;
+};
+
+struct rpmsg_ns_list {
+ struct list_head list;
+ struct mutex lock;
+};
+
+extern struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name,
+ int id, u32 addr);
+extern void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+ struct rpmsg_ns_list *nslist);
+
+/*
+ * struct intel_mid_rproc_pdata - intel mid remoteproc's platform data
+ * @name: the remoteproc's name
+ * @firmware: name of firmware file to load
+ * @ops: start/stop rproc handlers
+ * @device_enable: handler for enabling a device
+ * @device_shutdown: handler for shutting down a device
+ */
+struct intel_mid_rproc_pdata {
+ const char *name;
+ const char *firmware;
+ const struct rproc_ops *ops;
+ int (*device_enable) (struct platform_device *pdev);
+ int (*device_shutdown) (struct platform_device *pdev);
+ struct rpmsg_ns_list *nslist;
+};
+
+#endif /* _ASM_INTEL_MID_REMOTEPROC_H */
--- /dev/null
+#ifndef _INTEL_MID_RPMSG_H_
+#define _INTEL_MID_RPMSG_H_
+
+#include <asm/scu_ipc_rpmsg.h>
+#include <linux/rpmsg.h>
+
+#ifdef ANDROID_BUILD
+#include <linux/wakelock.h>
+#endif
+
+#define RPMSG_TX_TIMEOUT (5 * HZ)
+
+struct rpmsg_instance {
+ struct rpmsg_channel *rpdev;
+ struct mutex instance_lock;
+ struct tx_ipc_msg *tx_msg;
+ struct rx_ipc_msg *rx_msg;
+ struct mutex rx_lock;
+ struct completion reply_arrived;
+ struct rpmsg_endpoint *endpoint;
+};
+
+struct rpmsg_lock {
+ struct mutex lock;
+ int locked_prev; /* locked prev flag */
+ atomic_t pending;
+};
+
+extern int rpmsg_send_command(struct rpmsg_instance *instance, u32 cmd,
+ u32 sub, u8 *in,
+ u32 *out, u32 inlen,
+ u32 outlen);
+
+extern int rpmsg_send_raw_command(struct rpmsg_instance *instance, u32 cmd,
+ u32 sub, u8 *in,
+ u32 *out, u32 inlen,
+ u32 outlen, u32 sptr,
+ u32 dptr);
+
+extern int rpmsg_send_simple_command(struct rpmsg_instance *instance, u32 cmd,
+ u32 sub);
+
+extern int alloc_rpmsg_instance(struct rpmsg_channel *rpdev,
+ struct rpmsg_instance **pInstance);
+
+extern void free_rpmsg_instance(struct rpmsg_channel *rpdev,
+ struct rpmsg_instance **pInstance);
+
+extern void init_rpmsg_instance(struct rpmsg_instance *instance);
+
+extern int rpmsg_send_generic_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+ u32 *out, u32 outlen);
+
+extern int rpmsg_send_generic_simple_command(u32 cmd, u32 sub);
+
+extern int rpmsg_send_generic_raw_command(u32 cmd, u32 sub,
+ u8 *in, u32 inlen,
+ u32 *out, u32 outlen,
+ u32 dptr, u32 sptr);
+
+struct rpmsg_device_data {
+ char name[RPMSG_NAME_SIZE];
+ struct rpmsg_channel *rpdev;
+ struct rpmsg_instance *rpmsg_instance;
+};
+
+enum rpmsg_ipc_command_type {
+ RPMSG_IPC_COMMAND = 0,
+ RPMSG_IPC_SIMPLE_COMMAND,
+ RPMSG_IPC_RAW_COMMAND,
+ RPMSG_IPC_COMMAND_TYPE_NUM,
+};
+
+extern void rpmsg_global_lock(void);
+extern void rpmsg_global_unlock(void);
+
+#endif
--- /dev/null
+#ifndef __INTEL_MID_THERMAL_H__
+#define __INTEL_MID_THERMAL_H__
+
+#include <linux/thermal.h>
+
+#define BPTHERM_NAME "bptherm"
+#define SKIN0_NAME "skin0"
+#define SKIN1_NAME "skin1"
+#define MSIC_DIE_NAME "msicdie"
+#define MSIC_SYS_NAME "sys"
+#define SYSTHERM2 "systherm2"
+/**
+ * struct intel_mid_thermal_sensor - intel_mid_thermal sensor information
+ * @name: name of the sensor
+ * @index: index number of sensor
+ * @slope: slope used for temp calculation
+ * @intercept: intercept used for temp calculation
+ * @adc_channel: adc channel id|flags
+ * @direct: If true then direct conversion is used.
+ * @priv: private sensor data
+ * @temp_correlation: temp correlation function
+ */
+struct intel_mid_thermal_sensor {
+ char name[THERMAL_NAME_LENGTH];
+ int index;
+ long slope;
+ long intercept;
+ int adc_channel;
+ bool direct;
+ void *priv;
+ int (*temp_correlation)(void *info, long temp, long *res);
+};
+
+/**
+ * struct soc_throttle_data - SoC level power limits for thermal throttling
+ * @power_limit: power limit value
+ * @floor_freq: The CPU frequency may not go below this value
+ */
+struct soc_throttle_data {
+ int power_limit;
+ int floor_freq;
+};
+
+/**
+ * struct intel_mid_thermal_platform_data - Platform data for
+ * intel mid thermal driver
+ *
+ * @num_sensors: Maximum number of sensors supported
+ * @sensors: sensor info
+ * @soc_cooling: True or false
+ */
+struct intel_mid_thermal_platform_data {
+ int num_sensors;
+ struct intel_mid_thermal_sensor *sensors;
+ bool soc_cooling;
+};
+
+/**
+ * struct skin1_private_info - skin1 sensor private data
+ *
+ * @dependent: dependency on other sensors
+ *             0 - no dependency,
+ *             > 0 - depends on other sensors
+ * @sensors: dependent sensor address.
+ */
+struct skin1_private_info {
+ int dependent;
+ struct intel_mid_thermal_sensor **sensors;
+};
+
+/* skin0 sensor temperature correlation function*/
+int skin0_temp_correlation(void *info, long temp, long *res);
+/* skin1 sensor temperature correlation function*/
+int skin1_temp_correlation(void *info, long temp, long *res);
+/* bptherm sensor temperature correlation function*/
+int bptherm_temp_correlation(void *info, long temp, long *res);
+#endif
-#ifndef _MRST_VRTC_H
-#define _MRST_VRTC_H
+#ifndef _INTEL_MID_VRTC_H
+#define _INTEL_MID_VRTC_H
extern unsigned char vrtc_cmos_read(unsigned char reg);
extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
--- /dev/null
+#ifndef _ASM_X86_INTEL_MIP_H_
+#define _ASM_X86_INTEL_MIP_H_
+
+#include <asm/intel-mid.h>
+
+/* SMIP property related definitions */
+#define SCU_MIP_DEV_NAME "intel_scu_mip"
+#define SMIP_NUM_CONFIG_PROPS 6
+#define SMIP_MAX_PROP_LEN 4
+
+enum platform_prop {
+ USB_COMPLIANCE,
+ CHARGE_TERMINATION,
+ SHUTDOWN_METHODOLOGY,
+ MOS_TRANS_CAPACITY,
+ NFC_RESV_CAPACITY,
+ TEMP_CRIT_SHUTDOWN,
+};
+
+struct smip_platform_prop {
+ unsigned int offset;
+ unsigned int len;
+ bool is_bit_field;
+ unsigned int mask;
+};
+
+struct scu_mip_platform_data {
+ struct smip_platform_prop smip_prop[SMIP_NUM_CONFIG_PROPS];
+};
+
+int get_smip_property_by_name(enum platform_prop);
+#endif
--- /dev/null
+#ifndef _ASM_X86_INTEL_PSH_IPC_H_
+#define _ASM_X86_INTEL_PSH_IPC_H_
+
+#define CHANNEL_BUSY (1 << 31)
+#define PSH_IPC_CONTINUE (1 << 30)
+
+struct psh_msg {
+ u32 msg;
+ u32 param;
+};
+
+enum psh_channel {
+ PSH_SEND_CH0 = 0,
+ PSH_SEND_CH1,
+ PSH_SEND_CH2,
+ PSH_SEND_CH3,
+ NUM_IA2PSH_IPC,
+ PSH_RECV_CH0 = NUM_IA2PSH_IPC,
+ PSH_RECV_CH1,
+ PSH_RECV_CH2,
+ PSH_RECV_CH3,
+ PSH_RECV_END,
+ NUM_PSH2IA_IPC = PSH_RECV_END - PSH_RECV_CH0,
+ NUM_ALL_CH = NUM_IA2PSH_IPC + NUM_PSH2IA_IPC,
+};
+
+typedef void(*psh_channel_handle_t)(u32 msg, u32 param, void *data);
+int intel_ia2psh_command(struct psh_msg *in, struct psh_msg *out,
+ int ch, int timeout);
+int intel_psh_ipc_bind(int ch, psh_channel_handle_t handle, void *data);
+void intel_psh_ipc_unbind(int ch);
+
+void intel_psh_ipc_disable_irq(void);
+void intel_psh_ipc_enable_irq(void);
+#endif
--- /dev/null
+#ifndef _ASM_X86_INTEL_SCU_FLIS_H_
+#define _ASM_X86_INTEL_SCU_FLIS_H_
+
+enum flis_param_t {
+ PULL,
+ MUX,
+ OPEN_DRAIN,
+};
+
+/* For MERR */
+
+#define PULL_MASK ((7 << 4) | (3 << 8))
+#define MUX_MASK (0xF << 12)
+#define OPEN_DRAIN_MASK BIT(21)
+
+#define PULL_UP (1 << 8)
+#define PULL_DOWN (2 << 8)
+#define R2Kohms (0 << 4)
+#define R20Kohms (1 << 4)
+#define R50Kohms (2 << 4)
+#define R910ohms (3 << 4)
+
+#define UP_2K (PULL_UP | R2Kohms)
+#define UP_20K (PULL_UP | R20Kohms)
+#define UP_50K (PULL_UP | R50Kohms)
+#define UP_910 (PULL_UP | R910ohms)
+#define DOWN_2K (PULL_DOWN | R2Kohms)
+#define DOWN_20K (PULL_DOWN | R20Kohms)
+#define DOWN_50K (PULL_DOWN | R50Kohms)
+#define DOWN_910 (PULL_DOWN | R910ohms)
+
+#define OD_DISABLE (0 << 21)
+#define OD_ENABLE (1 << 21)
+
+#define MUX_EN_INPUT_EN (2 << 12)
+#define INPUT_EN (1 << 12)
+#define MUX_EN_OUTPUT_EN (8 << 12)
+#define OUTPUT_EN (4 << 12)
+
+/* Add prefix "tng_" to avoid name duplication with ctp pins */
+enum tng_pinname_t {
+ tng_usb_ulpi_0_clk = 0,
+ tng_usb_ulpi_0_data_0 = 1,
+ tng_usb_ulpi_0_data_1 = 2,
+ tng_usb_ulpi_0_data_2 = 3,
+ tng_usb_ulpi_0_data_3 = 4,
+ tng_usb_ulpi_0_data_4 = 5,
+ tng_usb_ulpi_0_data_5 = 6,
+ tng_usb_ulpi_0_data_6 = 7,
+ tng_usb_ulpi_0_data_7 = 8,
+ tng_usb_ulpi_0_dir = 9,
+ tng_usb_ulpi_0_nxt = 10,
+ tng_usb_ulpi_0_refclk = 11,
+ tng_usb_ulpi_0_stp = 12,
+ tng_emmc_0_clk = 13,
+ tng_emmc_0_cmd = 14,
+ tng_emmc_0_d_0 = 15,
+ tng_emmc_0_d_1 = 16,
+ tng_emmc_0_d_2 = 17,
+ tng_emmc_0_d_3 = 18,
+ tng_emmc_0_d_4 = 19,
+ tng_emmc_0_d_5 = 20,
+ tng_emmc_0_d_6 = 21,
+ tng_emmc_0_d_7 = 22,
+ tng_emmc_0_rst_b = 23,
+ tng_gp_emmc_1_clk = 24,
+ tng_gp_emmc_1_cmd = 25,
+ tng_gp_emmc_1_d_0 = 26,
+ tng_gp_emmc_1_d_1 = 27,
+ tng_gp_emmc_1_d_2 = 28,
+ tng_gp_emmc_1_d_3 = 29,
+ tng_gp_emmc_1_d_4 = 30,
+ tng_gp_emmc_1_d_5 = 31,
+ tng_gp_emmc_1_d_6 = 32,
+ tng_gp_emmc_1_d_7 = 33,
+ tng_gp_emmc_1_rst_b = 34,
+ tng_gp_28 = 35,
+ tng_gp_29 = 36,
+ tng_gp_sdio_0_cd_b = 37,
+ tng_gp_sdio_0_clk = 38,
+ tng_gp_sdio_0_cmd = 39,
+ tng_gp_sdio_0_dat_0 = 40,
+ tng_gp_sdio_0_dat_1 = 41,
+ tng_gp_sdio_0_dat_2 = 42,
+ tng_gp_sdio_0_dat_3 = 43,
+ tng_gp_sdio_0_lvl_clk_fb = 44,
+ tng_gp_sdio_0_lvl_cmd_dir = 45,
+ tng_gp_sdio_0_lvl_dat_dir = 46,
+ tng_gp_sdio_0_lvl_sel = 47,
+ tng_gp_sdio_0_powerdown_b = 48,
+ tng_gp_sdio_0_wp = 49,
+ tng_gp_sdio_1_clk = 50,
+ tng_gp_sdio_1_cmd = 51,
+ tng_gp_sdio_1_dat_0 = 52,
+ tng_gp_sdio_1_dat_1 = 53,
+ tng_gp_sdio_1_dat_2 = 54,
+ tng_gp_sdio_1_dat_3 = 55,
+ tng_gp_sdio_1_powerdown_b = 56,
+ tng_mhsi_acdata = 57,
+ tng_mhsi_acflag = 58,
+ tng_mhsi_acready = 59,
+ tng_mhsi_acwake = 60,
+ tng_mhsi_cadata = 61,
+ tng_mhsi_caflag = 62,
+ tng_mhsi_caready = 63,
+ tng_mhsi_cawake = 64,
+ tng_gp_mslim_0_bclk = 65,
+ tng_gp_mslim_0_bdat = 66,
+ tng_gp_ssp_0_clk = 67,
+ tng_gp_ssp_0_fs = 68,
+ tng_gp_ssp_0_rxd = 69,
+ tng_gp_ssp_0_txd = 70,
+ tng_gp_ssp_1_clk = 71,
+ tng_gp_ssp_1_fs = 72,
+ tng_gp_ssp_1_rxd = 73,
+ tng_gp_ssp_1_txd = 74,
+ tng_gp_ssp_2_clk = 75,
+ tng_gp_ssp_2_fs = 76,
+ tng_gp_ssp_2_rxd = 77,
+ tng_gp_ssp_2_txd = 78,
+ tng_gp_ssp_3_clk = 79,
+ tng_gp_ssp_3_fs = 80,
+ tng_gp_ssp_3_rxd = 81,
+ tng_gp_ssp_3_txd = 82,
+ tng_gp_ssp_4_clk = 83,
+ tng_gp_ssp_4_fs_0 = 84,
+ tng_gp_ssp_4_fs_1 = 85,
+ tng_gp_ssp_4_fs_2 = 86,
+ tng_gp_ssp_4_fs_3 = 87,
+ tng_gp_ssp_4_rxd = 88,
+ tng_gp_ssp_4_txd = 89,
+ tng_gp_ssp_5_clk = 90,
+ tng_gp_ssp_5_fs_0 = 91,
+ tng_gp_ssp_5_fs_1 = 92,
+ tng_gp_ssp_5_fs_2 = 93,
+ tng_gp_ssp_5_fs_3 = 94,
+ tng_gp_ssp_5_rxd = 95,
+ tng_gp_ssp_5_txd = 96,
+ tng_gp_ssp_6_clk = 97,
+ tng_gp_ssp_6_fs = 98,
+ tng_gp_ssp_6_rxd = 99,
+ tng_gp_ssp_6_txd = 100,
+ tng_gp_i2c_1_scl = 101,
+ tng_gp_i2c_1_sda = 102,
+ tng_gp_i2c_2_scl = 103,
+ tng_gp_i2c_2_sda = 104,
+ tng_gp_i2c_3_scl = 105,
+ tng_gp_i2c_3_sda = 106,
+ tng_gp_i2c_4_scl = 107,
+ tng_gp_i2c_4_sda = 108,
+ tng_gp_i2c_5_scl = 109,
+ tng_gp_i2c_5_sda = 110,
+ tng_gp_i2c_6_scl = 111,
+ tng_gp_i2c_6_sda = 112,
+ tng_gp_i2c_7_scl = 113,
+ tng_gp_i2c_7_sda = 114,
+ tng_gp_uart_0_cts = 115,
+ tng_gp_uart_0_rts = 116,
+ tng_gp_uart_0_rx = 117,
+ tng_gp_uart_0_tx = 118,
+ tng_gp_uart_1_cts = 119,
+ tng_gp_uart_1_rts = 120,
+ tng_gp_uart_1_rx = 121,
+ tng_gp_uart_1_tx = 122,
+ tng_gp_uart_2_cts = 123,
+ tng_gp_uart_2_rts = 124,
+ tng_gp_uart_2_rx = 125,
+ tng_gp_uart_2_tx = 126,
+ tng_gp_13 = 127,
+ tng_gp_14 = 128,
+ tng_gp_15 = 129,
+ tng_gp_16 = 130,
+ tng_gp_17 = 131,
+ tng_gp_18 = 132,
+ tng_gp_19 = 133,
+ tng_gp_20 = 134,
+ tng_gp_21 = 135,
+ tng_gp_22 = 136,
+ tng_gp_23 = 137,
+ tng_gp_24 = 138,
+ tng_gp_25 = 139,
+ tng_gp_fast_int_0 = 140,
+ tng_gp_fast_int_1 = 141,
+ tng_gp_fast_int_2 = 142,
+ tng_gp_fast_int_3 = 143,
+ tng_gp_pwm_0 = 144,
+ tng_gp_pwm_1 = 145,
+ tng_gp_camerasb_0 = 146,
+ tng_gp_camerasb_1 = 147,
+ tng_gp_camerasb_2 = 148,
+ tng_gp_camerasb_3 = 149,
+ tng_gp_camerasb_4 = 150,
+ tng_gp_camerasb_5 = 151,
+ tng_gp_camerasb_6 = 152,
+ tng_gp_camerasb_7 = 153,
+ tng_gp_camerasb_8 = 154,
+ tng_gp_camerasb_9 = 155,
+ tng_gp_camerasb_10 = 156,
+ tng_gp_camerasb_11 = 157,
+ tng_gp_clkph_0 = 158,
+ tng_gp_clkph_1 = 159,
+ tng_gp_clkph_2 = 160,
+ tng_gp_clkph_3 = 161,
+ tng_gp_clkph_4 = 162,
+ tng_gp_clkph_5 = 163,
+ tng_gp_hdmi_hpd = 164,
+ tng_gp_intd_dsi_te1 = 165,
+ tng_gp_intd_dsi_te2 = 166,
+ tng_osc_clk_ctrl_0 = 167,
+ tng_osc_clk_ctrl_1 = 168,
+ tng_osc_clk_out_0 = 169,
+ tng_osc_clk_out_1 = 170,
+ tng_osc_clk_out_2 = 171,
+ tng_osc_clk_out_3 = 172,
+ tng_osc_clk_out_4 = 173,
+ tng_resetout_b = 174,
+ tng_xxpmode = 175,
+ tng_xxprdy = 176,
+ tng_xxpreq_b = 177,
+ tng_gp_26 = 178,
+ tng_gp_27 = 179,
+ tng_i2c_0_scl = 180,
+ tng_i2c_0_sda = 181,
+ tng_ierr_b = 182,
+ tng_jtag_tckc = 183,
+ tng_jtag_tdic = 184,
+ tng_jtag_tdoc = 185,
+ tng_jtag_tmsc = 186,
+ tng_jtag_trst_b = 187,
+ tng_prochot_b = 188,
+ tng_rtc_clk = 189,
+ tng_svid_vclk = 190,
+ tng_svid_vdio = 191,
+ tng_thermtrip_b = 192,
+ tng_standby = 193,
+ tng_gp_kbd_dkin_0 = 194,
+ tng_gp_kbd_dkin_1 = 195,
+ tng_gp_kbd_dkin_2 = 196,
+ tng_gp_kbd_dkin_3 = 197,
+ tng_gp_kbd_mkin_0 = 198,
+ tng_gp_kbd_mkin_1 = 199,
+ tng_gp_kbd_mkin_2 = 200,
+ tng_gp_kbd_mkin_3 = 201,
+ tng_gp_kbd_mkin_4 = 202,
+ tng_gp_kbd_mkin_5 = 203,
+ tng_gp_kbd_mkin_6 = 204,
+ tng_gp_kbd_mkin_7 = 205,
+ tng_gp_kbd_mkout_0 = 206,
+ tng_gp_kbd_mkout_1 = 207,
+ tng_gp_kbd_mkout_2 = 208,
+ tng_gp_kbd_mkout_3 = 209,
+ tng_gp_kbd_mkout_4 = 210,
+ tng_gp_kbd_mkout_5 = 211,
+ tng_gp_kbd_mkout_6 = 212,
+ tng_gp_kbd_mkout_7 = 213,
+ tng_gp_0 = 214,
+ tng_gp_1 = 215,
+ tng_gp_2 = 216,
+ tng_gp_3 = 217,
+ tng_gp_4 = 218,
+ tng_gp_5 = 219,
+ tng_gp_6 = 220,
+ tng_gp_7 = 221,
+ tng_gp_8 = 222,
+ tng_gp_9 = 223,
+ tng_gp_10 = 224,
+ tng_gp_11 = 225,
+ tng_gp_12 = 226,
+ tng_gp_mpti_clk = 227,
+ tng_gp_mpti_data_0 = 228,
+ tng_gp_mpti_data_1 = 229,
+ tng_gp_mpti_data_2 = 230,
+ tng_gp_mpti_data_3 = 231,
+ TNG_PIN_NUM,
+};
+
+struct pinstruct_t {
+ bool valid; /* the pin is allowed to be configured or not */
+ u8 bus_address;
+ u8 pullup_offset;
+ u8 pullup_lsb_pos;
+ u8 direction_offset;
+ u8 direction_lsb_pos;
+ u8 open_drain_offset;
+ u8 open_drain_bit;
+};
+
+enum ACCESS_CTRL {
+ readonly = (1 << 0),
+ writable = (1 << 1),
+};
+
+struct pin_mmio_flis_t {
+ u8 access_ctrl; /* mmio flis access control */
+ u32 offset; /* pin offset from flis base address */
+};
+
+struct intel_scu_flis_platform_data {
+ struct pinstruct_t *pin_t;
+ int pin_num;
+ u32 flis_base;
+ u32 flis_len;
+ struct pin_mmio_flis_t *mmio_flis_t;
+};
+
+#define OPS_STR_LEN 10
+
+enum {
+ DBG_SHIM_FLIS_ADDR,
+ DBG_SHIM_OFFSET,
+ DBG_SHIM_DATA,
+
+ DBG_PARAM_VAL,
+ DBG_PARAM_TYPE,
+ DBG_PIN_NAME,
+};
+
+int intel_scu_ipc_write_shim(u32 data, u32 flis_addr, u32 offset);
+int intel_scu_ipc_read_shim(u32 *data, u32 flis_addr, u32 offset);
+int intel_scu_ipc_update_shim(u32 data, u32 mask, u32 flis_addr, u32 offset);
+int config_pin_flis(unsigned int name, enum flis_param_t param, u32 val);
+int get_pin_flis(unsigned int name, enum flis_param_t param, u32 *val);
+u32 get_flis_value(u32 offset);
+void set_flis_value(u32 value, u32 offset);
+
+#endif
#define _ASM_X86_INTEL_SCU_IPC_H_
#include <linux/notifier.h>
+#include <asm/intel-mid.h>
+/* IPC defines the following message types */
+#define IPCMSG_GET_HOBADDR 0xE5 /* OSHOB access. */
+#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
+#define IPCMSG_MIP_ACCESS 0xEC /* IA MIP access */
+#define IPCMSG_PMDB_CMD 0xE0
#define IPCMSG_WARM_RESET 0xF0
#define IPCMSG_COLD_RESET 0xF1
#define IPCMSG_SOFT_RESET 0xF2
#define IPCMSG_COLD_BOOT 0xF3
-
+#define IPCMSG_COLD_OFF 0x80 /* for TNG only */
+#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
+#define IPCMSG_SHIM_CONFIG 0xF5 /* Configure SHIM */
+#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
#define IPCMSG_VRTC 0xFA /* Set vRTC device */
- /* Command id associated with message IPCMSG_VRTC */
- #define IPC_CMD_VRTC_SETTIME 1 /* Set time */
- #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
-
-/* Read single register */
-int intel_scu_ipc_ioread8(u16 addr, u8 *data);
-
-/* Read two sequential registers */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data);
-
-/* Read four sequential registers */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data);
-
-/* Read a vector */
-int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
-
-/* Write single register */
-int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
+#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
+#define IPCMSG_OSC_CLK 0xE6 /* Turn on/off osc clock */
+#define IPCMSG_S0IX_COUNTER 0xEB /* Get S0ix residency */
+#define IPCMSG_CLEAR_FABERROR 0xE3 /* Clear fabric error log */
+#define IPCMSG_STORE_NV_DATA 0xCD /* Store the Non Volatile data to RAM */
+
+#define IPC_CMD_UMIP_RD 0
+#define IPC_CMD_UMIP_WR 1
+#define IPC_CMD_SMIP_RD 2
+
+/* Command id associated with message IPCMSG_PCNTRL */
+#define IPC_CMD_PCNTRL_W 0 /* Register write */
+#define IPC_CMD_PCNTRL_R 1 /* Register read */
+#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
+
+#define IPC_ERR_NONE 0
+#define IPC_ERR_CMD_NOT_SUPPORTED 1
+#define IPC_ERR_CMD_NOT_SERVICED 2
+#define IPC_ERR_UNABLE_TO_SERVICE 3
+#define IPC_ERR_CMD_INVALID 4
+#define IPC_ERR_CMD_FAILED 5
+#define IPC_ERR_EMSECURITY 6
+#define IPC_ERR_UNSIGNEDKERNEL 7
+
+#define MSIC_DEBUG_FILE "msic"
+#define MSIC_ALL_DEBUG_FILE "msic_all"
+#define MAX_MSIC_REG 0x3FF
+#define MIN_MSIC_REG 0x0
+
+
+
+/* Command id associated with message IPCMSG_VRTC */
+#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
+#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
+#define IPC_CMD_VRTC_SYNC_RTC 3 /* Sync MSIC/PMIC RTC to VRTC */
+
+/* Command id associated with message IPCMSG_SHIM_CONFIG */
+#define IPC_CMD_SHIM_RD 0 /* SHIM read */
+#define IPC_CMD_SHIM_WR 1 /* SHIM write */
+
+/* check ipc status */
+int intel_scu_ipc_check_status(void);
-/* Write two sequential registers */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data);
+/* I2C control api */
+int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
-/* Write four sequential registers */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data);
+/* Update FW version */
+int intel_scu_ipc_fw_update(void);
+int intel_scu_ipc_mrstfw_update(u8 *buffer, u32 length);
+int intel_scu_ipc_medfw_prepare(void __user *arg);
-/* Write a vector */
-int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned);
+int intel_scu_ipc_write_umip(u8 *data, int len, int offset);
-/* Update single register based on the mask */
-int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+/* NVRAM access */
+u32 intel_scu_ipc_get_nvram_size(void);
+u32 intel_scu_ipc_get_nvram_addr(void);
-/* Issue commands to the SCU with or without data */
-int intel_scu_ipc_simple_command(int cmd, int sub);
-int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
- u32 *out, int outlen);
-/* I2C control api */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
+/* Penwell has 4 osc clocks */
+#define OSC_CLK_AUDIO 0 /* Audio */
+#define OSC_CLK_CAM0 1 /* Primary camera */
+#define OSC_CLK_CAM1 2 /* Secondary camera */
+#define OSC_CLK_DISP 3 /* Display buffer */
-/* Update FW version */
-int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz);
extern struct blocking_notifier_head intel_scu_notifier;
--- /dev/null
+#ifndef _ASM_X86_INTEL_SCU_IPCUTIL_H_
+#define _ASM_X86_INTEL_SCU_IPCUTIL_H_
+
+#include <linux/types.h>
+
+/* ioctl commands */
+#define INTEL_SCU_IPC_REGISTER_READ 0
+#define INTEL_SCU_IPC_REGISTER_WRITE 1
+#define INTEL_SCU_IPC_REGISTER_UPDATE 2
+#define INTEL_SCU_IPC_FW_UPDATE 0xA2
+#define INTEL_SCU_IPC_MEDFIELD_FW_UPDATE 0xA3
+#define INTEL_SCU_IPC_FW_REVISION_GET 0xB0
+#define INTEL_SCU_IPC_FW_REVISION_EXT_GET 0xB1
+#define INTEL_SCU_IPC_S0IX_RESIDENCY 0xB8
+#define INTEL_SCU_IPC_READ_RR_FROM_OSNIB 0xC1
+#define INTEL_SCU_IPC_WRITE_RR_TO_OSNIB 0xC2
+#define INTEL_SCU_IPC_READ_VBATTCRIT 0xC4
+#define INTEL_SCU_IPC_WRITE_ALARM_FLAG_TO_OSNIB 0xC5
+#define INTEL_SCU_IPC_OSC_CLK_CNTL 0xC6
+#define INTEL_SCU_IPC_PMDB_ACCESS 0xD0
+
+#define SIGNED_MOS_ATTR 0x0
+#define SIGNED_COS_ATTR 0x0A
+#define SIGNED_RECOVERY_ATTR 0x0C
+#define SIGNED_POS_ATTR 0x0E
+#define SIGNED_FACTORY_ATTR 0x12
+
+enum intel_scu_ipc_wake_src {
+ WAKE_BATT_INSERT,
+ WAKE_PWR_BUTTON_PRESS,
+ WAKE_RTC_TIMER,
+ WAKE_USB_CHRG_INSERT,
+ WAKE_RESERVED,
+ WAKE_REAL_RESET,
+ WAKE_COLD_BOOT,
+ WAKE_UNKNOWN,
+ WAKE_KERNEL_WATCHDOG_RESET,
+ WAKE_SECURITY_WATCHDOG_RESET,
+ WAKE_WATCHDOG_COUNTER_EXCEEDED,
+ WAKE_POWER_SUPPLY_DETECTED,
+ WAKE_FASTBOOT_BUTTONS_COMBO,
+ WAKE_NO_MATCHING_OSIP_ENTRY,
+ WAKE_CRITICAL_BATTERY,
+ WAKE_INVALID_CHECKSUM,
+ WAKE_FORCED_RESET,
+ WAKE_ACDC_CHRG_INSERT,
+ WAKE_PMIC_WATCHDOG_RESET,
+ WAKE_PLATFORM_WATCHDOG_RESET,
+ WAKE_SC_WATCHDOG_RESET
+};
+
+struct scu_ipc_data {
+ __u32 count; /* No. of registers */
+ __u16 addr[5]; /* Register addresses */
+ __u8 data[5]; /* Register data */
+ __u8 mask; /* Valid for read-modify-write */
+};
+
+struct scu_ipc_version {
+ __u32 count; /* length of version info */
+ __u8 data[16]; /* version data */
+};
+
+struct osc_clk_t {
+ __u32 id; /* clock id */
+ __u32 khz; /* clock frequency */
+};
+
+/* PMDB buffer, cmd, and limits */
+#define PMDB_SIZE 512
+#define PMDB_WMDB_SIZE 76
+#define PMDB_OTPDB_SIZE 384
+#define PMDB_OTPCTL_SIZE 48
+#define PMDB_ACCESS_SIZE 16
+
+#define PMDB_SUB_CMD_R_WMDB 0
+#define PMDB_SUB_CMD_R_OTPDB 1
+#define PMDB_SUB_CMD_W_WMDB 2
+#define PMDB_SUB_CMD_W_OTPDB 3
+#define PMDB_SUB_CMD_R_OTPCTL 4
+
+struct scu_ipc_pmdb_buffer {
+ __u32 sub; /* sub cmd of SCU's PMDB IPC commands */
+ __u32 count; /* length of PMDB buffer */
+ __u32 offset; /* buffer start offset for each PMDB component */
+ __u8 data[PMDB_SIZE]; /* PMDB buffer */
+};
+
+/* Penwell has 4 osc clocks */
+#define OSC_CLK_AUDIO 0 /* Audio */
+#define OSC_CLK_CAM0 1 /* Primary camera */
+#define OSC_CLK_CAM1 2 /* Secondary camera */
+#define OSC_CLK_DISP 3 /* Display buffer */
+
+#ifdef __KERNEL__
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz);
+
+enum clk0_mode {
+ CLK0_AUDIENCE = 0x4,
+ CLK0_VIBRA1 = 0x8,
+ CLK0_VIBRA2 = 0x10,
+ CLK0_MSIC = 0x20,
+ CLK0_DEBUG = 0x100,
+ CLK0_QUERY = 0x1000,
+};
+
+int intel_scu_ipc_set_osc_clk0(unsigned int enable, enum clk0_mode mode);
+
+/* Helpers to turn on/off msic vprog1 and vprog2 */
+int intel_scu_ipc_msic_vprog1(int on);
+int intel_scu_ipc_msic_vprog2(int on);
+
+/* OSHOB-OS Handoff Buffer read */
+int intel_scu_ipc_get_oshob_base(void);
+int intel_scu_ipc_get_oshob_size(void);
+
+/* SCU trace buffer interface */
+u32 intel_scu_ipc_get_scu_trace_buffer(void);
+u32 intel_scu_ipc_get_scu_trace_buffer_size(void);
+u32 intel_scu_ipc_get_fabricerror_buf1_offset(void);
+u32 intel_scu_ipc_get_fabricerror_buf2_offset(void);
+
+/* OSNIB interface. */
+int intel_scu_ipc_write_osnib(u8 *data, int len, int offset);
+int intel_scu_ipc_read_osnib(u8 *data, int len, int offset);
+int intel_scu_ipc_write_osnib_extend(u8 *data, int len, int offset);
+int intel_scu_ipc_read_osnib_extend(u8 *data, int len, int offset);
+int intel_scu_ipc_write_osnib_rr(u8 rr);
+int intel_scu_ipc_read_osnib_rr(u8 *rr);
+int intel_scu_ipc_read_osnib_wd(u8 *wd);
+int intel_scu_ipc_write_osnib_wd(u8 *wd);
+#endif
+
+#endif
--- /dev/null
+#ifndef __INTEL_SCU_PMIC_H__
+#define __INTEL_SCU_PMIC_H__
+
+#include <asm/types.h>
+
+#define KOBJ_PMIC_ATTR(_name, _mode, _show, _store) \
+ struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+extern int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+extern int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+extern int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+extern int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+extern int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+extern int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+
+#endif /*__INTEL_SCU_PMIC_H__ */
--- /dev/null
+/*
+ * intel_soc_debug.h
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef INTEL_SOC_DEBUG_H
+#define INTEL_SOC_DEBUG_H
+
+#define DEBUG_FEATURE_PTI 0x00000001
+#define DEBUG_FEATURE_RTIT 0x00000002
+#define DEBUG_FEATURE_LAKEMORE 0x00000004
+#define DEBUG_FEATURE_SOCHAPS 0x00000008
+#define DEBUG_FEATURE_USB3DFX 0x00000010
+
+/* cpu_has_debug_feature checks whether the debug
+ * feature passed as parameter is enabled.
+ * The passed parameter shall be one (and only one)
+ * of the above values (DEBUG_FEATURE_XXX).
+ * The function returns 1 if the debug feature is
+ * enabled and 0 otherwise.
+ */
+
+#ifdef CONFIG_INTEL_DEBUG_FEATURE
+extern int cpu_has_debug_feature(u32 bit);
+#else
+static inline int cpu_has_debug_feature(u32 bit) { return 0; };
+#endif
+
+#endif
--- /dev/null
+/* intel_sst_mrfld.h - Common enum of the Merrifield platform
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Samreen Nilofer <samreen.nilofer@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef _INTEL_SST_MRFLD_H
+#define _INTEL_SST_MRFLD_H
+
+enum {
+ MERR_SALTBAY_AUDIO = 0,
+ MERR_SALTBAY_COMPR,
+ MERR_SALTBAY_VOIP,
+ MERR_SALTBAY_PROBE,
+ MERR_SALTBAY_AWARE,
+ MERR_SALTBAY_VAD,
+ MERR_SALTBAY_POWER,
+};
+
+enum {
+ MERR_DPCM_AUDIO = 0,
+ MERR_DPCM_DB,
+ MERR_DPCM_LL,
+ MERR_DPCM_COMPR,
+ MERR_DPCM_VOIP,
+ MERR_DPCM_PROBE,
+};
+
+#endif
#define MODULE_PROC_FAMILY "586MMX "
#elif defined CONFIG_MCORE2
#define MODULE_PROC_FAMILY "CORE2 "
-#elif defined CONFIG_MATOM
+#elif (defined CONFIG_MATOM) || (defined CONFIG_MSLM)
#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
#define MODULE_PROC_FAMILY "686 "
+++ /dev/null
-/*
- * mrst.h: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-#ifndef _ASM_X86_MRST_H
-#define _ASM_X86_MRST_H
-
-#include <linux/sfi.h>
-
-extern int pci_mrst_init(void);
-extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
-extern int sfi_mrtc_num;
-extern struct sfi_rtc_table_entry sfi_mrtc_array[];
-
-/*
- * Medfield is the follow-up of Moorestown, it combines two chip solution into
- * one. Other than that it also added always-on and constant tsc and lapic
- * timers. Medfield is the platform name, and the chip name is called Penwell
- * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
- * identified via MSRs.
- */
-enum mrst_cpu_type {
- /* 1 was Moorestown */
- MRST_CPU_CHIP_PENWELL = 2,
-};
-
-extern enum mrst_cpu_type __mrst_cpu_chip;
-
-#ifdef CONFIG_X86_INTEL_MID
-
-static inline enum mrst_cpu_type mrst_identify_cpu(void)
-{
- return __mrst_cpu_chip;
-}
-
-#else /* !CONFIG_X86_INTEL_MID */
-
-#define mrst_identify_cpu() (0)
-
-#endif /* !CONFIG_X86_INTEL_MID */
-
-enum mrst_timer_options {
- MRST_TIMER_DEFAULT,
- MRST_TIMER_APBT_ONLY,
- MRST_TIMER_LAPIC_APBT,
-};
-
-extern enum mrst_timer_options mrst_timer_options;
-
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define PENWELL_FSB_FREQ_83SKU 83200
-#define PENWELL_FSB_FREQ_100SKU 99840
-
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX 8
-
-extern struct console early_mrst_console;
-extern void mrst_early_console_init(void);
-
-extern struct console early_hsu_console;
-extern void hsu_early_console_init(const char *);
-
-extern void intel_scu_devices_create(void);
-extern void intel_scu_devices_destroy(void);
-
-/* VRTC timer */
-#define MRST_VRTC_MAP_SZ (1024)
-/*#define MRST_VRTC_PGOFFSET (0xc00) */
-
-extern void mrst_rtc_init(void);
-
-#endif /* _ASM_X86_MRST_H */
#define MWAIT_ECX_INTERRUPT_BREAK 0x1
+#ifdef CONFIG_ATOM_SOC_POWER
+#define MWAIT_MAX_NUM_CSTATES 10
+#else
+#define MWAIT_MAX_NUM_CSTATES 8
+#endif
+
#endif /* _ASM_X86_MWAIT_H */
--- /dev/null
+/*
+ * platform_byt_audio.h: Baytrail audio platform data header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Omair Md Abdullah <omair.m.abdullah@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_BYT_AUDIO_H_
+#define _PLATFORM_BYT_AUDIO_H_
+
+enum {
+ BYT_AUD_AIF1 = 0,
+ BYT_AUD_AIF2,
+ BYT_AUD_COMPR_DEV,
+#ifdef CONFIG_SND_SOC_COMMS_SSP
+ BYT_COMMS_BT,
+ BYT_COMMS_MODEM,
+#endif /* CONFIG_SND_SOC_COMMS_SSP */
+ BYT_AUD_PROBE_DEV,
+};
+
+enum {
+ BYT_CR_AUD_AIF1 = 0,
+ BYT_CR_AUD_COMPR_DEV,
+ BYT_CR_COMMS_BT,
+};
+/* LPE viewpoint addresses */
+/* TODO: move to DSDT */
+#define SST_BYT_IRAM_PHY_START 0xff2c0000
+#define SST_BYT_IRAM_PHY_END 0xff2d4000
+#define SST_BYT_DRAM_PHY_START 0xff300000
+#define SST_BYT_DRAM_PHY_END 0xff320000
+#define SST_BYT_IMR_VIRT_START 0xc0000000 /* virtual addr in LPE */
+#define SST_BYT_IMR_VIRT_END 0xc01fffff
+#define SST_BYT_SHIM_PHY_ADDR 0xff340000
+#define SST_BYT_MBOX_PHY_ADDR 0xff344000
+#define SST_BYT_DMA0_PHY_ADDR 0xff298000
+#define SST_BYT_DMA1_PHY_ADDR 0xff29c000
+#define SST_BYT_SSP0_PHY_ADDR 0xff2a0000
+#define SST_BYT_SSP2_PHY_ADDR 0xff2a2000
+
+#define BYT_FW_MOD_TABLE_OFFSET 0x80000
+#define BYT_FW_MOD_TABLE_SIZE 0x100
+
+#endif
--- /dev/null
+
+/*
+ * platform_sst.h: sst audio platform data header file
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Dharageswari R <dharageswari.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _PLATFORM_SST_H_
+#define _PLATFORM_SST_H_
+
+#include <linux/sfi.h>
+
+#define MAX_NUM_STREAMS_CTP 5
+#define MAX_NUM_STREAMS_MRFLD 25
+#define MAX_NUM_STREAMS MAX_NUM_STREAMS_MRFLD
+
+#define SST_MAX_SSP_PORTS 4
+#define SST_MAX_DMA 2
+
+enum {
+ SST_SSP_AUDIO = 0,
+ SST_SSP_MODEM,
+ SST_SSP_BT,
+ SST_SSP_FM,
+};
+
+struct sst_gpio_config {
+ u32 i2s_rx_alt;
+ u32 i2s_tx_alt;
+ u32 i2s_frame;
+ u32 i2s_clock;
+ u32 alt_function;
+};
+
+struct sst_ssp_info {
+ u32 base_add;
+ struct sst_gpio_config gpio;
+ bool gpio_in_use;
+};
+
+struct sst_info {
+ u32 iram_start;
+ u32 iram_end;
+ bool iram_use;
+ u32 dram_start;
+ u32 dram_end;
+ bool dram_use;
+ u32 imr_start;
+ u32 imr_end;
+ bool imr_use;
+ u32 mailbox_start;
+ bool use_elf;
+ bool lpe_viewpt_rqd;
+ unsigned int max_streams;
+ u32 dma_max_len;
+ u8 num_probes;
+};
+
+struct sst_ssp_platform_cfg {
+ u8 ssp_cfg_sst;
+ u8 port_number;
+ u8 is_master;
+ u8 pack_mode;
+ u8 num_slots_per_frame;
+ u8 num_bits_per_slot;
+ u8 active_tx_map;
+ u8 active_rx_map;
+ u8 ssp_frame_format;
+ u8 frame_polarity;
+ u8 serial_bitrate_clk_mode;
+ u8 frame_sync_width;
+ u8 dma_handshake_interface_tx;
+ u8 dma_handshake_interface_rx;
+ u8 network_mode;
+ u8 start_delay;
+ u32 ssp_base_add;
+} __packed;
+
+struct sst_board_config_data {
+ struct sst_ssp_platform_cfg ssp_platform_data[SST_MAX_SSP_PORTS];
+ u8 active_ssp_ports;
+ u8 platform_id;
+ u8 board_id;
+ u8 ihf_num_chan;
+ u32 osc_clk_freq;
+} __packed;
+
+struct sst_platform_config_data {
+ u32 sst_sram_buff_base;
+ u32 sst_dma_base[SST_MAX_DMA];
+} __packed;
+
+struct sst_platform_debugfs_data {
+ u32 ssp_reg_size;
+ u32 dma_reg_size;
+ u32 checkpoint_offset;
+ u32 checkpoint_size;
+ u8 num_ssp;
+ u8 num_dma;
+};
+
+struct sst_ipc_info {
+ int ipc_offset;
+ bool use_32bit_ops;
+ unsigned int mbox_recv_off;
+};
+
+struct sst_lib_dnld_info {
+ unsigned int mod_base;
+ unsigned int mod_end;
+ unsigned int mod_table_offset;
+ unsigned int mod_table_size;
+ bool mod_ddr_dnld;
+};
+
+struct sst_platform_info {
+ const struct sst_info *probe_data;
+ const struct sst_ssp_info *ssp_data;
+ const struct sst_board_config_data *bdata;
+ const struct sst_platform_config_data *pdata;
+ const struct sst_ipc_info *ipc_info;
+ const struct sst_platform_debugfs_data *debugfs_data;
+ const struct sst_lib_dnld_info *lib_info;
+};
+
+#endif
--- /dev/null
+/*
+ * platform_sst_audio.h: sst audio platform data header file
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SST_AUDIO_H_
+#define _PLATFORM_SST_AUDIO_H_
+
+#include <linux/sfi.h>
+
+/* The stream map status is used to dynamically assign
+ * device-id to a device, for example probe device. If
+ * a stream map entry is free for a device then the device-id
+ * for that device will be populated when the device is
+ * opened and then the status set to IN_USE. When device
+ * is closed, the strm map status is set to FREE again.
+ */
+enum sst_strm_map_status {
+ SST_DEV_MAP_FREE = 0,
+ SST_DEV_MAP_IN_USE,
+};
+
+/* Device IDs for CTP are same as stream IDs */
+enum sst_audio_device_id_ctp {
+ SST_PCM_OUT0 = 1,
+ SST_PCM_OUT1 = 2,
+ SST_COMPRESSED_OUT = 3,
+ SST_CAPTURE_IN = 4,
+ SST_PROBE_IN = 5,
+};
+
+enum sst_audio_task_id_mrfld {
+ SST_TASK_ID_NONE = 0,
+ SST_TASK_ID_SBA = 1,
+ SST_TASK_ID_FBA_UL = 2,
+ SST_TASK_ID_MEDIA = 3,
+ SST_TASK_ID_AWARE = 4,
+ SST_TASK_ID_FBA_DL = 5,
+ SST_TASK_ID_MAX = SST_TASK_ID_FBA_DL,
+};
+
+/* Device IDs for Merrifield are Pipe IDs,
+ * ref: LPE DSP command interface spec v0.75 */
+enum sst_audio_device_id_mrfld {
+ /* Output pipeline IDs */
+ PIPE_ID_OUT_START = 0x0,
+ PIPE_MODEM_OUT = 0x0,
+ PIPE_BT_OUT = 0x1,
+ PIPE_CODEC_OUT0 = 0x2,
+ PIPE_CODEC_OUT1 = 0x3,
+ PIPE_SPROT_LOOP_OUT = 0x4,
+ PIPE_MEDIA_LOOP1_OUT = 0x5,
+ PIPE_MEDIA_LOOP2_OUT = 0x6,
+ PIPE_PROBE_OUT = 0x7,
+ PIPE_HF_SNS_OUT = 0x8, /* VOCIE_UPLINK_REF2 */
+ PIPE_HF_OUT = 0x9, /* VOICE_UPLINK_REF1 */
+ PIPE_SPEECH_OUT = 0xA, /* VOICE UPLINK */
+ PIPE_RxSPEECH_OUT = 0xB, /* VOICE_DOWNLINK */
+ PIPE_VOIP_OUT = 0xC,
+ PIPE_PCM0_OUT = 0xD,
+ PIPE_PCM1_OUT = 0xE,
+ PIPE_PCM2_OUT = 0xF,
+ PIPE_AWARE_OUT = 0x10,
+ PIPE_VAD_OUT = 0x11,
+ PIPE_MEDIA0_OUT = 0x12,
+ PIPE_MEDIA1_OUT = 0x13,
+ PIPE_FM_OUT = 0x14,
+ PIPE_PROBE1_OUT = 0x15,
+ PIPE_PROBE2_OUT = 0x16,
+ PIPE_PROBE3_OUT = 0x17,
+ PIPE_PROBE4_OUT = 0x18,
+ PIPE_PROBE5_OUT = 0x19,
+ PIPE_PROBE6_OUT = 0x1A,
+ PIPE_PROBE7_OUT = 0x1B,
+ PIPE_PROBE8_OUT = 0x1C,
+/* Input Pipeline IDs */
+ PIPE_ID_IN_START = 0x80,
+ PIPE_MODEM_IN = 0x80,
+ PIPE_BT_IN = 0x81,
+ PIPE_CODEC_IN0 = 0x82,
+ PIPE_CODEC_IN1 = 0x83,
+ PIPE_SPROT_LOOP_IN = 0x84,
+ PIPE_MEDIA_LOOP1_IN = 0x85,
+ PIPE_MEDIA_LOOP2_IN = 0x86,
+ PIPE_PROBE_IN = 0x87,
+ PIPE_SIDETONE_IN = 0x88,
+ PIPE_TxSPEECH_IN = 0x89,
+ PIPE_SPEECH_IN = 0x8A,
+ PIPE_TONE_IN = 0x8B,
+ PIPE_VOIP_IN = 0x8C,
+ PIPE_PCM0_IN = 0x8D,
+ PIPE_PCM1_IN = 0x8E,
+ PIPE_MEDIA0_IN = 0x8F,
+ PIPE_MEDIA1_IN = 0x90,
+ PIPE_MEDIA2_IN = 0x91,
+ PIPE_FM_IN = 0x92,
+ PIPE_PROBE1_IN = 0x93,
+ PIPE_PROBE2_IN = 0x94,
+ PIPE_PROBE3_IN = 0x95,
+ PIPE_PROBE4_IN = 0x96,
+ PIPE_PROBE5_IN = 0x97,
+ PIPE_PROBE6_IN = 0x98,
+ PIPE_PROBE7_IN = 0x99,
+ PIPE_PROBE8_IN = 0x9A,
+ PIPE_MEDIA3_IN = 0x9C,
+ PIPE_LOW_PCM0_IN = 0x9D,
+ PIPE_RSVD = 0xFF,
+};
+
+/* The stream map for each platform consists of an array of the below
+ * stream map structure. The array index is used as the static stream-id
+ * associated with a device and (dev_num,subdev_num,direction) tuple match
+ * gives the device_id for the device.
+ */
+struct sst_dev_stream_map {
+ u8 dev_num;
+ u8 subdev_num;
+ u8 direction;
+ u8 device_id;
+ u8 task_id;
+ u8 status;
+};
+
+#define MAX_DESCRIPTOR_SIZE 172
+
+struct sst_dev_effects_map {
+ char uuid[16];
+ u16 algo_id;
+ char descriptor[MAX_DESCRIPTOR_SIZE];
+};
+
+struct sst_dev_effects_resource_map {
+ char uuid[16];
+ unsigned int flags;
+ u16 cpuLoad;
+ u16 memoryUsage;
+};
+
+struct sst_dev_effects {
+ struct sst_dev_effects_map *effs_map;
+ struct sst_dev_effects_resource_map *effs_res_map;
+ unsigned int effs_num_map;
+};
+
+struct sst_platform_data {
+ /* Intel software platform id*/
+ const struct soft_platform_id *spid;
+ struct sst_dev_stream_map *pdev_strm_map;
+ struct sst_dev_effects pdev_effs;
+ unsigned int strm_map_size;
+};
+
+int add_sst_platform_device(void);
+#endif
+
--- /dev/null
+#ifndef __PMIC_PDATA_H__
+#define __PMIC_PDATA_H__
+
+struct temp_lookup {
+ int adc_val;
+ int temp;
+ int temp_err;
+};
+
+/*
+ * pmic cove charger driver info
+ */
+struct pmic_platform_data {
+ void (*cc_to_reg)(int, u8*);
+ void (*cv_to_reg)(int, u8*);
+ void (*inlmt_to_reg)(int, u8*);
+ int max_tbl_row_cnt;
+ struct temp_lookup *adc_tbl;
+};
+
+extern int pmic_get_status(void);
+extern int pmic_enable_charging(bool);
+extern int pmic_set_cc(int);
+extern int pmic_set_cv(int);
+extern int pmic_set_ilimma(int);
+extern int pmic_enable_vbus(bool enable);
+/* WA for ShadyCove VBUS removal detect issue */
+extern int pmic_handle_low_supply(void);
+
+extern void dump_pmic_regs(void);
+#ifdef CONFIG_PMIC_CCSM
+extern int pmic_get_health(void);
+extern int pmic_get_battery_pack_temp(int *);
+#else
+static int pmic_get_health(void)
+{
+ return 0;
+}
+static int pmic_get_battery_pack_temp(int *temp)
+{
+ return 0;
+}
+#endif
+
+#endif
# define NEED_NOPL 0
#endif
-#ifdef CONFIG_MATOM
+#if defined(CONFIG_MATOM) || defined(CONFIG_MSLM)
# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
#else
# define NEED_MOVBE 0
--- /dev/null
+#ifndef _SCU_IPC_RPMSG_H_
+#define _SCU_IPC_RPMSG_H_
+
+struct tx_ipc_msg {
+ u32 cmd;
+ u32 sub;
+ u8 *in;
+ u32 *out;
+ u32 inlen; /* number of bytes to be written */
+ u32 outlen; /* number of dwords to be read */
+ u32 sptr; /* needed for raw ipc command */
+ u32 dptr; /* needed for raw ipc command */
+};
+
+struct rx_ipc_msg {
+ u32 status; /* Indicate IPC status, 0-success, 1-fail */
+};
+
+#endif
extern void setup_default_timer_irq(void);
#ifdef CONFIG_X86_INTEL_MID
-extern void x86_mrst_early_setup(void);
+extern void x86_intel_mid_early_setup(void);
#else
-static inline void x86_mrst_early_setup(void) { }
+static inline void x86_intel_mid_early_setup(void) { }
#endif
#ifdef CONFIG_X86_INTEL_CE
X86_SUBARCH_PC = 0,
X86_SUBARCH_LGUEST,
X86_SUBARCH_XEN,
- X86_SUBARCH_MRST,
+ X86_SUBARCH_INTEL_MID,
X86_SUBARCH_CE4100,
X86_NR_SUBARCHS,
};
#define MSR_AMD_PERF_STATUS 0xc0010063
#define MSR_AMD_PERF_CTL 0xc0010062
+#define MSR_IA32_POWER_MISC 0x00000120
+
+#define ENABLE_ULFM_AUTOCM (1 << 2)
+#define ENABLE_INDP_AUTOCM (1 << 3)
+
#define MSR_IA32_MPERF 0x000000e7
#define MSR_IA32_APERF 0x000000e8
#include <asm/fixmap.h>
#include <asm/apb_timer.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/time.h>
#define APBT_CLOCKEVENT_RATING 110
adev->num = smp_processor_id();
adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
- mrst_timer_options == MRST_TIMER_LAPIC_APBT ?
+ intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
adev_virt_addr(adev), 0, apbt_freq);
/* Firmware does EOI handling for us. */
adev->timer->eoi = NULL;
- if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+ if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
global_clock_event = &adev->timer->ced;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
static __init int apbt_late_init(void)
{
- if (mrst_timer_options == MRST_TIMER_LAPIC_APBT ||
+ if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
!apb_timer_block_enabled)
return 0;
/* This notifier should be called after workqueue is ready */
}
#ifdef CONFIG_SMP
/* kernel cmdline disable apb timer, so we will use lapic timers */
- if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+ if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
printk(KERN_INFO "apbt: disabled per cpu timer\n");
return;
}
#include <asm/mce.h>
#include <asm/tsc.h>
#include <asm/hypervisor.h>
+#include <asm/intel-mid.h>
unsigned int num_processors;
lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
TICK_NSEC, lapic_clockevent.shift);
lapic_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+ clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &lapic_clockevent);
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
unsigned long flags;
int maxlvt;
+ /*
+ * On intel_mid, the suspend flow is a bit different, and the lapic
+ * hw implementation, and integration is not supporting standard
+ * suspension.
+ * This implementation is only putting high value to the timer, so that
+ * AONT global timer will be updated with this big value at s0i3 entry,
+ * and won't produce a timer-based wake up event.
+ */
+ if (intel_mid_identify_cpu() != 0) {
+ apic_write(APIC_TMICT, ~0);
+ return 0;
+ }
+
if (!apic_pm_state.active)
return 0;
unsigned long flags;
int maxlvt;
+ /*
+ * On intel_mid, the resume flow is a bit different.
+ * Refer explanation on lapic_suspend.
+ */
+ if (intel_mid_identify_cpu() != 0) {
+ apic_write(APIC_TMICT, 10);
+ return;
+ }
+
if (!apic_pm_state.active)
return;
writel(vector, &io_apic->eoi);
}
+/*
+ * This index matches with 1024 - 4 address in SCU RTE table area.
+ * That is not used for anything. Works in CLVP only
+ */
+#define LAST_INDEX_IN_IO_APIC_SPACE 255
+#define KERNEL_TO_SCU_PANIC_REQUEST (0x0515dead)
+void apic_scu_panic_dump(void)
+{
+ unsigned long flags;
+
+ printk(KERN_ERR "Request SCU panic dump");
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(0, LAST_INDEX_IN_IO_APIC_SPACE,
+ KERNEL_TO_SCU_PANIC_REQUEST);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+EXPORT_SYMBOL_GPL(apic_scu_panic_dump);
+
unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
return ret;
}
+static int ioapic_set_wake(struct irq_data *data, unsigned int on)
+{
+ return 0;
+}
static void ack_apic_edge(struct irq_data *data)
{
irq_complete_move(data->chip_data);
.irq_ack = ack_apic_edge,
.irq_eoi = ack_apic_level,
.irq_set_affinity = native_ioapic_set_affinity,
+ .irq_set_wake = ioapic_set_wake,
.irq_retrigger = ioapic_retrigger_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static inline void init_IO_APIC_traps(void)
*/
static void clear_all_debug_regs(void)
{
- int i;
+/* int i;
for (i = 0; i < 8; i++) {
+*/
/* Ignore db4, db5 */
- if ((i == 4) || (i == 5))
+/* if ((i == 4) || (i == 5))
continue;
set_debugreg(0, i);
}
+*/
}
#ifdef CONFIG_KGDB
switch (c->x86_model) {
case 0x27: /* Penwell */
case 0x35: /* Cloverview */
+ case 0x4A: /* Merrifield */
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
break;
default:
#include <asm/mce.h>
#include <asm/msr.h>
-/* How long to wait between reporting thermal events */
+/*
+ * How long to wait between reporting thermal events ?
+ * If Interrupt is enabled for Coretemp driver, the BIOS
+ * takes care of hysteresis and thus there are no spurious
+ * interrupts expected. Hence making this interval to 0.
+ */
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+#define CHECK_INTERVAL (0)
+#else
#define CHECK_INTERVAL (300 * HZ)
+#endif
#define THERMAL_THROTTLING_EVENT 0
#define POWER_LIMIT_EVENT 1
/* if we just entered the thermal event */
if (new_event) {
if (event == THERMAL_THROTTLING_EVENT)
- printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+ pr_crit_ratelimited("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
this_cpu,
level == CORE_LEVEL ? "Core" : "Package",
state->count);
else
- printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
+ pr_crit_ratelimited("CPU%d: %s power limit notification (total events = %lu)\n",
this_cpu,
level == CORE_LEVEL ? "Core" : "Package",
state->count);
}
if (old_event) {
if (event == THERMAL_THROTTLING_EVENT)
- printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
+ pr_info_ratelimited("CPU%d: %s temperature/speed normal\n",
this_cpu,
level == CORE_LEVEL ? "Core" : "Package");
else
- printk(KERN_INFO "CPU%d: %s power limit normal\n",
+ pr_info_ratelimited("CPU%d: %s power limit normal\n",
this_cpu,
level == CORE_LEVEL ? "Core" : "Package");
return 1;
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/pgtable.h>
#include <linux/usb/ehci_def.h>
mrst_early_console_init();
early_console_register(&early_mrst_console, keep);
}
-
+ if (!strncmp(buf, "mrfld", 5)) {
+ mrfld_early_console_init();
+ early_console_register(&early_mrfld_console, keep);
+ }
if (!strncmp(buf, "hsu", 3)) {
hsu_early_console_init(buf + 3);
early_console_register(&early_hsu_console, keep);
}
+ if (!strncmp(buf, "pti", 3))
+ early_console_register(&early_pti_console, keep);
#endif
buf++;
}
/* Call the subarch specific early setup function */
switch (boot_params.hdr.hardware_subarch) {
- case X86_SUBARCH_MRST:
- x86_mrst_early_setup();
+ case X86_SUBARCH_INTEL_MID:
+ x86_intel_mid_early_setup();
break;
case X86_SUBARCH_CE4100:
x86_ce4100_early_setup();
data = irq_desc_get_irq_data(desc);
affinity = data->affinity;
- if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
+ /* include IRQs who have no action, but are chained */
+ if ((!irq_has_action(irq) && !irq_is_chained(irq)) ||
+ irqd_is_per_cpu(data) ||
cpumask_subset(affinity, cpu_online_mask)) {
raw_spin_unlock(&desc->lock);
continue;
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
+#include <asm/intel-mid.h>
#include <asm/time.h>
-#include <asm/mrst.h>
#include <asm/rtc.h>
+#include <asm/io_apic.h>
#ifdef CONFIG_X86_32
/*
ts->tv_nsec = 0;
}
+static int handle_mrfl_dev_ioapic(int irq)
+{
+ int ret = 0;
+ int ioapic;
+ struct io_apic_irq_attr irq_attr;
+
+ ioapic = mp_find_ioapic(irq);
+ if (ioapic >= 0) {
+ irq_attr.ioapic = ioapic;
+ irq_attr.ioapic_pin = irq;
+ irq_attr.trigger = 1;
+ irq_attr.polarity = 0; /* Active high */
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
+ } else {
+ pr_warn("can not find interrupt %d in ioapic\n", irq);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
static struct resource rtc_resources[] = {
[0] = {
static __init int add_rtc_cmos(void)
{
+ int ret;
+
#ifdef CONFIG_PNP
static const char * const ids[] __initconst =
{ "PNP0b00", "PNP0b01", "PNP0b02", };
if (of_have_populated_dt())
return 0;
- /* Intel MID platforms don't have ioport rtc */
- if (mrst_identify_cpu())
+ /* Intel MID platforms don't have ioport rtc
+ * except Tangier platform, which doesn't have vRTC
+ */
+ if (intel_mid_identify_cpu() &&
+ intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
return -ENODEV;
+ ret = handle_mrfl_dev_ioapic(RTC_IRQ);
+ if (ret)
+ return ret;
+
platform_device_register(&rtc_device);
dev_info(&rtc_device.dev,
"registered platform RTC device (no PNP device found)\n");
return 0;
}
+/*
+ * We let cpus' idle tasks announce their own death to complete
+ * the logical cpu unplug sequence.
+ */
+DECLARE_COMPLETION(cpu_die_comp);
+
void native_cpu_die(unsigned int cpu)
{
/* We don't do anything here: idle task is faking death itself. */
- unsigned int i;
+ unsigned long timeout = HZ; /* 1 sec */
- for (i = 0; i < 10; i++) {
- /* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
- if (system_state == SYSTEM_RUNNING)
- pr_info("CPU %u is now offline\n", cpu);
- return;
- }
- msleep(100);
- }
- pr_err("CPU %u didn't die...\n", cpu);
+ /* They ack this in play_dead by setting CPU_DEAD */
+ wait_for_completion_timeout(&cpu_die_comp, timeout);
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+ return;
+ } else
+ pr_err("CPU %u didn't die...\n", cpu);
}
void play_dead_common(void)
mb();
/* Ack it */
__this_cpu_write(cpu_state, CPU_DEAD);
+ complete(&cpu_die_comp);
/*
* With physical CPU hotplug, we should halt the cpu
highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
}
}
- eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
- (highest_subcstate - 1);
+
+ if (highest_cstate < 6) {
+ eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+ } else {
+ /* For s0i3 substate code is 4 */
+ eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ ((highest_subcstate - 1) * 2);
+ }
}
/*
wbinvd();
+ /*
+ * FIXME: SCU will abort S3 entry with ACK C6 timeout
+ * if the lapic timer value programmed is low.
+ * Hence program a high value before offlining the CPU
+ */
+ apic_write(APIC_TMICT, ~0);
+
while (1) {
/*
* The CLFLUSH is a workaround for erratum AAI65 for
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
#define PCIE_CAP_OFFSET 0x100
where, size, value);
}
-static int mrst_pci_irq_enable(struct pci_dev *dev)
+static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
u8 pin;
struct io_apic_irq_attr irq_attr;
* IOAPIC RTE entries, so we just enable RTE for the device.
*/
irq_attr.ioapic = mp_find_ioapic(dev->irq);
+ if (irq_attr.ioapic < 0)
+ return -1;
irq_attr.ioapic_pin = dev->irq;
irq_attr.trigger = 1; /* level */
- irq_attr.polarity = 1; /* active low */
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+ irq_attr.polarity = 0; /* active high */
+ else
+ irq_attr.polarity = 1; /* active low */
io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
return 0;
}
-struct pci_ops pci_mrst_ops = {
+struct pci_ops intel_mid_pci_ops = {
.read = pci_read,
.write = pci_write,
};
/**
- * pci_mrst_init - installs pci_mrst_ops
+ * intel_mid_pci_init - installs intel_mid_pci_ops
*
* Moorestown has an interesting PCI implementation (see above).
* Called when the early platform detection installs it.
*/
-int __init pci_mrst_init(void)
+int __init intel_mid_pci_init(void)
{
printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
pci_mmcfg_late_init();
- pcibios_enable_irq = mrst_pci_irq_enable;
- pci_root_ops = pci_mrst_ops;
+ pcibios_enable_irq = intel_mid_pci_irq_enable;
+ pci_root_ops = intel_mid_pci_ops;
pci_soc_mode = 1;
/* Continue with standard init */
return 1;
if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
return;
dev->d3_delay = 0;
+ dev->d3cold_delay = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
obj-y += geode/
obj-y += goldfish/
obj-y += iris/
-obj-y += mrst/
+obj-y += intel-mid/
obj-y += olpc/
obj-y += scx200/
obj-y += sfi/
--- /dev/null
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o
+obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o
+
+# SFI specific code
+obj-$(CONFIG_SFI) += intel_mid_sfi.o
+
+# platform configuration for board devices
+obj-y += device_libs/
+
+# SoC specific files
+obj-$(CONFIG_X86_INTEL_MID) += mfld.o mrfl.o
+obj-$(CONFIG_X86_WANT_INTEL_MID) += intel_mid_pcihelpers.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_scu.o
+
+# BOARD files
+obj-$(CONFIG_X86_INTEL_MID) += board.o
+
+# PMU driver
+obj-$(CONFIG_ATOM_SOC_POWER) += intel_soc_pmu.o intel_soc_pm_debug.o intel_soc_dump.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) += intel_soc_mdfld.o intel_soc_mdfld_clv_common.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER) += intel_soc_clv.o intel_soc_mdfld_clv_common.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) += intel_soc_mrfld.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) += pmu_tng.o
+
+# Debug features driver
+obj-$(CONFIG_INTEL_DEBUG_FEATURE) += intel_soc_debug.o
--- /dev/null
+/*
+ * board-blackbay.c: Intel Medfield based board (Blackbay)
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/i2c-gpio.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+
+/*
+ * IPC devices
+ */
+#include "device_libs/platform_ipc.h"
+#include "device_libs/platform_pmic_gpio.h"
+#include "device_libs/platform_msic.h"
+#include "device_libs/platform_msic_battery.h"
+#include "device_libs/platform_msic_gpio.h"
+#include "device_libs/platform_msic_audio.h"
+#include "device_libs/platform_msic_power_btn.h"
+#include "device_libs/platform_msic_ocd.h"
+#include "device_libs/platform_mrfl_pmic.h"
+#include "device_libs/platform_mrfl_pmic_i2c.h"
+#include "device_libs/platform_mrfl_ocd.h"
+#include "device_libs/platform_msic_thermal.h"
+#include "device_libs/platform_soc_thermal.h"
+#include "device_libs/platform_msic_adc.h"
+#include "device_libs/platform_bcove_adc.h"
+#include "device_libs/platform_mrfld_audio.h"
+#include "device_libs/platform_mrfl_thermal.h"
+
+/*
+ * I2C devices
+ */
+#include "device_libs/platform_max7315.h"
+#include "device_libs/platform_tca6416.h"
+#include "device_libs/platform_mpu3050.h"
+#include "device_libs/platform_emc1403.h"
+#include "device_libs/platform_lis331.h"
+#include "device_libs/platform_mpu3050.h"
+#include "device_libs/platform_tc35876x.h"
+#include "device_libs/platform_bq24261.h"
+#include "device_libs/platform_pcal9555a.h"
+
+#include "device_libs/platform_wm8994.h"
+
+/*
+ * SPI devices
+ */
+#include "device_libs/platform_max3111.h"
+#include "device_libs/platform_spidev.h"
+#include "device_libs/platform_ads7955.h"
+
+/* WIFI devices */
+#include "device_libs/platform_wl12xx.h"
+#include "device_libs/platform_wifi.h"
+
+static void __init *no_platform_data(void *info)
+{
+ return NULL;
+}
+
+struct devs_id __initconst device_ids[] = {
+ /* SD devices */
+ {"wl12xx_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wl12xx_platform_data, NULL},
+ {"bcm43xx_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+ {"bcm43xx_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+ {"iwlwifi_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+ {"WLAN_FAST_IRQ", SFI_DEV_TYPE_SD, 0, &no_platform_data,
+ &wifi_platform_data_fastirq},
+
+ /* I2C devices*/
+ {"pcal9555a-1", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+ {"pcal9555a-2", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+ {"pcal9555a-3", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+ {"pcal9555a-4", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+
+ /* SPI devices */
+ {"spidev", SFI_DEV_TYPE_SPI, 0, &spidev_platform_data, NULL},
+ {"ads7955", SFI_DEV_TYPE_SPI, 0, &ads7955_platform_data, NULL},
+ {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data, NULL},
+ {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data, NULL},
+ {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data,
+ &ipc_device_handler},
+ {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data, NULL},
+ {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data, NULL},
+ {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data, NULL},
+ {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data, NULL},
+ {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data, NULL},
+ {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data, NULL},
+ {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data,
+ &ipc_device_handler},
+ {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data, NULL},
+ {"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data, NULL},
+
+ /* MSIC subdevices */
+ {"msic_adc", SFI_DEV_TYPE_IPC, 1, &msic_adc_platform_data,
+ &ipc_device_handler},
+ {"bcove_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data,
+ &ipc_device_handler},
+ {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data,
+ &ipc_device_handler},
+ {"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data,
+ &ipc_device_handler},
+ {"msic_audio", SFI_DEV_TYPE_IPC, 1, &msic_audio_platform_data,
+ &ipc_device_handler},
+ {"msic_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data,
+ &ipc_device_handler},
+ {"msic_ocd", SFI_DEV_TYPE_IPC, 1, &msic_ocd_platform_data,
+ &ipc_device_handler},
+ {"bcove_bcu", SFI_DEV_TYPE_IPC, 1, &mrfl_ocd_platform_data,
+ &ipc_device_handler},
+ {"msic_thermal", SFI_DEV_TYPE_IPC, 1, &msic_thermal_platform_data,
+ &ipc_device_handler},
+ {"bcove_adc", SFI_DEV_TYPE_IPC, 1, &bcove_adc_platform_data,
+ &ipc_device_handler},
+ {"bcove_thrm", SFI_DEV_TYPE_IPC, 1, &mrfl_thermal_platform_data,
+ &ipc_device_handler},
+ {"wm8994", SFI_DEV_TYPE_I2C, 0, &wm8994_platform_data, NULL},
+ /* IPC devices */
+ {"pmic_charger", SFI_DEV_TYPE_IPC, 1, &no_platform_data, NULL},
+ {"pmic_ccsm", SFI_DEV_TYPE_IPC, 1, &mrfl_pmic_ccsm_platform_data,
+ &ipc_device_handler},
+ {"i2c_pmic_adap", SFI_DEV_TYPE_IPC, 1, &mrfl_pmic_i2c_platform_data,
+ &ipc_device_handler},
+ {"mrfld_sst", SFI_DEV_TYPE_IPC, 1, &mrfld_sst_audio_platform_data,
+ &ipc_device_handler},
+ {"soc_thrm", SFI_DEV_TYPE_IPC, 1, &no_platform_data,
+ &soc_thrm_device_handler},
+ {},
+};
--- /dev/null
+# IPC Devices
+obj-y += platform_sst_audio.o
+obj-y += platform_mrfl_regulator.o
+obj-y += platform_soc_thermal.o
+obj-$(subst m,y,$(CONFIG_SND_BYT_MACHINE)) += platform_byt_audio.o
+obj-$(subst m,y,$(CONFIG_SND_MRFLD_MACHINE)) += platform_mrfld_audio.o
+obj-$(subst m,y,$(CONFIG_SND_CTP_MACHINE)) += platform_ctp_audio.o
+obj-y += platform_ipc.o
+obj-y += platform_msic.o
+obj-y += platform_msic_audio.o
+obj-y += platform_msic_gpio.o
+obj-y += platform_msic_ocd.o
+obj-y += platform_tc35876x.o
+obj-y += pci/
+obj-$(subst m,y,$(CONFIG_BATTERY_INTEL_MDF)) += platform_msic_battery.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
+obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
+obj-$(subst m,y,$(CONFIG_MID_PWM)) += platform_mid_pwm.o
+obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
+obj-$(subst m,y,$(CONFIG_SENSORS_MID_VDD)) += platform_msic_vdd.o
+obj-$(subst m,y,$(CONFIG_SENSORS_MRFL_OCD)) += platform_mrfl_ocd.o
+obj-$(subst m,y,$(CONFIG_PMIC_CCSM)) += platform_mrfl_pmic.o
+obj-$(subst m,y,$(CONFIG_I2C_PMIC)) += platform_mrfl_pmic_i2c.o
+ifdef CONFIG_INTEL_BYT_THERMAL
+obj-$(subst m,y,$(CONFIG_INTEL_BYT_THERMAL)) += platform_byt_thermal.o
+else
+obj-$(subst m,y,$(CONFIG_INTEL_BYT_EC_THERMAL)) += platform_byt_thermal.o
+endif
+obj-$(subst m,y,$(CONFIG_SENSORS_THERMAL_MRFLD)) += platform_mrfl_thermal.o
+obj-$(subst m,y,$(CONFIG_INTEL_SCU_FLIS)) += platform_scu_flis.o
+# I2C Devices
+obj-$(subst m,y,$(CONFIG_I2C_DESIGNWARE_CORE_FORK)) += platform_dw_i2c.o
+obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
+obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
+obj-$(subst m,y,$(CONFIG_SENSORS_MPU3050)) += platform_mpu3050.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
+obj-$(subst m,y,$(CONFIG_BQ24261_CHARGER)) += platform_bq24261.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
+obj-$(subst m,y,$(CONFIG_SND_SOC_WM8994)) += platform_wm8994.o
+# SPI Devices
+obj-$(subst m,y,$(CONFIG_SERIAL_MRST_MAX3110)) += platform_max3111.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_TI_ADS7955_ADC)) += platform_ads7955.o
+# MISC Devices
+obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+# ADC
+obj-$(subst m,y,$(CONFIG_MSIC_GPADC)) += platform_msic_adc.o
+obj-$(subst m,y,$(CONFIG_IIO_BASINCOVE_GPADC)) += platform_bcove_adc.o
+# UART Devices
+obj-$(subst m,y,$(CONFIG_SERIAL_MFD_HSU)) += platform_hsu.o
+# SD Devices
+obj-$(subst m,y,$(CONFIG_WILINK_PLATFORM_DATA)) += platform_wl12xx.o
+ifndef CONFIG_ACPI
+obj-$(subst m,y,$(CONFIG_BCM_BT_LPM)) += platform_btlpm.o
+endif
+#I2C Devices
+# Panel Control Device
+obj-$(subst m,y,$(CONFIG_DRM_MRFLD)) += platform_panel.o
+# GPS
+obj-$(subst m,y,$(CONFIG_INTEL_MID_GPS)) += platform_gps.o
+# WIFI devices
+obj-$(subst m,y,$(CONFIG_WIFI_PLATFORM_DATA)) += platform_wifi.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_ACPI)) += platform_sdio_regulator.o
+# SCU log
+obj-$(subst m,y,$(CONFIG_SCU_LOGGING)) += platform_scu_log.o
--- /dev/null
+# MMC Sdhci pci host controller platform data
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_sdhci_pci.o
+# USB OTG controller platform data
+obj-$(subst m,y,$(CONFIG_USB_PENWELL_OTG)) += platform_usb_otg.o
+obj-$(subst m,y,$(CONFIG_USB_DWC3_OTG)) += platform_usb_otg.o
+obj-$(subst m,y,$(CONFIG_SND_INTEL_SST)) += platform_sst_pci.o
--- /dev/null
+/*
+ * platform_sdhci_pci.c: mmc sdhci pci platform data initialization file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/intel-mid.h>
+#include <linux/mmc/sdhci-pci-data.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/hardirq.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+#include "platform_sdhci_pci.h"
+
+#ifdef CONFIG_ATOM_SOC_POWER
+static int panic_mode_emmc0_power_up(void *data)
+{
+ int ret;
+ bool atomic_context;
+ /*
+ * Since pmu_set_emmc_to_d0i0_atomic function can
+ * only be used in atomic context, before calling this
+ * function, do a check first and make sure this function
+ * is used in atomic context.
+ */
+ atomic_context = (!preemptible() || in_atomic_preempt_off());
+
+ if (!atomic_context) {
+ pr_err("%s: not in atomic context!\n", __func__);
+ return -EPERM;
+ }
+
+ ret = pmu_set_emmc_to_d0i0_atomic();
+ if (ret) {
+ pr_err("%s: power up host failed with err %d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+#else
+static int panic_mode_emmc0_power_up(void *data)
+{
+ return 0;
+}
+#endif
+
+static unsigned int sdhci_pdata_quirks = SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8
+ | SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY;
+
+int sdhci_pdata_set_quirks(const unsigned int quirks)
+{
+ sdhci_pdata_quirks = quirks;
+ return 0;
+}
+
+static int mrfl_flis_check(void *data, unsigned int clk);
+static int mrfl_sdio_setup(struct sdhci_pci_data *data);
+static void mrfl_sdio_cleanup(struct sdhci_pci_data *data);
+
+static void (*sdhci_embedded_control)(void *dev_id, void (*virtual_cd)
+ (void *dev_id, int card_present));
+
+/*****************************************************************************\
+ * *
+ * Regulator declaration for WLAN SDIO card *
+ * *
+\*****************************************************************************/
+
+#define DELAY_ONOFF 250
+
+static struct regulator_consumer_supply wlan_vmmc_supply = {
+ .supply = "vmmc",
+};
+
+static struct regulator_init_data wlan_vmmc_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &wlan_vmmc_supply,
+};
+
+static struct fixed_voltage_config vwlan = {
+ .supply_name = "vwlan",
+ .microvolts = 1800000,
+ .gpio = -EINVAL,
+ .startup_delay = 1000 * DELAY_ONOFF,
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &wlan_vmmc_data,
+};
+
+static void vwlan_device_release(struct device *dev) {}
+
+static struct platform_device vwlan_device = {
+ .name = "reg-fixed-voltage",
+ .id = PLATFORM_DEVID_AUTO,
+ .dev = {
+ .platform_data = &vwlan,
+ .release = vwlan_device_release,
+ },
+};
+
+
+/* Board specific setup related to SDIO goes here */
+static int mfld_sdio_setup(struct sdhci_pci_data *data)
+{
+ struct pci_dev *pdev = data->pdev;
+ /* Control card power through a regulator */
+ wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+ vwlan.gpio = get_gpio_by_name("WLAN-enable");
+ if (vwlan.gpio < 0)
+ pr_err("%s: No WLAN-enable GPIO in SFI table\n",
+ __func__);
+ pr_info("vwlan gpio %d\n", vwlan.gpio);
+ /* add a regulator to control wlan enable gpio */
+ if (platform_device_register(&vwlan_device))
+ pr_err("regulator register failed\n");
+ else
+ sdhci_pci_request_regulators();
+
+ return 0;
+}
+
+
+/* MFLD platform data */
+static struct sdhci_pci_data mfld_sdhci_pci_data[] = {
+ [EMMC0_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .setup = 0,
+ .cleanup = 0,
+ .power_up = panic_mode_emmc0_power_up,
+ },
+ [EMMC1_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .setup = 0,
+ .cleanup = 0,
+ },
+ [SD_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = 69,
+ .setup = 0,
+ .cleanup = 0,
+ },
+ [SDIO_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .quirks = 0,
+ .platform_quirks = 0,
+ .setup = mfld_sdio_setup,
+ .cleanup = 0,
+ },
+};
+
+/* Board specific setup related to SDIO goes here */
+static int clv_sdio_setup(struct sdhci_pci_data *data)
+{
+ struct pci_dev *pdev = data->pdev;
+ /* Control card power through a regulator */
+ wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+ vwlan.gpio = get_gpio_by_name("WLAN-enable");
+ if (vwlan.gpio < 0)
+ pr_err("%s: No WLAN-enable GPIO in SFI table\n",
+ __func__);
+ pr_info("vwlan gpio %d\n", vwlan.gpio);
+ /* add a regulator to control wlan enable gpio */
+ if (platform_device_register(&vwlan_device))
+ pr_err("regulator register failed\n");
+ else
+ sdhci_pci_request_regulators();
+
+ return 0;
+}
+
+/* CLV platform data */
+static struct sdhci_pci_data clv_sdhci_pci_data[] = {
+ [EMMC0_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .setup = 0,
+ .cleanup = 0,
+ .power_up = panic_mode_emmc0_power_up,
+ },
+ [EMMC1_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .setup = 0,
+ .cleanup = 0,
+ },
+ [SD_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = 69,
+ .setup = 0,
+ .cleanup = 0,
+ },
+ [SDIO_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .quirks = 0,
+ .platform_quirks = 0,
+ .setup = clv_sdio_setup,
+ .cleanup = 0,
+ },
+};
+
+#define TNG_EMMC_0_FLIS_ADDR 0xff0c0900
+#define TNG_EMMC_FLIS_SLEW 0x00000400
+#define TNG_EMMC_0_CLK_PULLDOWN 0x00000200
+static int mrfl_flis_slew_change(int slew)
+{
+ void __iomem *flis_addr;
+ unsigned int reg;
+ int i, ret = 0;
+
+ flis_addr = ioremap_nocache(TNG_EMMC_0_FLIS_ADDR, 64);
+
+ if (!flis_addr) {
+ pr_err("flis_addr ioremap fail!\n");
+ ret = -ENOMEM;
+ } else {
+ pr_info("flis_addr mapped addr: %p\n", flis_addr);
+ /*
+ * Change TNG gpio FLIS settings for all eMMC0
+ * CLK/CMD/DAT pins.
+ * That is, including emmc_0_clk, emmc_0_cmd,
+ * emmc_0_d_0, emmc_0_d_1, emmc_0_d_2, emmc_0_d_3,
+ * emmc_0_d_4, emmc_0_d_5, emmc_0_d_6, emmc_0_d_7
+ */
+ for (i = 0; i < 10; i++) {
+ reg = readl(flis_addr + (i * 4));
+ if (slew)
+ reg |= TNG_EMMC_FLIS_SLEW; /* SLEW B */
+ else
+ reg &= ~TNG_EMMC_FLIS_SLEW; /* SLEW A */
+ writel(reg, flis_addr + (i * 4));
+ }
+
+ /* Disable PullDown for emmc_0_clk */
+ reg = readl(flis_addr);
+ reg &= ~TNG_EMMC_0_CLK_PULLDOWN;
+ writel(reg, flis_addr);
+
+ ret = 0;
+ }
+
+ if (flis_addr)
+ iounmap(flis_addr);
+
+ return ret;
+}
+
+static int mrfl_flis_check(void *data, unsigned int clk)
+{
+ struct sdhci_host *host = data;
+ int ret = 0;
+
+ if ((host->clock <= 52000000) && (clk > 52000000))
+ ret = mrfl_flis_slew_change(1);
+ else if ((host->clock > 52000000) && (clk <= 52000000))
+ ret = mrfl_flis_slew_change(0);
+
+ return ret;
+}
+
+/* Board specific setup related to eMMC goes here */
+static int mrfl_emmc_setup(struct sdhci_pci_data *data)
+{
+ struct pci_dev *pdev = data->pdev;
+ int ret = 0;
+
+ if (pdev->revision == 0x01) /* TNB B0 stepping */
+ ret = mrfl_flis_slew_change(1); /* HS200 FLIS slew setting */
+
+ return ret;
+}
+
+/* Board specific setup related to SD goes here */
+static int mrfl_sd_setup(struct sdhci_pci_data *data)
+{
+ u8 vldocnt = 0;
+ int err;
+
+ /*
+ * Change necessary GPIO pin mode for SD card working.
+ * This is something should be done in IA firmware.
+ * But, anyway, just do it here in case IA firmware
+ * forget to do so.
+ */
+ lnw_gpio_set_alt(MRFLD_GPIO_SDIO_0_CD, 0);
+
+ err = intel_scu_ipc_ioread8(MRFLD_PMIC_VLDOCNT, &vldocnt);
+ if (err) {
+ pr_err("PMIC vldocnt IPC read error: %d\n", err);
+ return err;
+ }
+
+ vldocnt |= MRFLD_PMIC_VLDOCNT_VSWITCH_BIT;
+ err = intel_scu_ipc_iowrite8(MRFLD_PMIC_VLDOCNT, vldocnt);
+ if (err) {
+ pr_err("PMIC vldocnt IPC write error: %d\n", err);
+ return err;
+ }
+ msleep(20);
+
+ return 0;
+}
+
+/* Board specific cleanup related to SD goes here */
+static void mrfl_sd_cleanup(struct sdhci_pci_data *data)
+{
+}
+
+/* Board specific setup related to SDIO goes here */
+static int mrfl_sdio_setup(struct sdhci_pci_data *data)
+{
+ struct pci_dev *pdev = data->pdev;
+ /* Control card power through a regulator */
+ wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+ vwlan.gpio = get_gpio_by_name("WLAN-enable");
+ if (vwlan.gpio < 0)
+ pr_err("%s: No WLAN-enable GPIO in SFI table\n",
+ __func__);
+ pr_info("vwlan gpio %d\n", vwlan.gpio);
+ /* add a regulator to control wlan enable gpio */
+ if (platform_device_register(&vwlan_device))
+ pr_err("regulator register failed\n");
+ else
+ sdhci_pci_request_regulators();
+
+ return 0;
+}
+
+/* Board specific cleanup related to SDIO goes here */
+static void mrfl_sdio_cleanup(struct sdhci_pci_data *data)
+{
+}
+
+/* MRFL platform data */
+static struct sdhci_pci_data mrfl_sdhci_pci_data[] = {
+ [EMMC0_INDEX] = {
+ .pdev = NULL,
+ .slotno = EMMC0_INDEX,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .quirks = 0,
+ .platform_quirks = 0,
+ .setup = mrfl_emmc_setup,
+ .cleanup = 0,
+ .power_up = panic_mode_emmc0_power_up,
+ },
+ [SD_INDEX] = {
+ .pdev = NULL,
+ .slotno = SD_INDEX,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = 77,
+ .quirks = 0,
+ .platform_quirks = 0,
+ .setup = mrfl_sd_setup,
+ .cleanup = mrfl_sd_cleanup,
+ },
+ [SDIO_INDEX] = {
+ .pdev = NULL,
+ .slotno = SDIO_INDEX,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .quirks = 0,
+ .platform_quirks = 0,
+ .setup = mrfl_sdio_setup,
+ .cleanup = mrfl_sdio_cleanup,
+ },
+};
+
+/* Board specific setup related to SDIO goes here */
+static int byt_sdio_setup(struct sdhci_pci_data *data)
+{
+ struct pci_dev *pdev = data->pdev;
+#ifdef CONFIG_ACPI
+ acpi_handle handle;
+ acpi_status status;
+#endif
+
+ /* Control card power through a regulator */
+ wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+
+#ifdef CONFIG_ACPI
+ status = acpi_get_handle(NULL, "\\_SB.SDHB", &handle);
+ if (ACPI_FAILURE(status))
+ pr_err("wlan: cannot get SDHB acpi handle");
+ ACPI_HANDLE_SET(&pdev->dev, handle);
+ vwlan.gpio = acpi_get_gpio_by_index(&pdev->dev, 0, NULL);
+#endif
+ if (vwlan.gpio < 0)
+ pr_err("%s: No wlan-enable GPIO in SDHB ACPI block\n",
+ __func__);
+
+ pr_info("vwlan gpio %d\n", vwlan.gpio);
+
+ /* add a regulator to control wlan enable gpio */
+ if (platform_device_register(&vwlan_device))
+ pr_err("regulator register failed\n");
+ else
+ sdhci_pci_request_regulators();
+
+ return 0;
+}
+
+
+/* BYT platform data */
+static struct sdhci_pci_data byt_sdhci_pci_data[] = {
+ [SDIO_INDEX] = {
+ .pdev = NULL,
+ .slotno = 0,
+ .rst_n_gpio = -EINVAL,
+ .cd_gpio = -EINVAL,
+ .quirks = 0,
+ .platform_quirks = 0,
+ .setup = byt_sdio_setup,
+ .cleanup = NULL,
+ },
+};
+
+static struct sdhci_pci_data *get_sdhci_platform_data(struct pci_dev *pdev)
+{
+ struct sdhci_pci_data *pdata = NULL;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
+ pdata = &mfld_sdhci_pci_data[EMMC0_INDEX];
+ break;
+ case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
+ pdata = &mfld_sdhci_pci_data[EMMC1_INDEX];
+ break;
+ case PCI_DEVICE_ID_INTEL_MFD_SD:
+ pdata = &mfld_sdhci_pci_data[SD_INDEX];
+ break;
+ case PCI_DEVICE_ID_INTEL_MFD_SDIO1:
+ pdata = &mfld_sdhci_pci_data[SDIO_INDEX];
+ pdata->quirks = sdhci_pdata_quirks;
+ pdata->register_embedded_control = sdhci_embedded_control;
+ break;
+ case PCI_DEVICE_ID_INTEL_CLV_EMMC0:
+ pdata = &clv_sdhci_pci_data[EMMC0_INDEX];
+ pdata->rst_n_gpio = get_gpio_by_name("emmc0_rst");
+ break;
+ case PCI_DEVICE_ID_INTEL_CLV_EMMC1:
+ pdata = &clv_sdhci_pci_data[EMMC1_INDEX];
+ pdata->rst_n_gpio = get_gpio_by_name("emmc1_rst");
+ break;
+ case PCI_DEVICE_ID_INTEL_CLV_SDIO0:
+ pdata = &clv_sdhci_pci_data[SD_INDEX];
+ break;
+ case PCI_DEVICE_ID_INTEL_CLV_SDIO1:
+ pdata = &clv_sdhci_pci_data[SDIO_INDEX];
+ pdata->quirks = sdhci_pdata_quirks;
+ pdata->register_embedded_control = sdhci_embedded_control;
+ break;
+ case PCI_DEVICE_ID_INTEL_MRFL_MMC:
+ switch (PCI_FUNC(pdev->devfn)) {
+ case 0:
+ pdata = &mrfl_sdhci_pci_data[EMMC0_INDEX];
+ /*
+ * The current eMMC device simulation in Merrifield
+ * VP only implements boot partition 0, does not
+ * implement boot partition 1. And the VP will
+ * crash if eMMC boot partition 1 is accessed
+ * during kernel boot. So, we just disable boot
+ * partition support for Merrifield VP platform.
+ */
+ if (intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_VP)
+ pdata->platform_quirks |=
+ PLFM_QUIRK_NO_EMMC_BOOT_PART;
+ if (intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_HVP)
+ pdata->platform_quirks |=
+ PLFM_QUIRK_NO_HIGH_SPEED;
+ break;
+ case 1:
+ pdata = &mrfl_sdhci_pci_data[EMMC1_INDEX];
+ if (intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_VP)
+ pdata->platform_quirks |=
+ PLFM_QUIRK_NO_EMMC_BOOT_PART;
+ /*
+ * Merrifield HVP platform only implements
+ * eMMC0 host controller in its FPGA, and
+ * does not implement the other 3 Merrifield
+ * SDHCI host controllers.
+ */
+ if (intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_HVP)
+ pdata->platform_quirks |=
+ PLFM_QUIRK_NO_HOST_CTRL_HW;
+ break;
+ case 2:
+ pdata = &mrfl_sdhci_pci_data[SD_INDEX];
+ if (intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_HVP)
+ pdata->platform_quirks |=
+ PLFM_QUIRK_NO_HOST_CTRL_HW;
+ break;
+ case 3:
+ pdata = &mrfl_sdhci_pci_data[SDIO_INDEX];
+ if (intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_HVP)
+ pdata->platform_quirks |=
+ PLFM_QUIRK_NO_HOST_CTRL_HW;
+ pdata->quirks = sdhci_pdata_quirks;
+ pdata->register_embedded_control =
+ sdhci_embedded_control;
+ break;
+ default:
+ pr_err("%s func %s: Invalid PCI Dev func no. (%d)\n",
+ __FILE__, __func__, PCI_FUNC(pdev->devfn));
+ break;
+ }
+ break;
+ case PCI_DEVICE_ID_INTEL_BYT_SDIO:
+ pr_err("setting quirks/embedded controls on SDIO");
+ pdata = &byt_sdhci_pci_data[SDIO_INDEX];
+ pdata->quirks = sdhci_pdata_quirks;
+ pdata->register_embedded_control = sdhci_embedded_control;
+ break;
+ default:
+ break;
+ }
+ return pdata;
+}
+
+int sdhci_pdata_set_embedded_control(void (*fnp)
+ (void *dev_id, void (*virtual_cd)
+ (void *dev_id, int card_present)))
+{
+ WARN_ON(sdhci_embedded_control);
+ sdhci_embedded_control = fnp;
+ return 0;
+}
+
+struct sdhci_pci_data *mmc_sdhci_pci_get_data(struct pci_dev *pci_dev, int slotno)
+{
+ return get_sdhci_platform_data(pci_dev);
+}
+
+static int __init init_sdhci_get_data(void)
+{
+ sdhci_pci_get_data = mmc_sdhci_pci_get_data;
+
+ return 0;
+}
+
+arch_initcall(init_sdhci_get_data);
+
--- /dev/null
+/*
+ * platform_sdhci_pci.h: mmc sdhci pci platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MMC_SDHCI_PCI_H_
+#define _PLATFORM_MMC_SDHCI_PCI_H_
+
+#define EMMC0_INDEX 0
+#define EMMC1_INDEX 1
+#define SD_INDEX 2
+#define SDIO_INDEX 3
+
+#define MRFLD_GPIO_SDIO_0_CD 77
+
+#define MRFLD_PMIC_VLDOCNT 0xaf
+#define MRFLD_PMIC_VLDOCNT_VSWITCH_BIT 0x02
+
+int sdhci_pdata_set_quirks(const unsigned int quirks);
+int sdhci_pdata_set_embedded_control(void (*fnp)
+ (void *dev_id, void (*virtual_cd)
+ (void *dev_id, int card_present)));
+#endif
+
--- /dev/null
+/*
+ * platform_sst_pci.c: SST platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Dharageswari R <dharageswari.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/intel_mid_dma.h>
+#include <asm/intel-mid.h>
+#include <asm/platform_sst.h>
+
+#define CTP_SSP_BASE 0xffa23000
+#define CTP_DMA_BASE 0xffaf8000
+#define MRFLD_SSP_BASE 0xff2a0000
+#define MRFLD_DMA_BASE 0xff298000
+#define CTP_MAX_CONFIG_SIZE 500
+
+#define SST_CTP_IRAM_START 0
+#define SST_CTP_IRAM_END 0x80000
+#define SST_CTP_DRAM_START 0x400000
+#define SST_CTP_DRAM_END 0x480000
+#define SSP_SIZE 0x1000
+#define DMA_SIZE_CTP 0x1000
+#define DMA_SIZE_MRFLD 0x4000
+#define SST_CHECKPOINT_OFFSET 0x1C00
+#define SST_CHECKPOINT_OFFSET_MRFLD 0x0C00
+#define CHECKPOINT_DUMP_SZ 256
+
+#define SST_V1_MAILBOX_RECV 0x800
+#define SST_V2_MAILBOX_RECV 0x400
+
+#define MRFLD_FW_LSP_DDR_BASE 0xC5E00000
+#define MRFLD_FW_MOD_END (MRFLD_FW_LSP_DDR_BASE + 0x1FFFFF)
+#define MRFLD_FW_MOD_TABLE_OFFSET 0x80000
+#define MRFLD_FW_MOD_TABLE_SIZE 0x100
+
+struct sst_platform_info sst_data;
+
+static struct sst_ssp_info ssp_inf_ctp = {
+ .base_add = CTP_SSP_BASE,
+ .gpio = {
+ .alt_function = LNW_ALT_2,
+ },
+ .gpio_in_use = true,
+};
+
+static struct sst_ssp_info ssp_inf_mrfld = {
+ .base_add = MRFLD_SSP_BASE,
+ .gpio_in_use = false,
+};
+
+static const struct sst_platform_config_data sst_ctp_pdata = {
+ .sst_sram_buff_base = 0xfffc0000,
+ .sst_dma_base[0] = CTP_DMA_BASE,
+ .sst_dma_base[1] = 0x0,
+};
+
+static struct sst_platform_config_data sst_mrfld_pdata = {
+ .sst_dma_base[0] = MRFLD_DMA_BASE,
+ .sst_dma_base[1] = 0x0,
+};
+
+static const struct sst_board_config_data sst_ctp_bdata = {
+ .active_ssp_ports = 4,
+ .platform_id = 2,/*FIXME: Once the firmware fix is available*/
+ .board_id = 1,/*FIXME: Once the firmware fix is available*/
+ .ihf_num_chan = 2,
+ .osc_clk_freq = 19200000,
+ .ssp_platform_data = {
+ [SST_SSP_AUDIO] = {
+ .ssp_cfg_sst = 1,
+ .port_number = 3,
+ .is_master = 1,
+ .pack_mode = 1,
+ .num_slots_per_frame = 2,
+ .num_bits_per_slot = 25,
+ .active_tx_map = 3,
+ .active_rx_map = 3,
+ .ssp_frame_format = 3,
+ .frame_polarity = 0,
+ .serial_bitrate_clk_mode = 0,
+ .frame_sync_width = 24,
+ .dma_handshake_interface_tx = 5,
+ .dma_handshake_interface_rx = 4,
+ .ssp_base_add = 0xFFA23000,
+ },
+ [SST_SSP_MODEM] = {0},
+ [SST_SSP_BT] = {0},
+ [SST_SSP_FM] = {0},
+ },
+};
+
+static const struct sst_info ctp_sst_info = {
+ .iram_start = SST_CTP_IRAM_START,
+ .iram_end = SST_CTP_IRAM_END,
+ .iram_use = true,
+ .dram_start = SST_CTP_DRAM_START,
+ .dram_end = SST_CTP_DRAM_END,
+ .dram_use = true,
+ .imr_start = 0,
+ .imr_end = 0,
+ .imr_use = false,
+ .mailbox_start = 0,
+ .lpe_viewpt_rqd = false,
+ .use_elf = false,
+ .max_streams = MAX_NUM_STREAMS_CTP,
+ .dma_max_len = (SST_MAX_DMA_LEN * 4),
+ .num_probes = 1,
+};
+
+static const struct sst_ipc_info ctp_ipc_info = {
+ .use_32bit_ops = true,
+ .ipc_offset = 0,
+ .mbox_recv_off = SST_V1_MAILBOX_RECV,
+};
+
+static const struct sst_info mrfld_sst_info = {
+ .iram_start = 0,
+ .iram_end = 0,
+ .iram_use = false,
+ .dram_start = 0,
+ .dram_end = 0,
+ .dram_use = false,
+ .imr_start = 0,
+ .imr_end = 0,
+ .imr_use = false,
+ .mailbox_start = 0,
+ .use_elf = true,
+ .lpe_viewpt_rqd = false,
+ .max_streams = MAX_NUM_STREAMS_MRFLD,
+ .dma_max_len = SST_MAX_DMA_LEN_MRFLD,
+ .num_probes = 16,
+};
+
+static struct sst_platform_debugfs_data ctp_debugfs_data = {
+ .ssp_reg_size = SSP_SIZE,
+ .dma_reg_size = DMA_SIZE_CTP,
+ .num_ssp = 1,
+ .num_dma = 1,
+ .checkpoint_offset = SST_CHECKPOINT_OFFSET,
+ .checkpoint_size = CHECKPOINT_DUMP_SZ,
+};
+
+static struct sst_platform_debugfs_data mrfld_debugfs_data = {
+ .ssp_reg_size = SSP_SIZE,
+ .dma_reg_size = DMA_SIZE_MRFLD,
+ .num_ssp = 3,
+ .num_dma = 2,
+ .checkpoint_offset = SST_CHECKPOINT_OFFSET_MRFLD,
+ .checkpoint_size = CHECKPOINT_DUMP_SZ,
+};
+
+static const struct sst_ipc_info mrfld_ipc_info = {
+ .use_32bit_ops = false,
+ .ipc_offset = 0,
+ .mbox_recv_off = SST_V2_MAILBOX_RECV,
+};
+
+static const struct sst_lib_dnld_info mrfld_lib_dnld_info = {
+ .mod_base = MRFLD_FW_LSP_DDR_BASE,
+ .mod_end = MRFLD_FW_MOD_END,
+ .mod_table_offset = MRFLD_FW_MOD_TABLE_OFFSET,
+ .mod_table_size = MRFLD_FW_MOD_TABLE_SIZE,
+ .mod_ddr_dnld = true,
+};
+
+/*
+ * set_ctp_sst_config - fill in the SST platform data for Clovertrail.
+ * @sst_info: structure to populate
+ *
+ * Resolves the I2S3 pin-mux GPIOs from the SFI GPIO table and wires up
+ * the CTP configuration, board, probe, IPC and debugfs tables.
+ *
+ * Returns 0 on success, -EINVAL if the config blob exceeds the DSP's
+ * config area (CTP_MAX_CONFIG_SIZE).
+ */
+static int set_ctp_sst_config(struct sst_platform_info *sst_info)
+{
+	unsigned int conf_len;
+
+	ssp_inf_ctp.gpio.i2s_rx_alt = get_gpio_by_name("gpio_i2s3_rx");
+	/* Bug fix: TX mux must use the TX table entry, not a second RX lookup */
+	ssp_inf_ctp.gpio.i2s_tx_alt = get_gpio_by_name("gpio_i2s3_tx");
+	ssp_inf_ctp.gpio.i2s_frame = get_gpio_by_name("gpio_i2s3_fs");
+	ssp_inf_ctp.gpio.i2s_clock = get_gpio_by_name("gpio_i2s3_clk");
+
+	sst_info->ssp_data = &ssp_inf_ctp;
+	/* Refuse to hand the firmware a config blob larger than its buffer */
+	conf_len = sizeof(sst_ctp_pdata) + sizeof(sst_ctp_bdata);
+	if (conf_len > CTP_MAX_CONFIG_SIZE)
+		return -EINVAL;
+	sst_info->pdata = &sst_ctp_pdata;
+	sst_info->bdata = &sst_ctp_bdata;
+	sst_info->probe_data = &ctp_sst_info;
+	sst_info->ipc_info = &ctp_ipc_info;
+	sst_info->debugfs_data = &ctp_debugfs_data;
+	sst_info->lib_info = NULL;
+
+	return 0;
+}
+
+/*
+ * set_mrfld_sst_config - fill in the SST platform data for Merrifield.
+ * @sst_info: structure to populate
+ *
+ * Merrifield needs no GPIO lookups; everything comes from static tables.
+ * (Removed a redundant bare "return ;" at the end of this void function.)
+ */
+static void set_mrfld_sst_config(struct sst_platform_info *sst_info)
+{
+	sst_info->ssp_data = &ssp_inf_mrfld;
+	sst_info->pdata = &sst_mrfld_pdata;
+	sst_info->bdata = NULL;
+	sst_info->probe_data = &mrfld_sst_info;
+	sst_info->ipc_info = &mrfld_ipc_info;
+	sst_info->debugfs_data = &mrfld_debugfs_data;
+	sst_info->lib_info = &mrfld_lib_dnld_info;
+}
+
+/*
+ * get_sst_platform_data - return the singleton SST platform data.
+ * @pdev: the SST PCI device (currently unused; only the Merrifield
+ *        configuration is instantiated here)
+ *
+ * Dropped the redundant NULL-initialized local that only forwarded
+ * &sst_data to the caller.
+ */
+static struct sst_platform_info *get_sst_platform_data(struct pci_dev *pdev)
+{
+	set_mrfld_sst_config(&sst_data);
+	return &sst_data;
+}
+
+/*
+ * Early PCI fixup: attach the SST platform data to the Merrifield audio
+ * DSP device before its driver probes.
+ */
+static void sst_pci_early_quirks(struct pci_dev *pci_dev)
+{
+	pci_dev->dev.platform_data = get_sst_platform_data(pci_dev);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SST_MRFLD,
+			sst_pci_early_quirks);
--- /dev/null
+/*
+ * platform_otg_pci.c: USB OTG platform data initilization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/pci.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_USB_DWC3_OTG
+#include <linux/usb/dwc3-intel-mid.h>
+static struct intel_dwc_otg_pdata dwc_otg_pdata;
+
+/*
+ * Read the USB-spec-override flag the firmware leaves in shared SRAM.
+ * The MISCFLAGS byte sits at physical 0xFFFCE717; bit 6 set means the
+ * charging-compliance override is active.
+ *
+ * NOTE(review): the ioremap_nocache() result is not NULL-checked before
+ * the ioread8() — confirm this can never fail this early in boot.
+ */
+static bool dwc_otg_get_usbspecoverride(void)
+{
+	void __iomem *usb_comp_iomap;
+	bool usb_spec_override;
+
+	/* Read MISCFLAGS byte from offset 0x717 */
+	usb_comp_iomap = ioremap_nocache(0xFFFCE717, 4);
+	/* MISCFLAGS.BIT[6] indicates USB spec override */
+	usb_spec_override = ioread8(usb_comp_iomap) & 0x40;
+	iounmap(usb_comp_iomap);
+
+	return usb_spec_override;
+}
+
+/*
+ * Build the DWC3 OTG platform data for the given PCI device.
+ * Only the Merrifield DWC3 OTG controller is recognised; any other
+ * device id gets no platform data (NULL).
+ */
+static struct intel_dwc_otg_pdata *get_otg_platform_data(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG:
+		dwc_otg_pdata.pmic_type = BASIN_COVE;
+		dwc_otg_pdata.charger_detect_enable = 1;
+
+		/* Firmware may override the charging-compliance policy */
+		dwc_otg_pdata.charging_compliance =
+			dwc_otg_get_usbspecoverride();
+
+		return &dwc_otg_pdata;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+#endif
+
+#ifdef CONFIG_USB_PENWELL_OTG
+#include <linux/usb/penwell_otg.h>
+static struct intel_mid_otg_pdata otg_pdata = {
+ .gpio_vbus = 0,
+ .gpio_cs = 0,
+ .gpio_reset = 0,
+ .charging_compliance = 0,
+ .hnp_poll_support = 0,
+ .power_budget = 500
+};
+
+/*
+ * Penwell OTG variant: return per-device OTG platform data.
+ *
+ * NOTE(review): this CONFIG_USB_PENWELL_OTG branch references
+ * dwc_otg_pdata and dwc_otg_get_usbspecoverride(), which are declared
+ * only under CONFIG_USB_DWC3_OTG above — this will not compile if only
+ * CONFIG_USB_PENWELL_OTG is enabled.  Looks like a copy/paste from the
+ * DWC3 variant; confirm intent with the author.
+ */
+static struct intel_mid_otg_pdata *get_otg_platform_data(struct pci_dev *pdev)
+{
+	struct intel_mid_otg_pdata *pdata = &otg_pdata;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG:
+		dwc_otg_pdata.pmic_type = BASIN_COVE;
+		dwc_otg_pdata.charger_detect_enable = 1;
+
+		dwc_otg_pdata.charging_compliance =
+			dwc_otg_get_usbspecoverride();
+		return &dwc_otg_pdata;
+
+	default:
+		break;
+	}
+
+	return pdata;
+}
+#endif
+
+/*
+ * Early PCI fixup: attach OTG platform data to the Merrifield DWC3 OTG
+ * device before its driver probes.
+ */
+static void otg_pci_early_quirks(struct pci_dev *pci_dev)
+{
+	pci_dev->dev.platform_data = get_otg_platform_data(pci_dev);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG,
+			otg_pci_early_quirks);
--- /dev/null
+/*
+ * platform_ads7955.c: ads7955 platform data initialization file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/lnw_gpio.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+#include <asm/intel-mid.h>
+#include "platform_ads7955.h"
+
+static struct intel_mid_ssp_spi_chip chip = {
+ .burst_size = DFLT_FIFO_BURST_SIZE,
+ .timeout = DFLT_TIMEOUT_VAL,
+ /* SPI DMA is current not usable on Tangier */
+ .dma_enabled = false,
+};
+
+/*
+ * ads7955_platform_data - SFI callback for the ADS7955 SPI ADC.
+ * @info: struct spi_board_info supplied by the SFI parser
+ *
+ * Sets SPI mode 0, attaches the shared SSP controller config and forces
+ * the bus number (firmware workaround, see FORCE_SPI_BUS_NUM).
+ * Returns NULL: the device needs no extra platform data.
+ */
+void __init *ads7955_platform_data(void *info)
+{
+	struct spi_board_info *spi_info = info;
+
+	spi_info->mode = SPI_MODE_0;
+
+	spi_info->controller_data = &chip;
+	/* REVERT ME: IAFW .25 reports a bogus bus number — force it here */
+	spi_info->bus_num = FORCE_SPI_BUS_NUM;
+
+	return NULL;
+}
--- /dev/null
+/*
+ * platform_ads7955.h: ads7955 platform data header file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_ADS7955_H_
+#define _PLATFORM_ADS7955_H_
+
+/* REVERT ME workaround[MRFL] for invalid bus number in IAFW .25 */
+#define FORCE_SPI_BUS_NUM 5
+#define FORCE_CHIP_SELECT 0
+
+extern void *ads7955_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_bcove_adc.c: Platform data for Merrifield Basincove GPADC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/types.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_basincove_gpadc.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "platform_bcove_adc.h"
+
+/* SRAM address where the GPADC interrupt register is cached */
+#define GPADC_SRAM_INTR_ADDR 0xfffff615
+
+/*
+ * Per-channel PMIC register map for the Basin Cove GPADC.
+ * Fields per struct gpadc_regmap_t: channel name, a per-channel index,
+ * then two PMIC register addresses — presumably the result high/low
+ * registers; confirm against the gpadc_regmap_t definition.
+ */
+static struct gpadc_regmap_t basincove_gpadc_regmaps[GPADC_CH_NUM] = {
+	{"VBAT",	5, 0xE9, 0xEA, },
+	{"BATID",	4, 0xEB, 0xEC, },
+	{"IBAT",	5, 0xED, 0xEE, },
+	{"PMICTEMP",	3, 0xCC, 0xCD, },
+	{"BATTEMP0",	2, 0xC8, 0xC9, },
+	{"BATTEMP1",	2, 0xCA, 0xCB, },
+	{"SYSTEMP0",	3, 0xC2, 0xC3, },
+	{"SYSTEMP1",	3, 0xC4, 0xC5, },
+	{"SYSTEMP2",	3, 0xC6, 0xC7, },
+};
+
+static struct gpadc_regs_t basincove_gpadc_regs = {
+ .gpadcreq = 0xDC,
+ .gpadcreq_irqen = (1 << 1),
+ .gpadcreq_busy = (1 << 0),
+ .mirqlvl1 = 0x0C,
+ .mirqlvl1_adc = (1 << 4),
+ .adc1cntl = 0xDD,
+ .adcirq = 0x06,
+ .madcirq = 0x11,
+};
+
+#define MSIC_ADC_MAP(_adc_channel_label, \
+ _consumer_dev_name, \
+ _consumer_channel) \
+ { \
+ .adc_channel_label = _adc_channel_label, \
+ .consumer_dev_name = _consumer_dev_name, \
+ .consumer_channel = _consumer_channel, \
+ }
+
+struct iio_map basincove_iio_maps[] = {
+ MSIC_ADC_MAP("CH0", "VIBAT", "VBAT"),
+ MSIC_ADC_MAP("CH1", "BATID", "BATID"),
+ MSIC_ADC_MAP("CH2", "VIBAT", "IBAT"),
+ MSIC_ADC_MAP("CH3", "PMICTEMP", "PMICTEMP"),
+ MSIC_ADC_MAP("CH4", "BATTEMP", "BATTEMP0"),
+ MSIC_ADC_MAP("CH5", "BATTEMP", "BATTEMP1"),
+ MSIC_ADC_MAP("CH6", "SYSTEMP", "SYSTEMP0"),
+ MSIC_ADC_MAP("CH7", "SYSTEMP", "SYSTEMP1"),
+ MSIC_ADC_MAP("CH8", "SYSTEMP", "SYSTEMP2"),
+ MSIC_ADC_MAP("CH6", "bcove_thrm", "SYSTEMP0"),
+ MSIC_ADC_MAP("CH7", "bcove_thrm", "SYSTEMP1"),
+ MSIC_ADC_MAP("CH8", "bcove_thrm", "SYSTEMP2"),
+ MSIC_ADC_MAP("CH3", "bcove_thrm", "PMICTEMP"),
+ { },
+};
+
+#define MSIC_ADC_CHANNEL(_type, _channel, _datasheet_name) \
+ { \
+ .indexed = 1, \
+ .type = _type, \
+ .channel = _channel, \
+ .datasheet_name = _datasheet_name, \
+ }
+
+/*
+ * IIO channel table for the Basin Cove GPADC; datasheet_name entries
+ * match the rows in basincove_gpadc_regmaps.
+ * (Fixed a duplicate "const" qualifier in the declaration.)
+ */
+static const struct iio_chan_spec basincove_adc_channels[] = {
+	MSIC_ADC_CHANNEL(IIO_VOLTAGE, 0, "CH0"),
+	MSIC_ADC_CHANNEL(IIO_RESISTANCE, 1, "CH1"),
+	MSIC_ADC_CHANNEL(IIO_CURRENT, 2, "CH2"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 3, "CH3"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 4, "CH4"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 5, "CH5"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 6, "CH6"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 7, "CH7"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 8, "CH8"),
+};
+
+static struct intel_basincove_gpadc_platform_data bcove_adc_pdata = {
+ .channel_num = GPADC_CH_NUM,
+ .intr = GPADC_SRAM_INTR_ADDR,
+ .gpadc_iio_maps = basincove_iio_maps,
+ .gpadc_regmaps = basincove_gpadc_regmaps,
+ .gpadc_regs = &basincove_gpadc_regs,
+ .gpadc_channels = basincove_adc_channels,
+};
+
+/*
+ * bcove_adc_platform_data - SFI callback for the Basin Cove GPADC.
+ * @info: struct sfi_device_table_entry supplied by the SFI parser
+ *
+ * Allocates and registers the bcove_adc platform device, points it at
+ * the static bcove_adc_pdata, installs the SFI IRQ resource and
+ * registers the matching rpmsg service.  Always returns
+ * &bcove_adc_pdata, even when registration fails.
+ */
+void __init *bcove_adc_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	int ret;
+
+	pdev = platform_device_alloc(BCOVE_ADC_DEV_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					BCOVE_ADC_DEV_NAME);
+		goto out;
+	}
+
+	/* NOTE(review): most of these repeat the static initializer above;
+	 * only intr_mask is new here. */
+	bcove_adc_pdata.channel_num = GPADC_CH_NUM;
+	bcove_adc_pdata.intr = GPADC_SRAM_INTR_ADDR;
+	bcove_adc_pdata.intr_mask = MBATTEMP | MSYSTEMP | MBATT
+		| MVIBATT | MCCTICK;
+	bcove_adc_pdata.gpadc_iio_maps = basincove_iio_maps;
+	bcove_adc_pdata.gpadc_regmaps = basincove_gpadc_regmaps;
+	bcove_adc_pdata.gpadc_regs = &basincove_gpadc_regs;
+	bcove_adc_pdata.gpadc_channels = basincove_adc_channels;
+
+	pdev->dev.platform_data = &bcove_adc_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add bcove adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	install_irq_resource(pdev, entry->irq);
+
+	register_rpmsg_service("rpmsg_bcove_adc", RPROC_SCU,
+				RP_BCOVE_ADC);
+out:
+	return &bcove_adc_pdata;
+}
--- /dev/null
+/*
+ * platform_bcove_adc.h: Head File for Merrifield Basincove GPADC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_BCOVE_ADC_H_
+#define _PLATFORM_BCOVE_ADC_H_
+
+#define BCOVE_ADC_DEV_NAME "bcove_adc"
+
+extern void __init *bcove_adc_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_mrfl_bq24261.c: Platform data for bq24261 charger driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/power_supply.h>
+#include <asm/pmic_pdata.h>
+#include <linux/power/bq24261_charger.h>
+#include <asm/intel-mid.h>
+
+#include "platform_ipc.h"
+#include "platform_bq24261.h"
+
+#define BOOST_CUR_LIM 500
+
+static struct power_supply_throttle bq24261_throttle_states[] = {
+ {
+ .throttle_action = PSY_THROTTLE_CC_LIMIT,
+ .throttle_val = BQ24261_CHRG_CUR_NOLIMIT,
+
+ },
+ {
+ .throttle_action = PSY_THROTTLE_CC_LIMIT,
+ .throttle_val = BQ24261_CHRG_CUR_MEDIUM,
+
+ },
+ {
+ .throttle_action = PSY_THROTTLE_DISABLE_CHARGING,
+ },
+ {
+ .throttle_action = PSY_THROTTLE_DISABLE_CHARGER,
+ },
+
+};
+
+char *bq24261_supplied_to[] = {
+ "max170xx_battery",
+ "max17047_battery",
+};
+
+
+/*
+ * bq24261_platform_data - build platform data for the bq24261 charger.
+ * @info: SFI device entry (unused)
+ *
+ * Wires the charger to the PMIC SRAM interrupt map, the supplied-to
+ * battery list and the throttle table.  When PMIC_CCSM is enabled the
+ * charging-control ops are routed through the PMIC; otherwise they stay
+ * NULL and the driver falls back to direct control.
+ * Returns a pointer to the function-static pdata (persists after init).
+ */
+void __init *bq24261_platform_data(void *info)
+{
+	static struct bq24261_plat_data bq24261_pdata;
+
+
+	bq24261_pdata.irq_map = PMIC_SRAM_INTR_MAP;
+	bq24261_pdata.irq_mask = PMIC_EXT_INTR_MASK;
+	bq24261_pdata.supplied_to = bq24261_supplied_to;
+	bq24261_pdata.num_supplicants = ARRAY_SIZE(bq24261_supplied_to);
+	bq24261_pdata.throttle_states = bq24261_throttle_states;
+	bq24261_pdata.num_throttle_states = ARRAY_SIZE(bq24261_throttle_states);
+	bq24261_pdata.enable_charger = NULL;
+#ifdef CONFIG_PMIC_CCSM
+	/* Charging control is delegated to the PMIC CCSM block */
+	bq24261_pdata.enable_charging = pmic_enable_charging;
+	bq24261_pdata.set_inlmt = pmic_set_ilimma;
+	bq24261_pdata.set_cc = pmic_set_cc;
+	bq24261_pdata.set_cv = pmic_set_cv;
+	bq24261_pdata.dump_master_regs = dump_pmic_regs;
+	bq24261_pdata.enable_vbus = pmic_enable_vbus;
+#endif
+	bq24261_pdata.set_iterm = NULL;
+	bq24261_pdata.boost_mode_ma = BOOST_CUR_LIM;
+
+	return &bq24261_pdata;
+}
--- /dev/null
+/*
+ * platform_mrfl_bq24261.h: platform data for bq24261 driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_BQ24261_H_
+#define _PLATFORM_MRFL_BQ24261_H_
+
+#define MRFL_CHRGR_DEV_NAME "bq24261_charger"
+
+#define PMIC_SRAM_INTR_MAP 0xFFFFF616
+#define PMIC_EXT_INTR_MASK 0x01
+
+#define BQ24261_CHRG_CUR_LOW 100 /* 100mA */
+#define BQ24261_CHRG_CUR_MEDIUM 500 /* 500mA */
+#define BQ24261_CHRG_CUR_HIGH 900 /* 900mA */
+#define BQ24261_CHRG_CUR_NOLIMIT 1500 /* 1500mA */
+
+extern void __init *bq24261_platform_data(
+ void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_btlpm: btlpm platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/pm_runtime.h>
+#include <asm/bcm_bt_lpm.h>
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+
+#define UART_PORT_NO 0 /* Bluetooth is using UART port number 0 */
+
+#define LPM_ON
+
+static struct bcm_bt_lpm_platform_data bcm_bt_lpm_pdata = {
+ .gpio_wake = -EINVAL,
+ .gpio_host_wake = -EINVAL,
+ .int_host_wake = -EINVAL,
+ .gpio_enable = -EINVAL,
+ .port = UART_PORT_NO,
+};
+
+struct platform_device bcm_bt_lpm_device = {
+ .name = "bcm_bt_lpm",
+ .id = 0,
+ .dev = {
+ .platform_data = &bcm_bt_lpm_pdata,
+ },
+};
+
+/*
+ * bluetooth_init - register the Broadcom BT low-power-mode device.
+ *
+ * Resolves the BT control GPIOs from the SFI GPIO table (reset, and —
+ * when LPM is enabled — host-wake and wake lines), then registers the
+ * bcm_bt_lpm platform device.  Runs at device_initcall time.
+ *
+ * Returns 0 on success, -ENODEV when a required GPIO is missing, or the
+ * platform_device_register() error code on registration failure.
+ */
+static int __init bluetooth_init(void)
+{
+
+	int error_reg;
+
+	/* Get the GPIO numbers from the SFI table */
+
+	bcm_bt_lpm_pdata.gpio_enable = get_gpio_by_name("BT-reset");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_enable)) {
+		pr_err("%s: gpio %s not found\n", __func__, "BT-reset");
+		return -ENODEV;
+	}
+
+#ifdef LPM_ON
+	bcm_bt_lpm_pdata.gpio_host_wake = get_gpio_by_name("bt_uart_enable");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_host_wake)) {
+		pr_err("%s: gpio %s not found\n", __func__, "bt_uart_enable");
+		return -ENODEV;
+	}
+
+	bcm_bt_lpm_pdata.int_host_wake =
+		gpio_to_irq(bcm_bt_lpm_pdata.gpio_host_wake);
+
+	bcm_bt_lpm_pdata.gpio_wake = get_gpio_by_name("bt_wakeup");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_wake)) {
+		pr_err("%s: gpio %s not found\n", __func__, "bt_wakeup");
+		return -ENODEV;
+	}
+
+	pr_debug("%s: gpio_wake %d, gpio_host_wake %d\n", __func__,
+		bcm_bt_lpm_pdata.gpio_wake, bcm_bt_lpm_pdata.gpio_host_wake);
+#endif
+
+	error_reg = platform_device_register(&bcm_bt_lpm_device);
+	if (error_reg < 0) {
+		pr_err("%s: platform_device_register for %s failed\n",
+			__func__, bcm_bt_lpm_device.name);
+		/* Propagate the real error instead of masking it as -ENODEV */
+		return error_reg;
+	}
+	return 0;
+}
+
+device_initcall(bluetooth_init);
--- /dev/null
+/*
+ * platform_dw_i2c.c: I2C platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/lnw_gpio.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+
+struct i2c_pin_cfg {
+ int scl_gpio;
+ int scl_alt;
+ int sda_gpio;
+ int sda_alt;
+};
+
+enum {
+ BOARD_NONE = 0,
+ BOARD_VTB,
+ BOARD_SALTBAY,
+};
+
+static struct i2c_pin_cfg dw_i2c_pin_cfgs[][10] = {
+ [BOARD_NONE] = {},
+ [BOARD_VTB] = {
+ [1] = {27, 1, 26, 1},
+ },
+ [BOARD_SALTBAY] = {
+ [1] = {19, 1, 20, 1},
+ },
+};
+
+/*
+ * intel_mid_dw_i2c_abort - attempt to recover a hung I2C bus.
+ * @busnum: adapter number whose SCL/SDA pins are toggled
+ *
+ * Takes the bus' pins out of I2C alternate mode, clocks SCL up to nine
+ * times so a stuck slave can release SDA, drives a STOP-like sequence,
+ * then restores the I2C pin muxing.
+ *
+ * Returns 0 when no recovery pins are configured for this bus;
+ * otherwise always returns -EBUSY — the recovery outcome is only
+ * reported via the log, never in the return value.
+ */
+int intel_mid_dw_i2c_abort(int busnum)
+{
+	int i;
+	int ret = -EBUSY;
+	struct i2c_pin_cfg *pins = &dw_i2c_pin_cfgs[BOARD_NONE][busnum];
+
+	/* Pick the per-board pin table for this SoC */
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+		pins = &dw_i2c_pin_cfgs[BOARD_VTB][busnum];
+		break;
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		pins = &dw_i2c_pin_cfgs[BOARD_SALTBAY][busnum];
+		break;
+	default:
+		break;
+	}
+
+	if (!pins->scl_gpio || !pins->sda_gpio) {
+		pr_err("i2c-%d: recovery ignore\n", busnum);
+		return 0;
+	}
+	pr_err("i2c-%d: try to abort xfer, scl_gpio %d, sda_gpio %d\n",
+			busnum, pins->scl_gpio, pins->sda_gpio);
+	gpio_request(pins->scl_gpio, "scl");
+	gpio_request(pins->sda_gpio, "sda");
+	/* Switch both pins from I2C function to plain GPIO */
+	lnw_gpio_set_alt(pins->scl_gpio, LNW_GPIO);
+	lnw_gpio_set_alt(pins->sda_gpio, LNW_GPIO);
+	gpio_direction_input(pins->scl_gpio);
+	gpio_direction_input(pins->sda_gpio);
+	usleep_range(10, 10);
+	pr_err("i2c-%d: scl_gpio val %d, sda_gpio val %d\n",
+			busnum,
+			gpio_get_value(pins->scl_gpio) ? 1 : 0,
+			gpio_get_value(pins->sda_gpio) ? 1 : 0);
+	gpio_direction_output(pins->scl_gpio, 1);
+	pr_err("i2c-%d: toggle begin\n", busnum);
+	/* Clock SCL up to 9 times until the slave releases SDA */
+	for (i = 0; i < 9; i++) {
+		if (gpio_get_value(pins->sda_gpio)) {
+			if (gpio_get_value(pins->scl_gpio)) {
+				pr_err("i2c-%d: recovery success\n", busnum);
+				break;
+			} else {
+				gpio_direction_output(pins->scl_gpio, 0);
+				pr_err("i2c-%d: scl_gpio val 0, sda_gpio val 1\n",
+					busnum);
+			}
+		}
+		gpio_set_value(pins->scl_gpio, 0);
+		usleep_range(10, 20);
+		gpio_set_value(pins->scl_gpio, 1);
+		usleep_range(10, 20);
+		pr_err("i2c-%d: toggle SCL loop %d\n", busnum, i);
+	}
+	pr_err("i2c-%d: toggle end\n", busnum);
+	/* Generate a STOP-like SCL/SDA sequence, then restore I2C muxing */
+	gpio_direction_output(pins->scl_gpio, 1);
+	gpio_direction_output(pins->sda_gpio, 0);
+	gpio_set_value(pins->scl_gpio, 0);
+	usleep_range(10, 20);
+	gpio_set_value(pins->scl_gpio, 1);
+	usleep_range(10, 20);
+	gpio_set_value(pins->sda_gpio, 0);
+	lnw_gpio_set_alt(pins->scl_gpio, pins->scl_alt);
+	lnw_gpio_set_alt(pins->sda_gpio, pins->sda_alt);
+	usleep_range(10, 10);
+	gpio_free(pins->scl_gpio);
+	gpio_free(pins->sda_gpio);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_dw_i2c_abort);
--- /dev/null
+/*
+ * platform_emc1403.c: emc1403 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <asm/intel-mid.h>
+#include "platform_emc1403.h"
+
+/*
+ * emc1403_platform_data - SFI callback for the EMC1403 thermal sensor.
+ * @info: struct i2c_board_info supplied by the SFI parser
+ *
+ * Fills in the primary interrupt on the board info and returns a
+ * pointer to the secondary (alert) interrupt number as platform data,
+ * or NULL when either GPIO is missing from the SFI table.
+ */
+void __init *emc1403_platform_data(void *info)
+{
+	static short intr2nd_pdata;
+	struct i2c_board_info *i2c_info = info;
+	int thermal_int = get_gpio_by_name("thermal_int");
+	int thermal_alert = get_gpio_by_name("thermal_alert");
+
+	if (thermal_int == -1 || thermal_alert == -1)
+		return NULL;
+
+	i2c_info->irq = thermal_int + INTEL_MID_IRQ_OFFSET;
+	intr2nd_pdata = thermal_alert + INTEL_MID_IRQ_OFFSET;
+
+	return &intr2nd_pdata;
+}
--- /dev/null
+/*
+ * platform_emc1403.h: emc1403 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_EMC1403_H_
+#define _PLATFORM_EMC1403_H_
+
+extern void __init *emc1403_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_gpio_keys.c: gpio_keys platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include "platform_gpio_keys.h"
+
+/*
+ * we will search these buttons in SFI GPIO table (by name)
+ * and register them dynamically. Please add all possible
+ * buttons here, we will shrink them if no GPIO found.
+ */
+static struct gpio_keys_button gpio_button[] = {
+ {
+ .code = KEY_POWER,
+ .gpio = -1, /* GPIO number */
+ .active_low = 1,
+ .desc = "power_btn",/*Button description*/
+ .type = EV_KEY,
+ .wakeup = 0,
+ .debounce_interval = 3000,
+ },
+ {
+ .code = KEY_PROG1,
+ .gpio = 61,
+ .active_low = 1,
+ .desc = "SW1UI4",
+ .type = EV_KEY,
+ .wakeup = 0,
+ .debounce_interval = 50,
+ },
+};
+
+static struct gpio_keys_platform_data gpio_keys = {
+ .buttons = gpio_button,
+ .rep = 1,
+ .nbuttons = -1, /* will fill it after search */
+};
+
+static struct platform_device pb_device = {
+ .name = DEVICE_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_keys,
+ },
+};
+
+/*
+ * Shrink the non-existent buttons, register the gpio button
+ * device if there is some
+ */
+/*
+ * pb_keys_init - register the gpio-keys device for buttons that exist.
+ *
+ * Buttons whose GPIO was never resolved (still -1) are compacted out of
+ * the table; the platform device is registered only when at least one
+ * button remains.  Uses ARRAY_SIZE instead of the hand-rolled sizeof
+ * division.
+ */
+static int __init pb_keys_init(void)
+{
+	struct gpio_keys_button *gb = gpio_button;
+	int i, num, good = 0;
+
+	num = ARRAY_SIZE(gpio_button);
+	for (i = 0; i < num; i++) {
+		pr_info("info[%2d]: name = %s, gpio = %d\n",
+			i, gb[i].desc, gb[i].gpio);
+		if (gb[i].gpio == -1)
+			continue;
+
+		/* Compact valid entries towards the front of the array */
+		if (i != good)
+			gb[good] = gb[i];
+		good++;
+	}
+
+	if (good) {
+		gpio_keys.nbuttons = good;
+		return platform_device_register(&pb_device);
+	}
+	return 0;
+}
+late_initcall(pb_keys_init);
--- /dev/null
+/*
+ * platform_gpio_keys.h: gpio_keys platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_GPIO_KEYS_H_
+#define _PLATFORM_GPIO_KEYS_H_
+
+#define DEVICE_NAME "gpio-keys"
+#endif
--- /dev/null
+/*
+ * platform_hsu.c: hsu platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/lnw_gpio.h>
+#include <linux/gpio.h>
+#include <asm/setup.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_hsu.h>
+
+#include "platform_hsu.h"
+
+#define TNG_CLOCK_CTL 0xFF00B830
+#define TNG_CLOCK_SC 0xFF00B868
+
+#define VLV_HSU_CLOCK 0x0800
+#define VLV_HSU_RESET 0x0804
+
+static unsigned int clock;
+static struct hsu_port_pin_cfg *hsu_port_gpio_mux;
+static struct hsu_port_cfg *platform_hsu_info;
+
+static struct
+hsu_port_pin_cfg hsu_port_pin_cfgs[][hsu_pid_max][hsu_port_max] = {
+ [hsu_tng] = {
+ [hsu_pid_def] = {
+ [hsu_port0] = {
+ .id = 0,
+ .name = HSU_BT_PORT,
+ .rx_gpio = 126,
+ .rx_alt = 1,
+ .tx_gpio = 127,
+ .tx_alt = 1,
+ .cts_gpio = 124,
+ .cts_alt = 1,
+ .rts_gpio = 125,
+ .rts_alt = 1,
+ },
+ [hsu_port1] = {
+ .id = 1,
+ .name = HSU_UART1_PORT,
+ .wake_gpio = 130,
+ .rx_gpio = 130,
+ .rx_alt = 1,
+ .tx_gpio = 131,
+ .tx_alt = 1,
+ .cts_gpio = 128,
+ .cts_alt = 1,
+ .rts_gpio = 129,
+ .rts_alt = 1,
+ },
+ [hsu_port2] = {
+ .id = 2,
+ .name = HSU_UART2_PORT,
+ .wake_gpio = 134,
+ .rx_gpio = 134,
+ .rx_alt = 1,
+ .cts_gpio = 132,
+ .cts_alt = 1,
+ .rts_gpio = 133,
+ .rts_alt = 1,
+ },
+ },
+ },
+
+};
+
+static struct hsu_port_cfg hsu_port_cfgs[][hsu_port_max] = {
+ [hsu_tng] = {
+ [hsu_port0] = {
+ .type = bt_port,
+ .hw_ip = hsu_intel,
+ .index = 0,
+ .name = HSU_BT_PORT,
+ .idle = 20,
+ .hw_init = intel_mid_hsu_init,
+ .hw_set_alt = intel_mid_hsu_switch,
+ .hw_set_rts = intel_mid_hsu_rts,
+ .hw_suspend = intel_mid_hsu_suspend,
+ .hw_resume = intel_mid_hsu_resume,
+ .hw_get_clk = intel_mid_hsu_get_clk,
+ .hw_context_save = 1,
+ },
+ [hsu_port1] = {
+ .type = gps_port,
+ .hw_ip = hsu_intel,
+ .index = 1,
+ .name = HSU_UART1_PORT,
+ .idle = 30,
+ .preamble = 1,
+ .hw_init = intel_mid_hsu_init,
+ .hw_set_alt = intel_mid_hsu_switch,
+ .hw_set_rts = intel_mid_hsu_rts,
+ .hw_suspend = intel_mid_hsu_suspend,
+ .hw_suspend_post = intel_mid_hsu_suspend_post,
+ .hw_resume = intel_mid_hsu_resume,
+ .hw_get_clk = intel_mid_hsu_get_clk,
+ .hw_context_save = 1,
+ },
+ [hsu_port2] = {
+ .type = debug_port,
+ .hw_ip = hsu_intel,
+ .index = 2,
+ .name = HSU_UART2_PORT,
+ .idle = 5000,
+ .hw_init = intel_mid_hsu_init,
+ .hw_set_alt = intel_mid_hsu_switch,
+ .hw_suspend = intel_mid_hsu_suspend,
+ .hw_resume = intel_mid_hsu_resume,
+ .hw_get_clk = intel_mid_hsu_get_clk,
+ .hw_context_save = 1,
+ },
+ },
+};
+
+static struct hsu_func2port hsu_port_func_id_tlb[][hsu_port_func_max] = {
+ [hsu_tng] = {
+ [0] = {
+ .func = 0,
+ .port = -1,
+ },
+ [1] = {
+ .func = 1,
+ .port = hsu_port0,
+ },
+ [2] = {
+ .func = 2,
+ .port = hsu_port1,
+ },
+ [3] = {
+ .func = 3,
+ .port = hsu_port2,
+ },
+ },
+};
+
+/*
+ * Route @port's pins back to their HSU (UART) alternate functions.
+ * Each pin is only touched if the board config declares it (non-zero).
+ */
+static void hsu_port_enable(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, info->rx_alt);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		/* Drive TX high before muxing, then pulse low — presumably to
+		 * avoid a glitch on the line; confirm against HW erratum. */
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, info->tx_alt);
+		usleep_range(10, 10);
+		gpio_direction_output(info->tx_gpio, 0);
+
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, info->cts_alt);
+		gpio_direction_input(info->cts_gpio);
+	}
+	if (info->rts_gpio) {
+		gpio_direction_output(info->rts_gpio, 0);
+		lnw_gpio_set_alt(info->rts_gpio, info->rts_alt);
+	}
+}
+
+/*
+ * Park @port's pins as plain GPIO inputs so another port sharing the
+ * same mux can take over the lines.
+ */
+static void hsu_port_disable(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, LNW_GPIO);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		/* Briefly drive TX high before releasing it as an input */
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, LNW_GPIO);
+		usleep_range(10, 10);
+		gpio_direction_input(info->tx_gpio);
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, LNW_GPIO);
+		gpio_direction_input(info->cts_gpio);
+	}
+	if (info->rts_gpio) {
+		lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+		gpio_direction_input(info->rts_gpio);
+	}
+}
+
+/*
+ * intel_mid_hsu_suspend - arm the wake GPIO before suspending @port.
+ * @dev: owning device, passed back to the wake ISR
+ * @wake_isr: handler invoked when the wake line toggles
+ *
+ * Muxes the wake pin to GPIO and requests an edge-triggered interrupt
+ * on it so activity on the line can wake the port.
+ *
+ * NOTE(review): request_irq() is given IRQ_TYPE_EDGE_* values where
+ * IRQF_TRIGGER_* flags are expected — they happen to be numerically
+ * identical, but the IRQF_ macros are the correct ones to use here.
+ */
+void intel_mid_hsu_suspend(int port, struct device *dev,
+			   irq_handler_t wake_isr)
+{
+	int ret;
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	info->dev = dev;
+	info->wake_isr = wake_isr;
+
+	if (info->wake_gpio) {
+		lnw_gpio_set_alt(info->wake_gpio, LNW_GPIO);
+		gpio_direction_input(info->wake_gpio);
+		udelay(10);
+		ret = request_irq(gpio_to_irq(info->wake_gpio), info->wake_isr,
+				IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING,
+				info->name, info->dev);
+		if (ret)
+			dev_err(info->dev, "failed to register wakeup irq\n");
+	}
+}
+
+/*
+ * intel_mid_hsu_resume - undo intel_mid_hsu_suspend for @port.
+ *
+ * Releases the wake IRQ and restores RX/TX/CTS pins to their HSU
+ * alternate functions (RTS is restored separately via
+ * intel_mid_hsu_rts).
+ */
+void intel_mid_hsu_resume(int port, struct device *dev)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->wake_gpio)
+		free_irq(gpio_to_irq(info->wake_gpio), info->dev);
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, info->rx_alt);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		/* Same high-then-low TX sequencing as hsu_port_enable() */
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, info->tx_alt);
+		usleep_range(10, 10);
+		gpio_direction_output(info->tx_gpio, 0);
+
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, info->cts_alt);
+		gpio_direction_input(info->cts_gpio);
+	}
+}
+
+/*
+ * intel_mid_hsu_switch - route the shared pins to @port.
+ *
+ * Disables every other port entry that shares the same mux id, then
+ * enables the requested port's pin configuration.
+ */
+void intel_mid_hsu_switch(int port)
+{
+	struct hsu_port_pin_cfg *target = hsu_port_gpio_mux + port;
+	struct hsu_port_pin_cfg *cur;
+	int idx;
+
+	for (idx = 0; idx < hsu_port_max; idx++) {
+		cur = hsu_port_gpio_mux + idx;
+		if (cur->id == target->id && cur != target)
+			hsu_port_disable(idx);
+	}
+	hsu_port_enable(port);
+}
+
+/*
+ * intel_mid_hsu_rts - manual RTS pin control for @port.
+ * Non-zero @value drives rts high in GPIO mode; zero hands the pin
+ * back to its functional ALT mode. No-op for ports without an rts pin.
+ */
+void intel_mid_hsu_rts(int port, int value)
+{
+ struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+ if (!info->rts_gpio)
+ return;
+
+ if (value) {
+ gpio_direction_output(info->rts_gpio, 1);
+ lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+ } else
+ lnw_gpio_set_alt(info->rts_gpio, info->rts_alt);
+}
+
+/*
+ * intel_mid_hsu_suspend_post - after suspend, drive rts low in GPIO
+ * mode on ports whose wake pin doubles as the rx pin -- presumably so
+ * the remote side may transmit the wake character (TODO confirm).
+ */
+void intel_mid_hsu_suspend_post(int port)
+{
+ struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+ if (info->rts_gpio && info->wake_gpio
+ && info->wake_gpio == info->rx_gpio) {
+ gpio_direction_output(info->rts_gpio, 0);
+ lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+ }
+}
+
+/*
+ * intel_mid_hsu_set_clk - program the HSU M/N clock divider register.
+ * The new M/N value is latched by pulsing the update bit (bit 31).
+ */
+void intel_mid_hsu_set_clk(unsigned int m, unsigned int n,
+ void __iomem *addr)
+{
+ unsigned int param, update_bit;
+
+ /* 1U: "1 << 31" overflows signed int, which is undefined behavior */
+ update_bit = 1U << 31;
+ param = (m << 1) | (n << 16) | 0x1;
+
+ writel(param, addr + VLV_HSU_CLOCK);
+ writel((param | update_bit), addr + VLV_HSU_CLOCK);
+ writel(param, addr + VLV_HSU_CLOCK);
+}
+
+/*
+ * intel_mid_hsu_reset - pulse the HSU reset register: write 0 to assert
+ * reset, then 3 to release it (assumed per VLV HSU spec -- confirm).
+ */
+void intel_mid_hsu_reset(void __iomem *addr)
+{
+ writel(0, addr + VLV_HSU_RESET);
+ writel(3, addr + VLV_HSU_RESET);
+}
+
+/* Return the cached HSU core clock in kHz (set by hsu_platform_clk()). */
+unsigned int intel_mid_hsu_get_clk(void)
+{
+ return clock;
+}
+
+/*
+ * intel_mid_hsu_func_to_port - translate a PCI function number into a
+ * logical HSU port index via the per-SoC lookup table.
+ * Returns -1 when @func has no port mapping.
+ */
+int intel_mid_hsu_func_to_port(unsigned int func)
+{
+ int i;
+ struct hsu_func2port *tbl = NULL;
+
+ switch (intel_mid_identify_cpu()) {
+ case INTEL_MID_CPU_CHIP_TANGIER:
+ tbl = &hsu_port_func_id_tlb[hsu_tng][0];
+ break;
+ default:
+ /* FIXME: VALLEYVIEW2? */
+ /* 1e.3 and 1e.4 */
+ tbl = &hsu_port_func_id_tlb[hsu_vlv2][0];
+ break;
+ }
+
+ for (i = 0; i < hsu_port_func_max; i++) {
+ if (tbl->func == func)
+ return tbl->port;
+ tbl++;
+ }
+
+ return -1;
+}
+
+/*
+ * intel_mid_hsu_init - claim and sysfs-export the GPIOs of a HSU port.
+ * @dev: owning device, cached in the port config
+ * @port: logical port index
+ * Returns 1 on success, -ENODEV if @port is out of range.
+ */
+int intel_mid_hsu_init(struct device *dev, int port)
+{
+ struct hsu_port_cfg *port_cfg;
+ struct hsu_port_pin_cfg *info;
+
+ if (port >= hsu_port_max)
+ return -ENODEV;
+
+ /* only index the tables after the range check above */
+ port_cfg = platform_hsu_info + port;
+ port_cfg->dev = dev;
+
+ info = hsu_port_gpio_mux + port;
+ /* gpio_request() is __must_check: log failures instead of ignoring */
+ if (info->wake_gpio) {
+ if (gpio_request(info->wake_gpio, "hsu"))
+ pr_warn("hsu: failed to request wake gpio %d\n",
+ info->wake_gpio);
+ }
+ if (info->rx_gpio) {
+ if (gpio_request(info->rx_gpio, "hsu"))
+ pr_warn("hsu: failed to request rx gpio %d\n",
+ info->rx_gpio);
+ gpio_export(info->rx_gpio, 1);
+ }
+ if (info->tx_gpio) {
+ if (gpio_request(info->tx_gpio, "hsu"))
+ pr_warn("hsu: failed to request tx gpio %d\n",
+ info->tx_gpio);
+ gpio_export(info->tx_gpio, 1);
+ }
+ if (info->cts_gpio) {
+ if (gpio_request(info->cts_gpio, "hsu"))
+ pr_warn("hsu: failed to request cts gpio %d\n",
+ info->cts_gpio);
+ gpio_export(info->cts_gpio, 1);
+ }
+ if (info->rts_gpio) {
+ if (gpio_request(info->rts_gpio, "hsu"))
+ pr_warn("hsu: failed to request rts gpio %d\n",
+ info->rts_gpio);
+ gpio_export(info->rts_gpio, 1);
+ }
+
+ return 1;
+}
+
+/*
+ * hsu_platform_clk - determine the HSU core clock rate for @cpu_type
+ * and cache it in 'clock' (kHz; the final pr_info prints it as MHz).
+ */
+static void hsu_platform_clk(enum intel_mid_cpu_type cpu_type)
+{
+ void __iomem *clkctl, *clksc;
+ u32 clk_src, clk_div;
+
+ switch (cpu_type) {
+ case INTEL_MID_CPU_CHIP_TANGIER:
+ clock = 100000;
+ clkctl = ioremap_nocache(TNG_CLOCK_CTL, 4);
+ if (!clkctl) {
+ pr_err("tng scu clk ctl ioremap error\n");
+ break;
+ }
+
+ clksc = ioremap_nocache(TNG_CLOCK_SC, 4);
+ if (!clksc) {
+ pr_err("tng scu clk sc ioremap error\n");
+ iounmap(clkctl);
+ break;
+ }
+
+ clk_src = readl(clkctl);
+ clk_div = readl(clksc);
+
+ /* bit 16: 100 MHz SCU fabric source, scaled by a 3-bit divider */
+ if (clk_src & (1 << 16))
+ /* source SCU fabric 100M */
+ clock = clock / ((clk_div & 0x7) + 1);
+ else {
+ if (clk_src & (1 << 31))
+ /* source OSCX2 38.4M */
+ clock = 38400;
+ else
+ /* source OSC clock 19.2M */
+ clock = 19200;
+ }
+
+ iounmap(clkctl);
+ iounmap(clksc);
+ break;
+
+ default:
+ /* FIXME: VALLEYVIEW2? */
+ clock = 100000;
+ break;
+ }
+
+ pr_info("hsu core clock %u M\n", clock / 1000);
+}
+
+/*
+ * hsu_dev_platform_data - initcall: select the port/pin tables for the
+ * detected SoC, register them with the HSU driver and latch the core
+ * clock rate. Only Tangier has tables here; other SoCs bail out.
+ */
+static __init int hsu_dev_platform_data(void)
+{
+ switch (intel_mid_identify_cpu()) {
+ case INTEL_MID_CPU_CHIP_TANGIER:
+ platform_hsu_info = &hsu_port_cfgs[hsu_tng][0];
+ hsu_port_gpio_mux = &hsu_port_pin_cfgs[hsu_tng][hsu_pid_def][0];
+ break;
+ default:
+ platform_hsu_info = NULL;
+ hsu_port_gpio_mux = NULL;
+ break;
+ }
+
+ if (platform_hsu_info == NULL)
+ return -ENODEV;
+
+ if (hsu_port_gpio_mux == NULL)
+ return -ENODEV;
+
+ hsu_register_board_info(platform_hsu_info);
+ hsu_platform_clk(intel_mid_identify_cpu());
+
+ return 0;
+}
+
+fs_initcall(hsu_dev_platform_data);
--- /dev/null
+/*
+ * platform_hsu.h: hsu platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_HSU_H_
+#define _PLATFORM_HSU_H_
+
+#define HSU_BT_PORT "hsu_bt_port"
+#define HSU_UART0_PORT "hsu_uart0_port"
+#define HSU_UART1_PORT "hsu_uart1_port"
+#define HSU_UART2_PORT "hsu_uart2_port"
+
+/* HSU controller variants, one per SoC generation. */
+enum hsu_core {
+ hsu_pnw,
+ hsu_clv,
+ hsu_tng,
+ hsu_vlv2,
+};
+
+/* Board/platform id selecting one of the pin-config tables. */
+enum hsu_pid {
+ hsu_pid_def = 0,
+ hsu_pid_rhb = 0,
+ hsu_pid_vtb_pro = 1,
+ hsu_pid_vtb_eng = 2,
+ hsu_pid_max,
+};
+
+/* Maps a PCI function number to a logical HSU port index. */
+struct hsu_func2port {
+ int func;
+ int port;
+};
+
+/*
+ * Per-port pin mux description: GPIO number and functional ALT mode for
+ * each UART line (a GPIO of 0 means "pin not used"), plus wake-irq
+ * bookkeeping (dev/wake_isr) filled in at suspend time.
+ */
+struct hsu_port_pin_cfg {
+ char *name;
+ int id;
+ int wake_gpio;
+ int rx_gpio;
+ int rx_alt;
+ int tx_gpio;
+ int tx_alt;
+ int cts_gpio;
+ int cts_alt;
+ int rts_gpio;
+ int rts_alt;
+ struct device *dev;
+ irq_handler_t wake_isr;
+};
+
+#endif
--- /dev/null
+/*
+ * platform_ipc.c: IPC platform library file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/sfi.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_ipc.h"
+
+/*
+ * ipc_device_handler - SFI enumeration callback for IPC devices.
+ *
+ * IPC device creation is handled by the MSIC MFD driver, so no platform
+ * device is registered here. We only invoke the device's platform-data
+ * hook so it can fill in its private data (consumed later by msic_init).
+ *
+ * Fixed: function opening brace moved to its own line (kernel style)
+ * and the never-read 'pdata' local removed -- get_platform_data() is
+ * called purely for its side effect.
+ */
+void ipc_device_handler(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ pr_info("IPC bus, name = %16.16s, irq = 0x%2x\n",
+ pentry->name, pentry->irq);
+ if (dev != NULL)
+ dev->get_platform_data(pentry);
+}
--- /dev/null
+/*
+ * platform_ipc.h: IPC platform library header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_IPC_H_
+#define _PLATFORM_IPC_H_
+
+extern void ipc_device_handler(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_lis331.c: lis331 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_lis331.h"
+
+/*
+ * lis331dl_platform_data - SFI fixup for the lis331dl accelerometer:
+ * resolve both interrupt GPIOs by name, wire the first into the i2c
+ * board info, and return the second as the driver's platform data.
+ * Returns NULL if either GPIO is missing from the firmware tables.
+ */
+void __init *lis331dl_platform_data(void *info)
+{
+ static short intr2nd_pdata;
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("accel_int");
+ int intr2nd = get_gpio_by_name("accel_2");
+
+ if (intr == -1 || intr2nd == -1)
+ return NULL;
+
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET;
+
+ return &intr2nd_pdata;
+}
--- /dev/null
+/*
+ * platform_lis331.h: lis331 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_LIS331_H_
+#define _PLATFORM_LIS331_H_
+
+extern void __init *lis331dl_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_max3111.c: max3111 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/lnw_gpio.h>
+#include <linux/serial_max3110.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+#include <asm/intel-mid.h>
+#include "platform_max3111.h"
+
+/* SSP SPI controller settings handed to the max3110 SPI device. */
+static struct intel_mid_ssp_spi_chip chip = {
+ .burst_size = DFLT_FIFO_BURST_SIZE,
+ .timeout = DFLT_TIMEOUT_VAL,
+ /* UART DMA is not supported in VP */
+ .dma_enabled = false,
+};
+
+/*
+ * max3111_platform_data - SPI board-info fixup for the MAX3110/3111
+ * UART. Forces the controller data and bus number (IAFW .25 workaround,
+ * see FORCE_SPI_BUS_NUM) and returns the driver platform data.
+ *
+ * Fixed: the controller_data/bus_num assignments were duplicated in the
+ * simulation branch and the normal path, and the local 'intr' was never
+ * used; both cleaned up with identical behavior.
+ */
+void __init *max3111_platform_data(void *info)
+{
+ struct spi_board_info *spi_info = info;
+ static struct plat_max3110 max3110_pdata;
+
+ spi_info->mode = SPI_MODE_0;
+ spi_info->controller_data = &chip;
+ spi_info->bus_num = FORCE_SPI_BUS_NUM;
+
+ /* max 3110 interrupt not supported by sim platforms */
+ if (intel_mid_identify_sim())
+ return &max3110_pdata;
+
+ /* use fast_int_1 (IRQ 41) on MRFL */
+ max3110_pdata.irq_edge_triggered = 0;
+
+ /*
+ * Force polling for HVP and VP simulation platforms on TANGIER
+ * and ANNIEDALE.
+ * NOTE(review): the early return above already fires for any
+ * non-zero intel_mid_identify_sim(), so this branch looks
+ * unreachable -- preserved as-is, confirm intended semantics.
+ */
+ if ((intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_VP) ||
+ (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_HVP)) {
+ if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+ (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+ spi_info->irq = 0;
+ }
+ }
+
+ return &max3110_pdata;
+}
--- /dev/null
+/*
+ * platform_max3111.h: max3111 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MAX3111_H_
+#define _PLATFORM_MAX3111_H_
+
+/* REVERT ME workaround[MRFL] for invalid bus number in IAFW .25 */
+#define FORCE_SPI_BUS_NUM 5
+#define FORCE_CHIP_SELECT 0
+
+extern void *max3111_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_max7315.c: max7315 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <asm/intel-mid.h>
+#include "platform_max7315.h"
+
+
+/*
+ * max7315_platform_data - SFI fixup for a MAX7315 GPIO expander.
+ * Supports up to MAX7315_NUM chips via repeated instances of the
+ * pca953x driver; resolves each chip's gpio-base and interrupt pins
+ * by firmware name. Returns the pca953x platform data, or NULL when
+ * too many chips are present or the base pin is missing.
+ */
+void __init *max7315_platform_data(void *info)
+{
+ static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
+ static int nr;
+ struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
+ struct i2c_board_info *i2c_info = info;
+ int gpio_base, intr;
+ char base_pin_name[SFI_NAME_LEN + 1];
+ char intr_pin_name[SFI_NAME_LEN + 1];
+
+ if (nr >= MAX7315_NUM) {
+ pr_err("too many max7315s, we only support %d\n",
+ MAX7315_NUM);
+ return NULL;
+ }
+ /* we have several max7315 on the board, we only need load several
+ * instances of the same pca953x driver to cover them
+ */
+ strcpy(i2c_info->type, "max7315");
+ /*
+ * First chip uses the bare pin names; later chips use a numbered
+ * suffix -- note 'nr' is post-incremented, so chip #2 looks up
+ * "max7315_2_base"/"max7315_2_int".
+ */
+ if (nr++) {
+ snprintf(base_pin_name, sizeof(base_pin_name),
+ "max7315_%d_base", nr);
+ snprintf(intr_pin_name, sizeof(intr_pin_name),
+ "max7315_%d_int", nr);
+ } else {
+ strcpy(base_pin_name, "max7315_base");
+ strcpy(intr_pin_name, "max7315_int");
+ }
+
+ gpio_base = get_gpio_by_name(base_pin_name);
+ intr = get_gpio_by_name(intr_pin_name);
+
+ if (gpio_base == -1)
+ return NULL;
+ max7315->gpio_base = gpio_base;
+ if (intr != -1) {
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ max7315->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+ } else {
+ /* no interrupt pin: run the expander unpolled */
+ i2c_info->irq = -1;
+ max7315->irq_base = -1;
+ }
+ return max7315;
+}
--- /dev/null
+/*
+ * platform_max7315.h: max7315 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MAX7315_H_
+#define _PLATFORM_MAX7315_H_
+
+/* we have multiple max7315 on the board ... */
+#define MAX7315_NUM 2
+
+extern void __init *max7315_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_mid_pwm.c: mid_pwm platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/lnw_gpio.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_pwm.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "platform_mid_pwm.h"
+
+/* Per-channel MSIC register map for Medfield PWMs. */
+static struct intel_mid_pwm_device_data mfld_pwms[] = {
+ [PWM_LED] = {
+ .reg_clkdiv0 = 0x62,
+ .reg_clkdiv1 = 0x61,
+ .reg_dutycyc = 0x67,
+ .val_clkdiv1 = 0x00,
+ .val_clkdiv0 = 0x03,
+ },
+ [PWM_VIBRATOR] = {
+ .reg_clkdiv0 = 0x64,
+ .reg_clkdiv1 = 0x63,
+ .reg_dutycyc = 0x68,
+ .val_clkdiv1 = 0x00,
+ .val_clkdiv0 = 0x03,
+ },
+ [PWM_LCD_BACKLIGHT] = {
+ .reg_clkdiv0 = 0x66,
+ .reg_clkdiv1 = 0x65,
+ .reg_dutycyc = 0x69,
+ .val_clkdiv1 = 0x00,
+ .val_clkdiv0 = 0x03,
+ },
+};
+
+/* Clovertrail variant: differs only in the LED channel's clkdiv0. */
+static struct intel_mid_pwm_device_data ctp_pwms[] = {
+ [PWM_LED] = {
+ .reg_clkdiv0 = 0x62,
+ .reg_clkdiv1 = 0x61,
+ .reg_dutycyc = 0x67,
+ .val_clkdiv1 = 0x00,
+ .val_clkdiv0 = 0x00,
+ },
+ [PWM_VIBRATOR] = {
+ .reg_clkdiv0 = 0x64,
+ .reg_clkdiv1 = 0x63,
+ .reg_dutycyc = 0x68,
+ .val_clkdiv1 = 0x00,
+ .val_clkdiv0 = 0x03,
+ },
+ [PWM_LCD_BACKLIGHT] = {
+ .reg_clkdiv0 = 0x66,
+ .reg_clkdiv1 = 0x65,
+ .reg_dutycyc = 0x69,
+ .val_clkdiv1 = 0x00,
+ .val_clkdiv0 = 0x03,
+ },
+};
+
+/* Per-platform wrapper selecting a channel table and clock source. */
+static struct intel_mid_pwm_platform_data pdata[] = {
+ [mfld_pwm] = {
+ .pwm_num = PWM_NUM,
+ .ddata = mfld_pwms,
+ .reg_clksel = 0x38F,
+ .val_clksel = 0x01,
+ },
+ [ctp_pwm] = {
+ .pwm_num = PWM_NUM,
+ .ddata = ctp_pwms,
+ .reg_clksel = 0x38F,
+ .val_clksel = 0x00,
+ },
+};
+
+/*
+ * Select the PWM platform data for the running board.
+ * NOTE(review): always returns the MFLD table -- the ctp_pwm entry is
+ * never selected here; confirm whether CTP detection was dropped.
+ */
+static void *get_pwm_platform_data(void)
+{
+ pr_info("%s, MFLD board detected\n", __func__);
+ return &pdata[mfld_pwm];
+}
+
+/*
+ * intel_mid_pwm_init - register the intel_mid_pwm platform device.
+ * Returns 0 on success or a negative errno (the old code returned -1,
+ * i.e. -EPERM, for every failure -- misleading for initcall debugging).
+ */
+static int __init intel_mid_pwm_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc(DEVICE_NAME, -1);
+ if (!pdev) {
+ pr_err("out of memory for platform dev %s\n",
+ DEVICE_NAME);
+ return -ENOMEM;
+ }
+
+ pdev->dev.platform_data = get_pwm_platform_data();
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ pr_err("failed to add platform device %s\n",
+ DEVICE_NAME);
+ platform_device_put(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+fs_initcall(intel_mid_pwm_init);
--- /dev/null
+/*
+ * platform_mid_pwm.h: mid_pwm platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MID_PWM_H_
+#define _PLATFORM_MID_PWM_H_
+
+#define DEVICE_NAME "intel_mid_pwm"
+
+enum {
+ mfld_pwm,
+ ctp_pwm,
+};
+#endif
--- /dev/null
+/*
+ * platform_mpu3050.c: mpu3050 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <asm/intel-mid.h>
+#include "platform_mpu3050.h"
+
+/*
+ * mpu3050_platform_data - SFI fixup for the MPU-3050 gyroscope: wire
+ * its named interrupt GPIO into the i2c board info. Always returns
+ * NULL -- the driver takes no platform data, only the irq.
+ */
+void *mpu3050_platform_data(void *info)
+{
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("mpu3050_int");
+
+ if (intr == -1)
+ return NULL;
+
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ return NULL;
+}
--- /dev/null
+/*
+ * platform_mpu3050.h: mpu3050 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MPU3050_H_
+#define _PLATFORM_MPU3050_H_
+
+extern void *mpu3050_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_mrfl_ocd.c: Platform data for Merrifield Platform OCD Driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Saranya Gopal <saranya.gopal@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_basincove_ocd.h>
+
+#include "platform_msic.h"
+#include "platform_mrfl_ocd.h"
+
+/*
+ * get_bcu_config - copy the burst-control-unit (BCU) configuration
+ * bytes out of SMIP SRAM into @ocd_smip_data.
+ * Returns 0 on success, -ENXIO for a NULL destination, -ENOMEM when
+ * the SRAM window cannot be mapped.
+ */
+static int get_bcu_config(struct ocd_bcove_config_data *ocd_smip_data)
+{
+ int i;
+ void __iomem *bcu_smip_sram_addr;
+ u8 *plat_smip_data;
+
+ if (!ocd_smip_data)
+ return -ENXIO;
+
+ plat_smip_data = (u8 *)ocd_smip_data;
+ bcu_smip_sram_addr = ioremap_nocache(MRFL_SMIP_SRAM_ADDR +
+ BCU_SMIP_OFFSET, NUM_SMIP_BYTES);
+ /* the old code dereferenced the mapping without checking it */
+ if (!bcu_smip_sram_addr)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_SMIP_BYTES; i++)
+ *(plat_smip_data + i) = ioread8(bcu_smip_sram_addr + i);
+
+ /* the mapping was previously leaked on every call */
+ iounmap(bcu_smip_sram_addr);
+
+ return 0;
+}
+
+static struct ocd_platform_data ocd_data;
+
+/*
+ * mrfl_ocd_platform_data - register the Merrifield over-current (BCU)
+ * platform device described by the SFI @info entry and return its
+ * platform data (NULL on failure).
+ */
+void __init *mrfl_ocd_platform_data(void *info)
+{
+ struct sfi_device_table_entry *entry = info;
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(MRFL_OCD_DEV_NAME, -1);
+ if (!pdev) {
+ pr_err("out of memory for SFI platform dev %s\n",
+ MRFL_OCD_DEV_NAME);
+ return NULL;
+ }
+
+ /*
+ * Attach the platform data before adding the device so a driver
+ * probed from platform_device_add() never sees empty ocd_data
+ * (previously it was filled in only after the add).
+ */
+ ocd_data.bcu_config_data = &get_bcu_config;
+ pdev->dev.platform_data = &ocd_data;
+
+ if (platform_device_add(pdev)) {
+ pr_err("failed to add merrifield ocd platform device\n");
+ platform_device_put(pdev);
+ return NULL;
+ }
+
+ install_irq_resource(pdev, entry->irq);
+ register_rpmsg_service("rpmsg_mrfl_ocd", RPROC_SCU, RP_MRFL_OCD);
+
+ return &ocd_data;
+}
--- /dev/null
+/*
+ * platform_mrfl_ocd.h: msic_thermal platform data header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Saranya Gopal <saranya.gopal@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_OCD_H_
+#define _PLATFORM_MRFL_OCD_H_
+
+#define MRFL_OCD_DEV_NAME "bcove_bcu"
+
+extern void __init *mrfl_ocd_platform_data(void *info)
+ __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_mrfl_pmic.c: Platform data for Merrifield PMIC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/power/bq24261_charger.h>
+
+#include "platform_ipc.h"
+#include "platform_mrfl_pmic.h"
+
+/*
+ * mrfl_pmic_ccsm_platform_data - register the PMIC charger-control
+ * (CCSM) platform device described by the SFI @info entry and return
+ * its platform data.
+ * NOTE(review): &pmic_pdata is returned even when allocation or
+ * registration fails -- confirm callers tolerate pdata for a device
+ * that was never added.
+ */
+void __init *mrfl_pmic_ccsm_platform_data(void *info)
+{
+ struct sfi_device_table_entry *entry = info;
+ static struct pmic_platform_data pmic_pdata;
+ struct platform_device *pdev = NULL;
+ int ret;
+
+ pdev = platform_device_alloc(entry->name, -1);
+ if (!pdev) {
+ pr_err("Out of memory for SFI platform dev %s\n", entry->name);
+ goto out;
+ }
+ pdev->dev.platform_data = &pmic_pdata;
+ ret = platform_device_add(pdev);
+ if (ret) {
+ pr_err("Failed to add adc platform device\n");
+ platform_device_put(pdev);
+ goto out;
+ }
+ install_irq_resource(pdev, entry->irq);
+#ifdef CONFIG_BQ24261_CHARGER
+ /* charge-current / charge-voltage to register conversion hooks */
+ pmic_pdata.cc_to_reg = bq24261_cc_to_reg;
+ pmic_pdata.cv_to_reg = bq24261_cv_to_reg;
+#endif
+ register_rpmsg_service("rpmsg_pmic_ccsm", RPROC_SCU,
+ RP_PMIC_CCSM);
+out:
+ return &pmic_pdata;
+}
+
--- /dev/null
+/*
+ * platform_mrfl_pmic.h: platform data for pmic driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_PMIC_H_
+#define _PLATFORM_MRFL_PMIC_H_
+
+extern void __init *mrfl_pmic_ccsm_platform_data(
+ void *info) __attribute__((weak));
+
+#endif
--- /dev/null
+/*
+ * platform_mrfl_pmic_i2c.c: Platform data for Merrifield PMIC I2C
+ * adapter driver.
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/power/bq24261_charger.h>
+
+#include "platform_ipc.h"
+#include "platform_mrfl_pmic_i2c.h"
+
+/*
+ * mrfl_pmic_i2c_platform_data - register the PMIC I2C adapter platform
+ * device described by the SFI @info entry. Always returns NULL; the
+ * adapter takes no platform data, only its irq resource and rpmsg
+ * service registration.
+ */
+void __init *mrfl_pmic_i2c_platform_data(void *info)
+{
+ struct sfi_device_table_entry *entry = info;
+ struct platform_device *pdev = NULL;
+ int ret;
+
+ pdev = platform_device_alloc(entry->name, -1);
+ if (!pdev) {
+ pr_err("Out of memory for SFI platform dev %s\n", entry->name);
+ goto out;
+ }
+ pdev->dev.platform_data = NULL;
+ ret = platform_device_add(pdev);
+ if (ret) {
+ pr_err("Failed to add adc platform device\n");
+ platform_device_put(pdev);
+ goto out;
+ }
+ install_irq_resource(pdev, entry->irq);
+ register_rpmsg_service("rpmsg_i2c_pmic_adap", RPROC_SCU,
+ RP_PMIC_I2C);
+out:
+ return NULL;
+}
--- /dev/null
+/*
+ * platform_mrfl_pmic_i2c.h: platform data for pmic i2c adapter driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_PMIC_I2C_H_
+#define _PLATFORM_MRFL_PMIC_I2C_H_
+
+extern void __init *mrfl_pmic_i2c_platform_data(
+ void *info) __attribute__((weak));
+
+#endif
--- /dev/null
+/*
+ * platform_mrfl_regulator.c - Merrifield regulator machine drvier
+ * Copyright (c) 2012, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regulator/intel_basin_cove_pmic.h>
+#include <linux/regulator/machine.h>
+
+#include <asm/intel-mid.h>
+
+/***********VPROG1 REGUATOR platform data*************/
+/* No consumers declared yet for VPROG1. */
+static struct regulator_consumer_supply vprog1_consumer[] = {
+};
+static struct regulator_init_data vprog1_data = {
+ .constraints = {
+ .min_uV = 1500000,
+ .max_uV = 2800000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS
+ | REGULATOR_CHANGE_VOLTAGE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vprog1_consumer),
+ .consumer_supplies = vprog1_consumer,
+};
+
+/* Basin Cove PMIC register + voltage-selection table for VPROG1. */
+static struct intel_pmic_info vprog1_info = {
+ .pmic_reg = VPROG1CNT_ADDR,
+ .init_data = &vprog1_data,
+ .table_len = ARRAY_SIZE(VPROG1_VSEL_table),
+ .table = VPROG1_VSEL_table,
+};
+static struct platform_device vprog1_device = {
+ .name = "intel_regulator",
+ .id = VPROG1,
+ .dev = {
+ .platform_data = &vprog1_info,
+ },
+};
+/***********VPROG2 REGUATOR platform data*************/
+/* No consumers declared yet for VPROG2. */
+static struct regulator_consumer_supply vprog2_consumer[] = {
+};
+static struct regulator_init_data vprog2_data = {
+ .constraints = {
+ .min_uV = 1500000,
+ .max_uV = 2850000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS
+ | REGULATOR_CHANGE_VOLTAGE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vprog2_consumer),
+ .consumer_supplies = vprog2_consumer,
+};
+static struct intel_pmic_info vprog2_info = {
+ .pmic_reg = VPROG2CNT_ADDR,
+ .init_data = &vprog2_data,
+ .table_len = ARRAY_SIZE(VPROG2_VSEL_table),
+ .table = VPROG2_VSEL_table,
+};
+static struct platform_device vprog2_device = {
+ .name = "intel_regulator",
+ .id = VPROG2,
+ .dev = {
+ .platform_data = &vprog2_info,
+ },
+};
+
+/***********VPROG3 REGULATOR platform data*************/
+/* No consumers declared yet for VPROG3. */
+static struct regulator_consumer_supply vprog3_consumer[] = {
+};
+static struct regulator_init_data vprog3_data = {
+ .constraints = {
+ .min_uV = 1050000,
+ .max_uV = 2800000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS
+ | REGULATOR_CHANGE_VOLTAGE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ },
+ /*
+ * Was ARRAY_SIZE(vprog2_consumer) -- copy-paste slip. Harmless
+ * while both arrays are empty, but wrong the moment a consumer
+ * is added to either list.
+ */
+ .num_consumer_supplies = ARRAY_SIZE(vprog3_consumer),
+ .consumer_supplies = vprog3_consumer,
+};
+static struct intel_pmic_info vprog3_info = {
+ .pmic_reg = VPROG3CNT_ADDR,
+ .init_data = &vprog3_data,
+ .table_len = ARRAY_SIZE(VPROG3_VSEL_table),
+ .table = VPROG3_VSEL_table,
+};
+static struct platform_device vprog3_device = {
+ .name = "intel_regulator",
+ .id = VPROG3,
+ .dev = {
+ .platform_data = &vprog3_info,
+ },
+};
+
+static struct platform_device *regulator_devices[] __initdata = {
+ &vprog1_device,
+ &vprog2_device,
+ &vprog3_device,
+};
+
+/*
+ * Register the VPROG1..3 Basin Cove regulators, Tangier only.
+ * Always returns 0; platform_add_devices() errors are ignored.
+ */
+static int __init regulator_init(void)
+{
+ /* register the regulator only if SoC is Tangier */
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+ platform_add_devices(regulator_devices,
+ ARRAY_SIZE(regulator_devices));
+
+ return 0;
+}
+device_initcall(regulator_init);
+/*
+ * platform_mrfl_thermal.c: Platform data initilization file for
+ * Intel Merrifield Platform thermal driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/platform_device.h>
+#include <asm/intel_mid_thermal.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "platform_mrfl_thermal.h"
+
+/* 'enum' of Thermal ADC channels */
+enum thermal_adc_channels { SYS0, SYS1, SYS2, PMIC_DIE };
+
+/*
+ * linear_temp_correlation - map a raw reading to a corrected value:
+ * res = temp * slope / 1000 + intercept. Installed as each sensor's
+ * .temp_correlation hook below. Always returns 0.
+ */
+static int linear_temp_correlation(void *info, long temp, long *res)
+{
+ struct intel_mid_thermal_sensor *sensor = info;
+
+ *res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+ return 0;
+}
+
+/*
+ * Naming convention:
+ * skin0 -> front skin,
+ * skin1--> back skin
+ */
+
+/* Merrifield sensors; .index values are thermal ADC channels above. */
+static struct intel_mid_thermal_sensor mrfl_sensors[] = {
+ {
+ .name = SKIN0_NAME,
+ .index = SYS2,
+ .slope = 969,
+ .intercept = -3741,
+ .temp_correlation = linear_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = SKIN1_NAME,
+ .index = SYS0,
+ .slope = 966,
+ .intercept = -2052,
+ .temp_correlation = linear_temp_correlation,
+ .direct = false,
+ },
+ {
+ /* slope 1000 / intercept 0: identity correlation */
+ .name = MSIC_DIE_NAME,
+ .index = PMIC_DIE,
+ .slope = 1000,
+ .intercept = 0,
+ .temp_correlation = linear_temp_correlation,
+ .direct = true,
+ },
+};
+
+/* Bodegabay - PRh thermal sensor list */
+static struct intel_mid_thermal_sensor bdgb_sensors[] = {
+ {
+ .name = SKIN0_NAME,
+ .index = SYS0,
+ .slope = 410,
+ .intercept = 16808,
+ .temp_correlation = linear_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = SKIN1_NAME,
+ .index = SYS0,
+ .slope = 665,
+ .intercept = 8375,
+ .temp_correlation = linear_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = MSIC_DIE_NAME,
+ .index = PMIC_DIE,
+ .slope = 1000,
+ .intercept = 0,
+ .temp_correlation = linear_temp_correlation,
+ .direct = true,
+ },
+};
+
+/* Platform data variants, indexed by the enum in the header. */
+static struct intel_mid_thermal_platform_data pdata[] = {
+ [mrfl_thermal] = {
+ .num_sensors = 3,
+ .sensors = mrfl_sensors,
+ },
+ [bdgb_thermal] = {
+ .num_sensors = 3,
+ .sensors = bdgb_sensors,
+ },
+};
+
+/*
+ * mrfl_thermal_platform_data - register the Basin Cove thermal platform
+ * device for Merrifield. Returns NULL; the platform data is attached
+ * to the device itself, not handed back to the SFI layer.
+ */
+void __init *mrfl_thermal_platform_data(void *info)
+{
+ struct platform_device *pdev;
+ struct sfi_device_table_entry *entry = info;
+
+ pdev = platform_device_alloc(MRFL_THERM_DEV_NAME, -1);
+ if (!pdev) {
+ pr_err("out of memory for SFI platform dev %s\n",
+ MRFL_THERM_DEV_NAME);
+ return NULL;
+ }
+
+ /*
+ * Attach the platform data before adding the device so a driver
+ * probed from platform_device_add() never sees NULL pdata
+ * (previously it was assigned only after the add).
+ */
+ pdev->dev.platform_data = &pdata[mrfl_thermal];
+
+ if (platform_device_add(pdev)) {
+ pr_err("failed to add thermal platform device\n");
+ platform_device_put(pdev);
+ return NULL;
+ }
+
+ install_irq_resource(pdev, entry->irq);
+ register_rpmsg_service("rpmsg_mrfl_thermal", RPROC_SCU,
+ RP_BCOVE_THERMAL);
+
+ return NULL;
+}
--- /dev/null
+/*
+ * platform_mrfl_thermal.h: Platform data initilization file for
+ * Intel Merrifield Platform thermal driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_THERMAL_H_
+#define _PLATFORM_MRFL_THERMAL_H_
+
+#define MRFL_THERM_DEV_NAME "bcove_thrm"
+
+extern void __init *mrfl_thermal_platform_data(void *)
+ __attribute__((weak));
+
+enum {
+ mrfl_thermal,
+ bdgb_thermal,
+};
+
+#endif
--- /dev/null
+/*
+ * platform_mrfld_audio.c: MRFLD audio platform data initilization file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dharageswari R <dharageswari.r@intel.com>
+ * Vinod Koul <vinod.koul@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <asm/platform_sst_audio.h>
+#include "platform_mrfld_audio.h"
+#include "platform_msic.h"
+#include "platform_wm8994.h"
+
+/* Codec name selectable on the kernel command line (root-readable). */
+static char* audio_codec = "dummy";
+module_param(audio_codec, charp, S_IRUSR);
+MODULE_PARM_DESC(audio_codec, "Hardware codec's name in use");
+
+/* Shared pdata: codec interrupt/reset GPIOs, filled at registration. */
+static struct mrfld_audio_platform_data mrfld_audio_pdata;
+
+/*
+ * merfld_audio_platform_data - register the Merrifield audio stack:
+ * the SST platform device, an hdmi-audio device, and the lm49453
+ * machine device carrying mrfld_audio_pdata. Always returns NULL.
+ */
+void *merfld_audio_platform_data(void *info)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pr_debug("in %s\n", __func__);
+
+ ret = add_sst_platform_device();
+ if (ret < 0) {
+ pr_err("%s failed to sst_platform device\n", __func__);
+ return NULL;
+ }
+
+ pdev = platform_device_alloc("hdmi-audio", -1);
+ if (!pdev) {
+ pr_err("failed to allocate hdmi-audio platform device\n");
+ return NULL;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ pr_err("failed to add hdmi-audio platform device\n");
+ platform_device_put(pdev);
+ return NULL;
+ }
+
+ /* request the gpios for audio */
+ mrfld_audio_pdata.codec_gpio = get_gpio_by_name("audiocodec_int");
+ mrfld_audio_pdata.codec_rst = get_gpio_by_name("audiocodec_rst");
+
+ /* pdev is reused: the hdmi-audio device above stays registered */
+ pdev = platform_device_alloc("mrfld_lm49453", -1);
+ if (!pdev) {
+ pr_err("failed to allocate mrfld_lm49453 platform device\n");
+ return NULL;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ pr_err("failed to add mrfld_lm49453 platform device\n");
+ platform_device_put(pdev);
+ return NULL;
+ }
+ if (platform_device_add_data(pdev, &mrfld_audio_pdata,
+ sizeof(mrfld_audio_pdata))) {
+ pr_err("failed to add mrfld_lm49453 platform data\n");
+ platform_device_put(pdev);
+ return NULL;
+ }
+
+ register_rpmsg_service("rpmsg_msic_mrfld_audio", RPROC_SCU,
+ RP_MSIC_MRFLD_AUDIO);
+
+ return NULL;
+}
+
+/*
+ * mrfld_sst_audio_platform_data - register the Merrifield machine driver
+ * @info: SFI device table entry (unused)
+ *
+ * Registers the SST DSP device, then the machine device matching the
+ * "audio_codec" module parameter; unknown codec names fall back to the
+ * dummy machine driver. Always returns NULL.
+ */
+void *mrfld_sst_audio_platform_data(void *info)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	ret = add_sst_platform_device();
+	if (ret < 0) {
+		pr_err("%s failed to add sst_platform device\n", __func__);
+		return NULL;
+	}
+
+	if (!audio_codec || !strcmp(audio_codec, "dummy")) {
+		pdev = platform_device_register_simple("merr_dpcm_dummy",
+							0, NULL, 0);
+		/* register_simple() returns ERR_PTR() on failure, never NULL */
+		if (IS_ERR(pdev)) {
+			pr_err("failed to register merr_dpcm_dummy platform device\n");
+			return NULL;
+		}
+	} else if (!strcmp(audio_codec, "wm8958")) {
+		/* Register i2c audio codec wm8958 */
+		wm8958_platform_data(NULL);
+
+		pdev = platform_device_alloc("mrfld_wm8958", -1);
+		if (!pdev) {
+			pr_err("failed to allocate mrfld_wm8958 platform device\n");
+			return NULL;
+		}
+
+		/* pdata must be attached before the device is added */
+		if (platform_device_add_data(pdev, &mrfld_audio_pdata,
+					     sizeof(mrfld_audio_pdata))) {
+			pr_err("failed to add mrfld_wm8958 platform data\n");
+			platform_device_put(pdev);
+			return NULL;
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			pr_err("failed to add mrfld_wm8958 platform device\n");
+			platform_device_put(pdev);
+			return NULL;
+		}
+
+		register_rpmsg_service("rpmsg_mrfld_wm8958_audio", RPROC_SCU,
+					RP_MSIC_MRFLD_AUDIO);
+	} else {
+		/*
+		 * To add a new codec, add an "else if" branch with its
+		 * name and its specific setup.
+		 */
+		pr_info("Codec %s is not implemented. Dummy codec selected...\n",
+			audio_codec);
+
+		pdev = platform_device_register_simple("merr_dpcm_dummy",
+							0, NULL, 0);
+		if (IS_ERR(pdev)) {
+			pr_err("failed to register merr_dpcm_dummy platform device\n");
+			return NULL;
+		}
+	}
+
+	return NULL;
+}
--- /dev/null
+/*
+ * platform_mrfld_audio.h: MRFLD audio platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFLD_AUDIO_H_
+#define _PLATFORM_MRFLD_AUDIO_H_
+
+#include <linux/sfi.h>
+
+struct mrfld_audio_platform_data {
+ const struct soft_platform_id *spid;
+ int codec_gpio;
+ int codec_rst;
+};
+
+extern void __init *merfld_audio_platform_data(void *info) __attribute__((weak));
+extern void __init *mrfld_sst_audio_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic.c: MSIC platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+
+/* Shared MSIC platform data; per-block SFI handlers fill in their IRQ. */
+struct intel_msic_platform_data msic_pdata;
+
+/* The 64-byte MSIC interrupt tree at its fixed physical address. */
+static struct resource msic_resources[] = {
+	{
+		.start	= INTEL_MSIC_IRQ_PHYS_BASE,
+		.end	= INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+/* Registered/unregistered from the SCU availability notifier below. */
+static struct platform_device msic_device = {
+	.name		= "intel_msic",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &msic_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(msic_resources),
+	.resource	= msic_resources,
+};
+
+/* Only Penwell-based MIDs carry an MSIC. */
+inline bool intel_mid_has_msic(void)
+{
+	return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
+}
+
+/*
+ * SCU availability notifier: the MSIC device is only usable while the
+ * SCU is up, so register/unregister it as the SCU comes and goes.
+ */
+static int msic_scu_status_change(struct notifier_block *nb,
+				  unsigned long code, void *data)
+{
+	if (code != SCU_DOWN)
+		return platform_device_register(&msic_device);
+
+	platform_device_unregister(&msic_device);
+	return 0;
+}
+
+/* Hook the MSIC device life cycle to SCU availability on MSIC platforms. */
+static int __init msic_init(void)
+{
+	static struct notifier_block msic_scu_notifier = {
+		.notifier_call	= msic_scu_status_change,
+	};
+
+	/*
+	 * We need to be sure that the SCU IPC is ready before MSIC device
+	 * can be registered.
+	 */
+	if (intel_mid_has_msic())
+		intel_scu_notifier_add(&msic_scu_notifier);
+
+	/* Actual device registration happens from the notifier above. */
+	return 0;
+}
+arch_initcall(msic_init);
+
+/*
+ * msic_generic_platform_data - sets generic platform data for the block
+ * @info: pointer to the SFI device table entry for this block
+ * @block: MSIC block
+ *
+ * Function sets IRQ number from the SFI table entry for given device to
+ * the MSIC platform data.
+ */
+void *msic_generic_platform_data(void *info, enum intel_msic_block block)
+{
+	struct sfi_device_table_entry *entry = info;
+
+	/* An out-of-range block index is a programming error, not runtime */
+	BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
+	msic_pdata.irq[block] = entry->irq;
+
+	return NULL;
+}
--- /dev/null
+/*
+ * platform_msic.h: MSIC platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_H_
+#define _PLATFORM_MSIC_H_
+
+#include <linux/mfd/intel_msic.h>
+
+extern struct intel_msic_platform_data msic_pdata;
+
+extern void *msic_generic_platform_data(void *info,
+ enum intel_msic_block block) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic_adc.c: MSIC ADC platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_gpadc.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_adc.h"
+
+/*
+ * msic_adc_platform_data - set up the MSIC GPADC device
+ * @info: SFI device table entry carrying the ADC IRQ
+ *
+ * NOTE(review): the static pdata is returned even when device
+ * registration fails (both goto paths) -- presumably the SFI layer only
+ * consumes the pdata pointer; confirm before changing.
+ */
+void __init *msic_adc_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	static struct intel_mid_gpadc_platform_data msic_adc_pdata;
+	int ret = 0;
+
+	pdev = platform_device_alloc(ADC_DEVICE_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					ADC_DEVICE_NAME);
+		goto out;
+	}
+
+	/* ADC interrupt mask value -- TODO confirm against platform spec */
+	msic_adc_pdata.intr = 0xffff7fc0;
+
+	pdev->dev.platform_data = &msic_adc_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	install_irq_resource(pdev, entry->irq);
+
+	register_rpmsg_service("rpmsg_msic_adc", RPROC_SCU,
+				RP_MSIC_ADC);
+out:
+	return &msic_adc_pdata;
+}
--- /dev/null
+/*
+ * platform_msic_adc.h: MSIC ADC platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_ADC_H_
+#define _PLATFORM_MSIC_ADC_H_
+
+#define ADC_DEVICE_NAME "msic_adc"
+
+extern void __init *msic_adc_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic_audio.c: MSIC audio platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_audio.h"
+
+/*
+ * msic_audio_platform_data - set up the MSIC audio block
+ * @info: SFI device table entry carrying the audio IRQ
+ *
+ * Creates the SST platform device and records the audio block IRQ in
+ * the shared MSIC platform data.
+ */
+void *msic_audio_platform_data(void *info)
+{
+	struct platform_device *sst;
+
+	/* The SST DSP device must exist before the MSIC audio block. */
+	sst = platform_device_register_simple("sst-platform", -1, NULL, 0);
+	if (IS_ERR(sst)) {
+		pr_err("failed to create audio platform device\n");
+		return NULL;
+	}
+
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
+}
--- /dev/null
+/*
+ * platform_msic_audio.h: MSIC audio platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_AUDIO_H_
+#define _PLATFORM_MSIC_AUDIO_H_
+
+extern void __init *msic_audio_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic_battery.c: MSIC battery platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_battery.h"
+
+/* MSIC battery block: only the SFI-provided IRQ needs recording. */
+void __init *msic_battery_platform_data(void *info)
+{
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
+}
--- /dev/null
+/*
+ * platform_msic_battery.h: MSIC battery platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_BATTERY_H_
+#define _PLATFORM_MSIC_BATTERY_H_
+
+extern void __init *msic_battery_platform_data(void *info)
+ __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic_gpio.c: MSIC GPIO platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_gpio.h"
+
+/*
+ * msic_gpio_platform_data - register the MSIC/PMIC GPIO expander device
+ * @info: SFI device table entry carrying the GPIO IRQ
+ *
+ * Returns the static GPIO platform data on success, NULL on failure.
+ */
+void __init *msic_gpio_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	static struct intel_msic_gpio_pdata msic_gpio_pdata;
+	int ret;
+	int gpio;
+	struct resource res;
+
+	/* Resolve the GPIO base first so a missing SFI entry does not
+	 * leak a freshly allocated platform device.
+	 */
+	gpio = get_gpio_by_name("msic_gpio_base");
+	if (gpio < 0)
+		return NULL;
+
+	pdev = platform_device_alloc(MSIC_GPIO_DEVICE_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					MSIC_GPIO_DEVICE_NAME);
+		return NULL;
+	}
+
+	/* Basincove PMIC GPIO has total 8 GPIO pins,
+	 * GPIO[5:2,0] support 1.8V, GPIO[7:6,1] support 1.8V and 3.3V,
+	 * We group GPIO[5:2] to low voltage and GPIO[7:6] to
+	 * high voltage. Because the CTL registers are contiguous,
+	 * this grouping method doesn't affect the driver usage but
+	 * easy for the driver sharing among multiple platforms.
+	 */
+	msic_gpio_pdata.ngpio_lv = 6;
+	msic_gpio_pdata.ngpio_hv = 2;
+	msic_gpio_pdata.gpio0_lv_ctlo = 0x7E;
+	msic_gpio_pdata.gpio0_lv_ctli = 0x8E;
+	msic_gpio_pdata.gpio0_hv_ctlo = 0x84;
+	msic_gpio_pdata.gpio0_hv_ctli = 0x94;
+
+	msic_gpio_pdata.can_sleep = 1;
+	msic_gpio_pdata.gpio_base = gpio;
+
+	pdev->dev.platform_data = &msic_gpio_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add msic gpio platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	/* Fully initialize res: the old comma-expression form left .end
+	 * and the remaining fields as stack garbage.
+	 */
+	memset(&res, 0, sizeof(res));
+	res.name = "IRQ";
+	res.flags = IORESOURCE_IRQ;
+	res.start = entry->irq;
+	res.end = entry->irq;
+	platform_device_add_resources(pdev, &res, 1);
+
+	register_rpmsg_service("rpmsg_msic_gpio", RPROC_SCU, RP_MSIC_GPIO);
+
+	return &msic_gpio_pdata;
+}
--- /dev/null
+/*
+ * platform_msic_gpio.h: MSIC GPIO platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_GPIO_H_
+#define _PLATFORM_MSIC_GPIO_H_
+
+#define MSIC_GPIO_DEVICE_NAME "msic_gpio"
+
+extern void __init *msic_gpio_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic_ocd.c: MSIC OCD platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_ocd.h"
+
+/*
+ * msic_ocd_platform_data - set up the MSIC over-current detect block
+ * @info: SFI device table entry carrying the OCD IRQ
+ *
+ * Bails out (returns NULL) when the board does not route an "ocd_gpio".
+ */
+void __init *msic_ocd_platform_data(void *info)
+{
+	static struct intel_msic_ocd_pdata msic_ocd_pdata;
+	int gpio;
+
+	gpio = get_gpio_by_name("ocd_gpio");
+
+	if (gpio < 0)
+		return NULL;
+
+	msic_ocd_pdata.gpio = gpio;
+	msic_pdata.ocd = &msic_ocd_pdata;
+
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
+}
+
--- /dev/null
+/*
+ * platform_msic_ocd.h: MSIC OCD platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_OCD_H_
+#define _PLATFORM_MSIC_OCD_H_
+
+extern void __init *msic_ocd_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic_power_btn.c: MSIC power btn platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_powerbtn.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include "platform_msic_power_btn.h"
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define BCOVE_PBIRQ 0x02
+#define BCOVE_PBIRQMASK 0x0d
+
+static struct intel_msic_power_btn_platform_data msic_power_btn_pdata;
+
+/* Ack/unmask the power-button interrupt via SCU IPC register updates.
+ * NOTE(review): the intel_scu_ipc_update_register() return values are
+ * ignored, so IPC failures here are silent -- confirm this is intended.
+ */
+static int mrfl_pb_irq_ack(struct intel_msic_power_btn_platform_data *pdata)
+{
+	intel_scu_ipc_update_register(BCOVE_PBIRQ, 0, MSIC_PWRBTNM);
+	intel_scu_ipc_update_register(BCOVE_PBIRQMASK, 0, MSIC_PWRBTNM);
+
+	return 0;
+}
+
+/*
+ * msic_power_btn_platform_data - register the MID power-button device
+ * @info: SFI device table entry carrying the power-button IRQ
+ *
+ * Returns the static platform data on success, NULL on failure.
+ */
+void __init *msic_power_btn_platform_data(void *info)
+{
+	int ret;
+	struct platform_device *pdev;
+	struct sfi_device_table_entry *entry = info;
+	struct resource res;
+
+	pdev = platform_device_alloc(INTEL_MID_POWERBTN_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("%s(): out of memory\n", __func__);
+		return NULL;
+	}
+
+	/* Register address/bit values -- TODO confirm against platform spec */
+	msic_power_btn_pdata.pbstat = 0xfffff61a;
+	msic_power_btn_pdata.pb_level = (1 << 4);
+	msic_power_btn_pdata.irq_lvl1_mask = 0x0c;
+	msic_power_btn_pdata.irq_ack = mrfl_pb_irq_ack;
+
+	pdev->dev.platform_data = &msic_power_btn_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("%s(): platform_device_add() failed\n", __func__);
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	/* Fully initialize res: the old comma-expression form left .end
+	 * and the remaining fields as stack garbage.
+	 */
+	memset(&res, 0, sizeof(res));
+	res.name = "IRQ";
+	res.flags = IORESOURCE_IRQ;
+	res.start = entry->irq;
+	res.end = entry->irq;
+	platform_device_add_resources(pdev, &res, 1);
+
+	register_rpmsg_service("rpmsg_mid_powerbtn",
+				RPROC_SCU, RP_MSIC_POWER_BTN);
+
+	return &msic_power_btn_pdata;
+}
--- /dev/null
+/*
+ * platform_msic_power_btn.h: MSIC power btn platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_POWER_BTN_H_
+#define _PLATFORM_MSIC_POWER_BTN_H_
+
+#define INTEL_MID_POWERBTN_DEV_NAME "mid_powerbtn"
+
+extern void __init *msic_power_btn_platform_data(void *info)
+ __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_msic_thermal.c: msic_thermal platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_gpadc.h>
+#include <asm/intel_mid_thermal.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_thermal.h"
+
+/* ctp thermal sensor list */
+static struct intel_mid_thermal_sensor ctp_sensors[] = {
+ {
+ .name = SKIN0_NAME,
+ .index = 0,
+ .slope = 410,
+ .intercept = 16808,
+ .adc_channel = 0x04 | CH_NEED_VREF | CH_NEED_VCALIB,
+ .temp_correlation = skin0_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = SKIN1_NAME,
+ .index = 1,
+ .slope = 665,
+ .intercept = 8375,
+ .adc_channel = 0x04 | CH_NEED_VREF | CH_NEED_VCALIB,
+ .temp_correlation = skin0_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = MSIC_DIE_NAME,
+ .index = 2,
+ .slope = 368,
+ .intercept = 219560,
+ .adc_channel = 0x03 | CH_NEED_VCALIB,
+ .direct = true,
+ },
+ {
+ .name = BPTHERM_NAME,
+ .index = 3,
+ .slope = 788,
+ .intercept = 5065,
+ .adc_channel = 0x09 | CH_NEED_VREF | CH_NEED_VCALIB,
+ .temp_correlation = bptherm_temp_correlation,
+ .direct = false,
+ },
+
+};
+
+/* mfld thermal sensor list */
+static struct intel_mid_thermal_sensor mfld_sensors[] = {
+ {
+ .name = SKIN0_NAME,
+ .index = 0,
+ .slope = 851,
+ .intercept = 2800,
+ .adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+ .temp_correlation = skin0_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = SKIN1_NAME,
+ .index = 1,
+ .slope = 806,
+ .intercept = 1800,
+ .adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+ .temp_correlation = skin1_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = MSIC_SYS_NAME,
+ .index = 2,
+ .slope = 0,
+ .intercept = 0,
+ .adc_channel = 0x0A | CH_NEED_VREF | CH_NEED_VCALIB,
+ .direct = false,
+ },
+ {
+ .name = MSIC_DIE_NAME,
+ .index = 3,
+ .slope = 368,
+ .intercept = 219560,
+ .adc_channel = 0x03 | CH_NEED_VCALIB,
+ .direct = true,
+ },
+
+};
+
+/* LEX thermal sensor list */
+static struct intel_mid_thermal_sensor lex_sensors[] = {
+ {
+ .name = SKIN0_NAME,
+ .index = 0,
+ .slope = 851,
+ .intercept = 2800,
+ .adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+ .temp_correlation = skin0_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = SKIN1_NAME,
+ .index = 1,
+ .slope = 806,
+ .intercept = 1800,
+ .adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+ .temp_correlation = skin1_temp_correlation,
+ .direct = false,
+ },
+ {
+ .name = MSIC_SYS_NAME,
+ .index = 2,
+ .slope = 0,
+ .intercept = 0,
+ .adc_channel = 0x0A | CH_NEED_VREF | CH_NEED_VCALIB,
+ .direct = false,
+ },
+ {
+ .name = MSIC_DIE_NAME,
+ .index = 3,
+ .slope = 368,
+ .intercept = 219560,
+ .adc_channel = 0x03 | CH_NEED_VCALIB,
+ .direct = true,
+ },
+
+};
+
+
+/* Per-platform thermal descriptors, indexed by the enum in
+ * platform_msic_thermal.h.
+ * NOTE(review): vb_thermal has no entry here, so pdata[vb_thermal]
+ * would index past the array -- confirm no caller uses it.
+ */
+static struct intel_mid_thermal_platform_data pdata[] = {
+	[mfld_thermal] = {
+		.num_sensors = 4,
+		.sensors = mfld_sensors,
+		.soc_cooling = false,
+	},
+	[ctp_thermal] = {
+		.num_sensors = 4,
+		.sensors = ctp_sensors,
+		.soc_cooling = true,
+	},
+	[lex_thermal] = {
+		.num_sensors = 4,
+		.sensors = lex_sensors,
+		.soc_cooling = false,
+	},
+};
+
+/*
+ * msic_thermal_platform_data - register the MSIC thermal device
+ * @info: SFI device table entry (unused)
+ *
+ * Registers the msic_thermal device with the Medfield sensor set and
+ * announces the thermal rpmsg service. Returns NULL in all cases.
+ */
+void __init *msic_thermal_platform_data(void *info)
+{
+	struct platform_device *pdev;
+
+	pdev = platform_device_alloc(MSIC_THERM_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					MSIC_THERM_DEV_NAME);
+		return NULL;
+	}
+
+	/* Attach pdata before adding the device so the driver can never
+	 * probe against a NULL platform_data.
+	 */
+	pdev->dev.platform_data = &pdata[mfld_thermal];
+
+	if (platform_device_add(pdev)) {
+		pr_err("failed to add thermal platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	register_rpmsg_service("rpmsg_mid_thermal", RPROC_SCU, RP_MSIC_THERMAL);
+
+	return NULL;
+}
--- /dev/null
+/*
+ * platform_msic_thermal.h: msic_thermal platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_THERMAL_H_
+#define _PLATFORM_MSIC_THERMAL_H_
+
+#define MSIC_THERM_DEV_NAME "msic_thermal"
+
+extern void __init *msic_thermal_platform_data(void *info)
+ __attribute__((weak));
+enum {
+ mfld_thermal,
+ ctp_thermal,
+ lex_thermal,
+ vb_thermal,
+};
+
+#endif
--- /dev/null
+/*
+ * platform_pcal9555a.c: pcal9555a platform data initilization file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <asm/intel-mid.h>
+#include "platform_pcal9555a.h"
+
+
+/*
+ * pcal9555a_platform_data - build pdata for one PCAL9555A GPIO expander
+ * @info: i2c_board_info from the SFI table; its type field names this
+ *        instance and is rewritten to the generic "pcal9555a"
+ *
+ * Looks up "<type>_base" and "<type>_int" GPIOs from SFI. Returns the
+ * per-instance pca953x platform data, or NULL on error.
+ */
+void __init *pcal9555a_platform_data(void *info)
+{
+	static struct pca953x_platform_data pcal9555a_pdata[PCAL9555A_NUM];
+	static int nr;
+	struct pca953x_platform_data *pcal9555a;
+	struct i2c_board_info *i2c_info = info;
+	int gpio_base, intr;
+	char base_pin_name[SFI_NAME_LEN + 1];
+	char intr_pin_name[SFI_NAME_LEN + 1];
+
+	if (!info) {
+		pr_err("%s: invalid info pointer\n", __func__);
+		return NULL;
+	}
+
+	if (nr >= PCAL9555A_NUM) {
+		pr_err("%s: too many pcal9555a, we only support %d\n",
+			__func__, PCAL9555A_NUM);
+		return NULL;
+	}
+	pcal9555a = &pcal9555a_pdata[nr++];
+
+	/* we have several pcal9555a on the board, we only need load several
+	 * instances of the same pca953x driver to cover them
+	 */
+
+	snprintf(base_pin_name, sizeof(base_pin_name),
+		"%s_base", i2c_info->type);
+	snprintf(intr_pin_name, sizeof(intr_pin_name),
+		"%s_int", i2c_info->type);
+
+	strcpy(i2c_info->type, "pcal9555a");
+
+	gpio_base = get_gpio_by_name(base_pin_name);
+	intr = get_gpio_by_name(intr_pin_name);
+
+	/* get_gpio_by_name() may return any negative errno, not just -1 */
+	if (gpio_base < 0)
+		return NULL;
+	pcal9555a->gpio_base = gpio_base;
+	if (intr >= 0) {
+		i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+		pcal9555a->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	} else {
+		i2c_info->irq = -1;
+		pcal9555a->irq_base = -1;
+	}
+	return pcal9555a;
+}
--- /dev/null
+/*
+ * platform_pcal9555a.h: pcal9555a platform data header file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PCAL9555A_H_
+#define _PLATFORM_PCAL9555A_H_
+
+/* we have multiple pcal9555a on the board ... */
+#define PCAL9555A_NUM 4
+
+extern void __init *pcal9555a_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_pmic_gpio.c: PMIC GPIO platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <linux/intel_pmic_gpio.h>
+#include "platform_pmic_gpio.h"
+
+/*
+ * pmic_gpio_platform_data - build pdata for the PMIC GPIO expander
+ * @info: SFI device table entry (unused)
+ *
+ * Falls back to GPIO base 64 when the SFI table has no
+ * "pmic_gpio_base" entry. Returns the static platform data.
+ */
+void __init *pmic_gpio_platform_data(void *info)
+{
+	static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
+	int gpio_base = get_gpio_by_name("pmic_gpio_base");
+
+	/* get_gpio_by_name() may return any negative errno, not just -1 */
+	if (gpio_base < 0)
+		gpio_base = 64;
+	pmic_gpio_pdata.gpio_base = gpio_base;
+	pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	pmic_gpio_pdata.gpiointr = 0xffffeff8;
+
+	return &pmic_gpio_pdata;
+}
+
--- /dev/null
+/*
+ * platform_pmic_gpio.h: PMIC GPIO platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PMIC_GPIO_H_
+#define _PLATFORM_PMIC_GPIO_H_
+
+extern void __init *pmic_gpio_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_scu_flis.c: scu_flis platform data initilization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Ning Li <ning.li@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_flis.h>
+#include "platform_scu_flis.h"
+
+static struct pin_mmio_flis_t tng_pin_mmio_flis_table[TNG_PIN_NUM] = {
+ [tng_usb_ulpi_0_clk] = { writable, 0x0500 },
+ [tng_usb_ulpi_0_data_0] = { writable, 0x0504 },
+ [tng_usb_ulpi_0_data_1] = { writable, 0x0508 },
+ [tng_usb_ulpi_0_data_2] = { writable, 0x050C },
+ [tng_usb_ulpi_0_data_3] = { writable, 0x0510 },
+ [tng_usb_ulpi_0_data_4] = { writable, 0x0514 },
+ [tng_usb_ulpi_0_data_5] = { writable, 0x0518 },
+ [tng_usb_ulpi_0_data_6] = { writable, 0x051C },
+ [tng_usb_ulpi_0_data_7] = { writable, 0x0520 },
+ [tng_usb_ulpi_0_dir] = { writable, 0x0524 },
+ [tng_usb_ulpi_0_nxt] = { writable, 0x0528 },
+ [tng_usb_ulpi_0_refclk] = { writable, 0x052C },
+ [tng_usb_ulpi_0_stp] = { writable, 0x0530 },
+ [tng_emmc_0_clk] = { writable, 0x0900 },
+ [tng_emmc_0_cmd] = { writable, 0x0904 },
+ [tng_emmc_0_d_0] = { writable, 0x0908 },
+ [tng_emmc_0_d_1] = { writable, 0x090C },
+ [tng_emmc_0_d_2] = { writable, 0x0910 },
+ [tng_emmc_0_d_3] = { writable, 0x0914 },
+ [tng_emmc_0_d_4] = { writable, 0x0918 },
+ [tng_emmc_0_d_5] = { writable, 0x091C },
+ [tng_emmc_0_d_6] = { writable, 0x0920 },
+ [tng_emmc_0_d_7] = { writable, 0x0924 },
+ [tng_emmc_0_rst_b] = { writable, 0x0928 },
+ [tng_gp_emmc_1_clk] = { writable, 0x092C },
+ [tng_gp_emmc_1_cmd] = { writable, 0x0930 },
+ [tng_gp_emmc_1_d_0] = { writable, 0x0934 },
+ [tng_gp_emmc_1_d_1] = { writable, 0x0938 },
+ [tng_gp_emmc_1_d_2] = { writable, 0x093C },
+ [tng_gp_emmc_1_d_3] = { writable, 0x0940 },
+ [tng_gp_emmc_1_d_4] = { writable, 0x0944 },
+ [tng_gp_emmc_1_d_5] = { writable, 0x0948 },
+ [tng_gp_emmc_1_d_6] = { writable, 0x094C },
+ [tng_gp_emmc_1_d_7] = { writable, 0x0950 },
+ [tng_gp_emmc_1_rst_b] = { writable, 0x0954 },
+ [tng_gp_28] = { writable, 0x0958 },
+ [tng_gp_29] = { writable, 0x095C },
+ [tng_gp_sdio_0_cd_b] = { writable, 0x0D00 },
+ [tng_gp_sdio_0_clk] = { writable, 0x0D04 },
+ [tng_gp_sdio_0_cmd] = { writable, 0x0D08 },
+ [tng_gp_sdio_0_dat_0] = { writable, 0x0D0C },
+ [tng_gp_sdio_0_dat_1] = { writable, 0x0D10 },
+ [tng_gp_sdio_0_dat_2] = { writable, 0x0D14 },
+ [tng_gp_sdio_0_dat_3] = { writable, 0x0D18 },
+ [tng_gp_sdio_0_lvl_clk_fb] = { writable, 0x0D1C },
+ [tng_gp_sdio_0_lvl_cmd_dir] = { writable, 0x0D20 },
+ [tng_gp_sdio_0_lvl_dat_dir] = { writable, 0x0D24 },
+ [tng_gp_sdio_0_lvl_sel] = { writable, 0x0D28 },
+ [tng_gp_sdio_0_powerdown_b] = { writable, 0x0D2C },
+ [tng_gp_sdio_0_wp] = { writable, 0x0D30 },
+ [tng_gp_sdio_1_clk] = { writable, 0x0D34 },
+ [tng_gp_sdio_1_cmd] = { writable, 0x0D38 },
+ [tng_gp_sdio_1_dat_0] = { writable, 0x0D3C },
+ [tng_gp_sdio_1_dat_1] = { writable, 0x0D40 },
+ [tng_gp_sdio_1_dat_2] = { writable, 0x0D44 },
+ [tng_gp_sdio_1_dat_3] = { writable, 0x0D48 },
+ [tng_gp_sdio_1_powerdown_b] = { writable, 0x0D4C },
+ [tng_mhsi_acdata] = { writable, 0x1100 },
+ [tng_mhsi_acflag] = { writable, 0x1104 },
+ [tng_mhsi_acready] = { writable, 0x1108 },
+ [tng_mhsi_acwake] = { writable, 0x110C },
+ [tng_mhsi_cadata] = { writable, 0x1110 },
+ [tng_mhsi_caflag] = { writable, 0x1114 },
+ [tng_mhsi_caready] = { writable, 0x1118 },
+ [tng_mhsi_cawake] = { writable, 0x111C },
+ [tng_gp_mslim_0_bclk] = { writable, 0x1500 },
+ [tng_gp_mslim_0_bdat] = { writable, 0x1504 },
+ [tng_gp_ssp_0_clk] = { writable, 0x1508 },
+ [tng_gp_ssp_0_fs] = { writable, 0x150C },
+ [tng_gp_ssp_0_rxd] = { writable, 0x1510 },
+ [tng_gp_ssp_0_txd] = { writable, 0x1514 },
+ [tng_gp_ssp_1_clk] = { writable, 0x1518 },
+ [tng_gp_ssp_1_fs] = { writable, 0x151C },
+ [tng_gp_ssp_1_rxd] = { writable, 0x1520 },
+ [tng_gp_ssp_1_txd] = { writable, 0x1524 },
+ [tng_gp_ssp_2_clk] = { writable, 0x1528 },
+ [tng_gp_ssp_2_fs] = { writable, 0x152C },
+ [tng_gp_ssp_2_rxd] = { writable, 0x1530 },
+ [tng_gp_ssp_2_txd] = { writable, 0x1534 },
+ [tng_gp_ssp_3_clk] = { writable, 0x1900 },
+ [tng_gp_ssp_3_fs] = { writable, 0x1904 },
+ [tng_gp_ssp_3_rxd] = { writable, 0x1908 },
+ [tng_gp_ssp_3_txd] = { writable, 0x190C },
+ [tng_gp_ssp_4_clk] = { writable, 0x1910 },
+ [tng_gp_ssp_4_fs_0] = { writable, 0x1914 },
+ [tng_gp_ssp_4_fs_1] = { writable, 0x1918 },
+ [tng_gp_ssp_4_fs_2] = { writable, 0x191C },
+ [tng_gp_ssp_4_fs_3] = { writable, 0x1920 },
+ [tng_gp_ssp_4_rxd] = { writable, 0x1924 },
+ [tng_gp_ssp_4_txd] = { writable, 0x1928 },
+ [tng_gp_ssp_5_clk] = { writable, 0x192C },
+ [tng_gp_ssp_5_fs_0] = { writable, 0x1930 },
+ [tng_gp_ssp_5_fs_1] = { writable, 0x1934 },
+ [tng_gp_ssp_5_fs_2] = { writable, 0x1938 },
+ [tng_gp_ssp_5_fs_3] = { writable, 0x193C },
+ [tng_gp_ssp_5_rxd] = { writable, 0x1940 },
+ [tng_gp_ssp_5_txd] = { writable, 0x1944 },
+ [tng_gp_ssp_6_clk] = { writable, 0x1948 },
+ [tng_gp_ssp_6_fs] = { writable, 0x194C },
+ [tng_gp_ssp_6_rxd] = { writable, 0x1950 },
+ [tng_gp_ssp_6_txd] = { writable, 0x1954 },
+ [tng_gp_i2c_1_scl] = { writable, 0x1D00 },
+ [tng_gp_i2c_1_sda] = { writable, 0x1D04 },
+ [tng_gp_i2c_2_scl] = { writable, 0x1D08 },
+ [tng_gp_i2c_2_sda] = { writable, 0x1D0C },
+ [tng_gp_i2c_3_scl] = { writable, 0x1D10 },
+ [tng_gp_i2c_3_sda] = { writable, 0x1D14 },
+ [tng_gp_i2c_4_scl] = { writable, 0x1D18 },
+ [tng_gp_i2c_4_sda] = { writable, 0x1D1C },
+ [tng_gp_i2c_5_scl] = { writable, 0x1D20 },
+ [tng_gp_i2c_5_sda] = { writable, 0x1D24 },
+ [tng_gp_i2c_6_scl] = { writable, 0x1D28 },
+ [tng_gp_i2c_6_sda] = { writable, 0x1D2C },
+ [tng_gp_i2c_7_scl] = { writable, 0x1D30 },
+ [tng_gp_i2c_7_sda] = { writable, 0x1D34 },
+ [tng_gp_uart_0_cts] = { writable, 0x2100 },
+ [tng_gp_uart_0_rts] = { writable, 0x2104 },
+ [tng_gp_uart_0_rx] = { writable, 0x2108 },
+ [tng_gp_uart_0_tx] = { writable, 0x210C },
+ [tng_gp_uart_1_cts] = { writable, 0x2110 },
+ [tng_gp_uart_1_rts] = { writable, 0x2114 },
+ [tng_gp_uart_1_rx] = { writable, 0x2118 },
+ [tng_gp_uart_1_tx] = { writable, 0x211C },
+ [tng_gp_uart_2_cts] = { writable, 0x2120 },
+ [tng_gp_uart_2_rts] = { writable, 0x2124 },
+ [tng_gp_uart_2_rx] = { writable, 0x2128 },
+ [tng_gp_uart_2_tx] = { writable, 0x212C },
+ [tng_gp_13] = { writable, 0x2500 },
+ [tng_gp_14] = { writable, 0x2504 },
+ [tng_gp_15] = { writable, 0x2508 },
+ [tng_gp_16] = { writable, 0x250C },
+ [tng_gp_17] = { writable, 0x2510 },
+ [tng_gp_18] = { writable, 0x2514 },
+ [tng_gp_19] = { writable, 0x2518 },
+ [tng_gp_20] = { writable, 0x251C },
+ [tng_gp_21] = { writable, 0x2520 },
+ [tng_gp_22] = { writable, 0x2524 },
+ [tng_gp_23] = { writable, 0x2528 },
+ [tng_gp_24] = { writable, 0x252C },
+ [tng_gp_25] = { writable, 0x2530 },
+ [tng_gp_fast_int_0] = { writable, 0x2534 },
+ [tng_gp_fast_int_1] = { writable, 0x2538 },
+ [tng_gp_fast_int_2] = { writable, 0x253C },
+ [tng_gp_fast_int_3] = { writable, 0x2540 },
+ [tng_gp_pwm_0] = { writable, 0x2544 },
+ [tng_gp_pwm_1] = { writable, 0x2548 },
+ [tng_gp_camerasb_0] = { writable, 0x2900 },
+ [tng_gp_camerasb_1] = { writable, 0x2904 },
+ [tng_gp_camerasb_2] = { writable, 0x2908 },
+ [tng_gp_camerasb_3] = { writable, 0x290C },
+ [tng_gp_camerasb_4] = { writable, 0x2910 },
+ [tng_gp_camerasb_5] = { writable, 0x2914 },
+ [tng_gp_camerasb_6] = { writable, 0x2918 },
+ [tng_gp_camerasb_7] = { writable, 0x291C },
+ [tng_gp_camerasb_8] = { writable, 0x2920 },
+ [tng_gp_camerasb_9] = { writable, 0x2924 },
+ [tng_gp_camerasb_10] = { writable, 0x2928 },
+ [tng_gp_camerasb_11] = { writable, 0x292C },
+ [tng_gp_clkph_0] = { writable, 0x2D00 },
+ [tng_gp_clkph_1] = { writable, 0x2D04 },
+ [tng_gp_clkph_2] = { writable, 0x2D08 },
+ [tng_gp_clkph_3] = { writable, 0x2D0C },
+ [tng_gp_clkph_4] = { writable, 0x2D10 },
+ [tng_gp_clkph_5] = { writable, 0x2D14 },
+ [tng_gp_hdmi_hpd] = { writable, 0x2D18 },
+ [tng_gp_intd_dsi_te1] = { writable, 0x2D1C },
+ [tng_gp_intd_dsi_te2] = { writable, 0x2D20 },
+ [tng_osc_clk_ctrl_0] = { writable, 0x2D24 },
+ [tng_osc_clk_ctrl_1] = { writable, 0x2D28 },
+ [tng_osc_clk_out_0] = { writable, 0x2D2C },
+ [tng_osc_clk_out_1] = { writable, 0x2D30 },
+ [tng_osc_clk_out_2] = { writable, 0x2D34 },
+ [tng_osc_clk_out_3] = { writable, 0x2D38 },
+ [tng_osc_clk_out_4] = { writable, 0x2D3C },
+ [tng_resetout_b] = { writable, 0x2D40 },
+ [tng_xxpmode] = { writable, 0x2D44 },
+ [tng_xxprdy] = { writable, 0x2D48 },
+ [tng_xxpreq_b] = { writable, 0x2D4C },
+ [tng_gp_26] = { writable, 0x2D50 },
+ [tng_gp_27] = { writable, 0x2D54 },
+ [tng_i2c_0_scl] = { writable, 0x3100 },
+ [tng_i2c_0_sda] = { writable, 0x3104 },
+ [tng_ierr_b] = { writable, 0x3108 },
+ [tng_jtag_tckc] = { writable, 0x310C },
+ [tng_jtag_tdic] = { writable, 0x3110 },
+ [tng_jtag_tdoc] = { writable, 0x3114 },
+ [tng_jtag_tmsc] = { writable, 0x3118 },
+ [tng_jtag_trst_b] = { writable, 0x311C },
+ [tng_prochot_b] = { writable, 0x3120 },
+ [tng_rtc_clk] = { writable, 0x3124 },
+ [tng_svid_vclk] = { writable, 0x3128 },
+ [tng_svid_vdio] = { writable, 0x3130 },
+ [tng_thermtrip_b] = { writable, 0x3134 },
+ [tng_standby] = { writable, 0x3138 },
+ [tng_gp_kbd_dkin_0] = { writable, 0x3500 },
+ [tng_gp_kbd_dkin_1] = { writable, 0x3504 },
+ [tng_gp_kbd_dkin_2] = { writable, 0x3508 },
+ [tng_gp_kbd_dkin_3] = { writable, 0x350C },
+ [tng_gp_kbd_mkin_0] = { writable, 0x3510 },
+ [tng_gp_kbd_mkin_1] = { writable, 0x3514 },
+ [tng_gp_kbd_mkin_2] = { writable, 0x3518 },
+ [tng_gp_kbd_mkin_3] = { writable, 0x351C },
+ [tng_gp_kbd_mkin_4] = { writable, 0x3520 },
+ [tng_gp_kbd_mkin_5] = { writable, 0x3524 },
+ [tng_gp_kbd_mkin_6] = { writable, 0x3528 },
+ [tng_gp_kbd_mkin_7] = { writable, 0x352C },
+ [tng_gp_kbd_mkout_0] = { writable, 0x3530 },
+ [tng_gp_kbd_mkout_1] = { writable, 0x3534 },
+ [tng_gp_kbd_mkout_2] = { writable, 0x3538 },
+ [tng_gp_kbd_mkout_3] = { writable, 0x353C },
+ [tng_gp_kbd_mkout_4] = { writable, 0x3540 },
+ [tng_gp_kbd_mkout_5] = { writable, 0x3544 },
+ [tng_gp_kbd_mkout_6] = { writable, 0x3548 },
+ [tng_gp_kbd_mkout_7] = { writable, 0x354C },
+ [tng_gp_0] = { writable, 0x3900 },
+ [tng_gp_1] = { writable, 0x3904 },
+ [tng_gp_2] = { writable, 0x3908 },
+ [tng_gp_3] = { writable, 0x390C },
+ [tng_gp_4] = { writable, 0x3910 },
+ [tng_gp_5] = { writable, 0x3914 },
+ [tng_gp_6] = { writable, 0x3918 },
+ [tng_gp_7] = { writable, 0x391C },
+ [tng_gp_8] = { writable, 0x3920 },
+ [tng_gp_9] = { writable, 0x3924 },
+ [tng_gp_10] = { writable, 0x3928 },
+ [tng_gp_11] = { writable, 0x392C },
+ [tng_gp_12] = { writable, 0x3930 },
+ [tng_gp_mpti_clk] = { writable, 0x3D00 },
+ [tng_gp_mpti_data_0] = { writable, 0x3D04 },
+ [tng_gp_mpti_data_1] = { writable, 0x3D08 },
+ [tng_gp_mpti_data_2] = { writable, 0x3D0C },
+ [tng_gp_mpti_data_3] = { writable, 0x3D10 },
+};
+
+/*
+ * Register the "intel_scu_flis" platform device describing the Tangier
+ * FLIS (pin mux/config) MMIO region so the SCU FLIS driver can bind.
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int __init intel_scu_flis_init(void)
+{
+	int ret;
+	struct platform_device *pdev = NULL;
+	/* static: the registered device keeps a pointer to this data,
+	 * so it must outlive this __init function */
+	static struct intel_scu_flis_platform_data flis_pdata;
+
+	flis_pdata.pin_t = NULL;
+	flis_pdata.pin_num = TNG_PIN_NUM;
+	flis_pdata.flis_base = 0xFF0C0000;
+	flis_pdata.flis_len = 0x8000;
+	flis_pdata.mmio_flis_t = tng_pin_mmio_flis_table;
+
+	pdev = platform_device_alloc(FLIS_DEVICE_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for platform dev %s\n", FLIS_DEVICE_NAME);
+		ret = -ENOMEM;	/* was -EINVAL: this is an allocation failure */
+		goto out;
+	}
+
+	pdev->dev.platform_data = &flis_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add flis platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	pr_info("intel_scu_flis platform device created\n");
+out:
+	return ret;
+}
+fs_initcall(intel_scu_flis_init);
+
--- /dev/null
+/*
+ * platform_scu_flis.h: scu_flis platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SCU_FLIS_H_
+#define _PLATFORM_SCU_FLIS_H_
+
+#define FLIS_DEVICE_NAME "intel_scu_flis"
+
+#endif
--- /dev/null
+/*
+ * platform_sdio_regulator.c: sdio regulator platform device initilization file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author: chuanxiao.dong@intel.com, feiyix.ning@intel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+#define DELAY_ONOFF 250
+
+/* Minimal HID/UID pair used to identify an ACPI device. */
+struct acpi_ids { char *hid; char *uid; };
+
+/* SDIO controllers hosting the WLAN function, matched on HID + UID. */
+static struct acpi_ids intel_sdio_ids[] = {
+	{"INT33BB", "2"}, /* BYT SDIO */
+	{ },
+};
+
+/* Broadcom WLAN ACPI devices, matched on HID prefix only. */
+static struct acpi_ids intel_brc_ids[] = {
+	{"BCM4321", NULL}, /* BYT SDIO */
+	{ },
+};
+
+/* Consumer of the fixed regulator; dev_name is filled in at runtime
+ * once the matching ACPI device has been found. */
+static struct regulator_consumer_supply wlan_vmmc_supply = {
+	.supply = "vmmc",
+};
+
+static struct regulator_init_data wlan_vmmc_data = {
+	.constraints = {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies = 1,
+	.consumer_supplies = &wlan_vmmc_supply,
+};
+
+/* Fixed "regulator" that really just drives the wlan-enable GPIO.
+ * .gpio is a placeholder; the real pin is resolved from ACPI. */
+static struct fixed_voltage_config vwlan = {
+	.supply_name = "wlan_en_acpi",
+	.microvolts = 1800000,
+	.gpio = -EINVAL,
+	.startup_delay = 1000 * DELAY_ONOFF, /* 1000 * 250 (unit per fixed-regulator API) */
+	.enable_high = 1,
+	.enabled_at_boot = 0,
+	.init_data = &wlan_vmmc_data,
+};
+
+/* Empty release: the device is static, nothing to free. */
+static void vwlan_device_release(struct device *dev) {}
+
+static struct platform_device vwlan_device = {
+	.name = "reg-fixed-voltage",
+	.id = PLATFORM_DEVID_AUTO,
+	.dev = {
+		.platform_data = &vwlan,
+		.release = vwlan_device_release,
+	},
+};
+
+/*
+ * Walk up the ACPI namespace from @handle until a node with an attached
+ * struct acpi_device is found.  A NULL @handle denotes fixed hardware
+ * and yields the fabricated ACPI root device.  Returns NULL if the root
+ * device itself cannot be resolved.
+ */
+static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
+{
+	struct acpi_device *device = NULL;
+	acpi_status status;
+	int result;
+	struct acpi_device *acpi_root;
+
+	result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
+	if (result)
+		return NULL;
+
+	/*
+	 * Fixed hardware devices do not appear in the namespace and do not
+	 * have handles, but we fabricate acpi_devices for them, so we have
+	 * to deal with them specially.
+	 */
+	if (!handle)
+		return acpi_root;
+
+	do {
+		status = acpi_get_parent(handle, &handle);
+		if (ACPI_FAILURE(status))
+			/* AE_NULL_ENTRY: walked off the top of the namespace */
+			return status == AE_NULL_ENTRY ? NULL : acpi_root;
+	} while (acpi_bus_get_device(handle, &device));
+
+	return device;
+}
+
+/*
+ * bus_find_device() callback: match a platform device against an
+ * acpi_ids entry on both HID (device-name prefix) and ACPI UID.
+ * Returns true on a full match, false otherwise.
+ */
+static int sdio_acpi_match(struct device *dev, void *data)
+{
+	struct acpi_ids *ids = data;
+	acpi_handle handle = ACPI_HANDLE(dev);	/* was struct acpi_handle *: wrong type */
+	struct acpi_device_info *info;
+	acpi_status status;
+	int match = false;
+
+	status = acpi_get_object_info(handle, &info);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	if ((info->valid & ACPI_VALID_UID) &&
+	    !strncmp(ids->hid, dev_name(dev), strlen(ids->hid)) &&
+	    !strcmp(ids->uid, info->unique_id.string))
+		match = true;
+
+	/* acpi_get_object_info() allocates the info buffer; the original
+	 * code leaked it on every path */
+	kfree(info);
+
+	return match;
+}
+
+/*
+ * bus_find_device() callback: match a platform device against an
+ * acpi_ids entry on the HID prefix of the device name only.
+ */
+static int brc_acpi_match(struct device *dev, void *data)
+{
+	struct acpi_ids *ids = data;
+	size_t hid_len = strlen(ids->hid);
+
+	return strncmp(ids->hid, dev_name(dev), hid_len) == 0;
+}
+
+
+/*
+ * Find the Broadcom WLAN ACPI device, resolve its wlan-enable GPIO
+ * (GPIO index 1), point the regulator consumer at the device's parent
+ * (the SDIO host), and register the fixed-voltage regulator.
+ * Returns 0 on success, -ENODEV when no usable device/GPIO was found,
+ * or the platform_device_register() error code.
+ */
+static int brc_fixed_regulator_register_by_acpi(struct platform_device *pdev)
+{
+	struct device *dev;
+	struct acpi_ids *brc_ids;
+	struct fixed_voltage_config *fixedcfg = NULL;
+	struct regulator_init_data *data = NULL;
+	acpi_handle handle;	/* was struct acpi_handle *: wrong type for ACPI_HANDLE() */
+	struct acpi_device *parent;
+
+	if (!pdev)
+		return -ENODEV;
+	fixedcfg = pdev->dev.platform_data;
+	if (!fixedcfg)
+		return -ENODEV;
+	data = fixedcfg->init_data;
+	if (!data || !data->consumer_supplies)
+		return -ENODEV;
+
+	/* get the GPIO pin from ACPI device first */
+	for (brc_ids = intel_brc_ids; brc_ids->hid; brc_ids++) {
+		dev = bus_find_device(&platform_bus_type, NULL,
+					brc_ids, brc_acpi_match);
+		if (!dev)
+			continue;
+
+		handle = ACPI_HANDLE(dev);	/* fetch once, test the same value */
+		if (!handle)
+			continue;
+		parent = acpi_bus_get_parent(handle);
+		if (!parent)
+			continue;
+
+		/* the supply consumer is the WLAN device's parent host */
+		data->consumer_supplies->dev_name = dev_name(&parent->dev);
+		fixedcfg->gpio = acpi_get_gpio_by_index(dev, 1, NULL);
+		if (fixedcfg->gpio < 0) {
+			dev_info(dev, "No wlan-enable GPIO\n");
+			continue;
+		}
+		dev_info(dev, "wlan-enable GPIO %d found\n",
+				fixedcfg->gpio);
+		break;
+	}
+
+	if (brc_ids->hid) {
+		/* add a regulator to control wlan enable gpio */
+		return platform_device_register(&vwlan_device);
+	}
+
+	return -ENODEV;
+}
+
+/*
+ * Counterpart of brc_fixed_regulator_register_by_acpi() keyed on SDIO
+ * controller ACPI IDs (HID + UID): finds the matching platform device,
+ * resolves its wlan-enable GPIO (index 0) and registers the fixed
+ * regulator.  Returns 0 on success, -ENODEV otherwise.
+ * NOTE(review): not called from wifi_regulator_init() — confirm whether
+ * this path is intentionally unused.
+ */
+static int sdio_fixed_regulator_register_by_acpi(struct platform_device *pdev)
+{
+	struct device *dev;
+	struct acpi_ids *sdio_ids;
+	struct fixed_voltage_config *fixedcfg = NULL;
+	struct regulator_init_data *data = NULL;
+
+	if (!pdev)
+		return -ENODEV;
+	fixedcfg = pdev->dev.platform_data;
+	if (!fixedcfg)
+		return -ENODEV;
+	data = fixedcfg->init_data;
+	if (!data || !data->consumer_supplies)
+		return -ENODEV;
+
+	/* get the GPIO pin from ACPI device first */
+	for (sdio_ids = intel_sdio_ids; sdio_ids->hid; sdio_ids++) {
+		dev = bus_find_device(&platform_bus_type, NULL,
+				sdio_ids, sdio_acpi_match);
+		if (dev) {
+			/* here the supply consumer is the controller itself */
+			data->consumer_supplies->dev_name = dev_name(dev);
+
+			fixedcfg->gpio = acpi_get_gpio_by_index(dev, 0, NULL);
+			if (fixedcfg->gpio < 0) {
+				dev_info(dev, "No wlan-enable GPIO\n");
+				continue;
+			}
+			dev_info(dev, "wlan-enable GPIO %d found\n",
+					fixedcfg->gpio);
+			break;
+		}
+	}
+
+	if (sdio_ids->hid) {
+		/* add a regulator to control wlan enable gpio */
+		return platform_device_register(&vwlan_device);
+	}
+
+	return -ENODEV;
+}
+
+/*
+ * rootfs initcall: register the WLAN enable regulator via ACPI.
+ * Only the Broadcom (HID-prefix) lookup is attempted here.
+ * Returns 0 on success or the lookup's negative error code.
+ */
+static int __init wifi_regulator_init(void)
+{
+	int ret;
+	/* register fixed regulator through ACPI device */
+	ret = brc_fixed_regulator_register_by_acpi(&vwlan_device);
+	if (!ret)
+		return ret;
+
+	pr_err("%s: No SDIO host in platform devices\n", __func__);
+	return ret;
+}
+rootfs_initcall(wifi_regulator_init);
--- /dev/null
+/*
+ * platform_soc_thermal.c: Platform data for SoC DTS driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#define pr_fmt(fmt) "intel_soc_thermal: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include "platform_soc_thermal.h"
+
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_thermal.h>
+
+#define BYT_SOC_THRM_IRQ 86
+#define BYT_SOC_THRM "soc_thrm"
+
+/* IRQ resource template shared by the SoC thermal platform devices;
+ * .start is filled in just before each registration. */
+static struct resource res = {
+	.flags = IORESOURCE_IRQ,
+};
+
+/* Tangier throttle states: successively lower power limits, no floor
+ * frequency imposed. */
+static struct soc_throttle_data tng_soc_data[] = {
+	{
+		.power_limit = 0x9C,
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x8C,
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x7C,
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x6C,
+		.floor_freq = 0x00,
+	},
+};
+
+/* Valleyview2 throttle states: power limits with a floor frequency
+ * enforced for the lower states. */
+static struct soc_throttle_data vlv2_soc_data[] = {
+	{
+		.power_limit = 0xDA, /* 7W */
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x6D, /* 3.5W */
+		.floor_freq = 0x01,
+	},
+	{
+		.power_limit = 0x2E, /* 1.5W */
+		.floor_freq = 0x01,
+	},
+	{
+		.power_limit = 0x2E, /* 1.5W */
+		.floor_freq = 0x01,
+	},
+};
+
+/*
+ * SFI devs-id handler for the SoC thermal sensor: register a platform
+ * device named after the SFI entry, using the entry's IRQ, and attach
+ * the Tangier throttle table as platform data.
+ */
+void soc_thrm_device_handler(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct platform_device *pdev;
+
+	pr_info("IPC bus = %d, name = %16.16s, irq = 0x%2x\n",
+		pentry->host_num, pentry->name, pentry->irq);
+
+	res.start = pentry->irq;
+
+	pdev = platform_device_register_simple(pentry->name, -1,
+				(const struct resource *)&res, 1);
+	if (IS_ERR(pdev)) {
+		pr_err("platform_soc_thermal:pdev_register failed: %ld\n",
+			PTR_ERR(pdev));
+		/* original fell through and dereferenced the ERR_PTR */
+		return;
+	}
+
+	pdev->dev.platform_data = &tng_soc_data;
+}
+
+/*
+ * Route @irq through the IO-APIC with the given trigger mode and
+ * polarity.  Returns 0 on success or -EINVAL when no IO-APIC serves
+ * this interrupt line.
+ */
+static inline int byt_program_ioapic(int irq, int trigger, int polarity)
+{
+	struct io_apic_irq_attr irq_attr;
+	int ioapic;
+
+	ioapic = mp_find_ioapic(irq);
+	if (ioapic < 0)
+		return -EINVAL;
+	irq_attr.ioapic = ioapic;
+	/* pin number equals the global IRQ number here */
+	irq_attr.ioapic_pin = irq;
+	irq_attr.trigger = trigger;
+	irq_attr.polarity = polarity;
+	return io_apic_set_pci_routing(NULL, irq, &irq_attr);
+}
+
+/*
+ * Register the Baytrail "soc_thrm" platform device on IRQ 86, program
+ * the IO-APIC routing for that IRQ and attach the VLV2 throttle table.
+ * Returns 0 on success or a negative errno.
+ */
+static int __init byt_soc_thermal_init(void)
+{
+	int ret;
+	struct platform_device *pdev;
+
+	res.start = BYT_SOC_THRM_IRQ;
+
+	pdev = platform_device_register_simple(BYT_SOC_THRM, -1,
+				(const struct resource *)&res, 1);
+	if (IS_ERR(pdev)) {
+		ret = PTR_ERR(pdev);
+		pr_err("byt_soc_thermal:pdev_register failed: %d\n", ret);
+		return ret;
+	}
+
+	/* attach platform data while pdev is known to be alive */
+	pdev->dev.platform_data = &vlv2_soc_data;
+
+	ret = byt_program_ioapic(BYT_SOC_THRM_IRQ, 0, 1);
+	if (ret) {
+		pr_err("%s: ioapic programming failed", __func__);
+		platform_device_unregister(pdev);
+		/* original wrote pdev->dev.platform_data after this
+		 * unregister - a use-after-unregister */
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Stub initcall: device registration happens via the handlers above,
+ * so there is nothing to do at device_initcall time. */
+static int __init platform_soc_thermal_init(void)
+{
+	return 0;
+}
+device_initcall(platform_soc_thermal_init);
--- /dev/null
+/*
+ * platform_soc_thermal.h: platform SoC thermal driver library header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SOC_THERMAL_H_
+#define _PLATFORM_SOC_THERMAL_H_
+
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+
+extern void soc_thrm_device_handler(struct sfi_device_table_entry *,
+ struct devs_id *) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_spidev.c: spidev platform data initilization file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include "platform_spidev.h"
+
+static void tng_ssp_spi_cs_control(u32 command);
+static void tng_ssp_spi_platform_pinmux(void);
+
+static int tng_ssp_spi2_FS_gpio = 111;
+
+static struct intel_mid_ssp_spi_chip chip = {
+ .burst_size = DFLT_FIFO_BURST_SIZE,
+ .timeout = DFLT_TIMEOUT_VAL,
+ /* SPI DMA is currently not usable on Tangier */
+ .dma_enabled = false,
+ .cs_control = tng_ssp_spi_cs_control,
+ .platform_pinmux = tng_ssp_spi_platform_pinmux,
+};
+
+/* Drive the SSP2 chip-select GPIO: any non-zero command asserts high. */
+static void tng_ssp_spi_cs_control(u32 command)
+{
+	gpio_set_value(tng_ssp_spi2_FS_gpio, command ? 1 : 0);
+}
+
+/*
+ * Reconfigure the SSP2 frame-select pin as a GPIO-driven chip select.
+ * On failure the previous pin mux is restored and the chip config falls
+ * back to the controller's legacy CS handling.
+ */
+static void tng_ssp_spi_platform_pinmux(void)
+{
+	int err;
+	int saved_muxing;
+
+	/* Request Chip Select gpios */
+	saved_muxing = gpio_get_alt(tng_ssp_spi2_FS_gpio);
+
+	lnw_gpio_set_alt(tng_ssp_spi2_FS_gpio, LNW_GPIO);
+	err = gpio_request_one(tng_ssp_spi2_FS_gpio,
+			GPIOF_DIR_OUT|GPIOF_INIT_HIGH, "Arduino Shield SS");
+	if (err) {
+		/* the old backslash-newline string continuation embedded
+		 * the source indentation into the log message */
+		pr_err("%s: unable to get Chip Select GPIO, fallback to legacy CS mode\n",
+			__func__);
+		lnw_gpio_set_alt(tng_ssp_spi2_FS_gpio, saved_muxing);
+		chip.cs_control = NULL;
+		chip.platform_pinmux = NULL;
+	}
+}
+
+/*
+ * Fill in the SPI board info for the spidev device: SPI mode 0,
+ * controller data pointing at the Tangier SSP chip config, and a forced
+ * bus number (workaround for the invalid bus number from IAFW .25).
+ * Always returns NULL; results are written through @info.
+ */
+void __init *spidev_platform_data(void *info)
+{
+	struct spi_board_info *spi_info = info;
+
+	if (!spi_info) {
+		pr_err("%s: invalid info pointer\n", __func__);
+		return NULL;
+	}
+
+	spi_info->mode = SPI_MODE_0;
+
+	spi_info->controller_data = &chip;
+	spi_info->bus_num = FORCE_SPI_BUS_NUM;
+
+	return NULL;
+}
--- /dev/null
+/*
+ * platform_spidev.h: spidev platform data header file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SPIDEV_H_
+#define _PLATFORM_SPIDEV_H_
+
+/* REVERT ME workaround[MRFL] for invalid bus number in IAFW .25 */
+#define FORCE_SPI_BUS_NUM 5
+#define FORCE_CHIP_SELECT 1
+
+extern void *spidev_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_sst_libs.c: SST platform data initilization file
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/asound.h>
+
+static struct sst_platform_data sst_platform_pdata;
+
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+static struct sst_dev_stream_map mrfld_strm_map[] = {
+ {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
+ {MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA1_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_DB, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA3_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_LL, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_LOW_PCM0_IN, SST_TASK_ID_SBA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_VOIP, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_VOIP_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE1_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE2_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE3_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 3, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE4_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 4, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE5_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 5, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE6_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 6, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE7_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 7, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE8_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_VOIP, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VOIP_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 1, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE2_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 2, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE3_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 3, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE4_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 4, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE5_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 5, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE6_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 6, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE7_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_DPCM_PROBE, 7, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE8_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+};
+#else
+static struct sst_dev_stream_map mrfld_strm_map[] = {
+ {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
+ {MERR_SALTBAY_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_AUDIO, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_AUDIO, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_SALTBAY_VOIP, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_VOIP_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_SALTBAY_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_SALTBAY_VOIP, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VOIP_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+ {MERR_SALTBAY_PROBE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 1, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 2, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 3, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 4, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 5, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 6, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 7, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 3, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 4, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 5, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 6, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_PROBE, 7, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+ {MERR_SALTBAY_AWARE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_AWARE_OUT, SST_TASK_ID_AWARE, SST_DEV_MAP_IN_USE},
+ {MERR_SALTBAY_VAD, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VAD_OUT, SST_TASK_ID_AWARE, SST_DEV_MAP_IN_USE},
+};
+#endif
+
+#define EQ_EFFECT_ALGO_ID 0x99
+static struct sst_dev_effects_map mrfld_effs_map[] = {
+ {
+ {0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45},/* uuid */
+ EQ_EFFECT_ALGO_ID, /* algo id */
+ {0x00, 0x43, 0xed, 0x0b, 0xd6, 0xdd, 0xdb, 0x11, 0x34, 0x8f, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b, /* descriptor */
+ 0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45,
+ 0x12, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x45, 0x71, 0x75, 0x61,
+ 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x6e, 0x74, 0x65,
+ 0x6c, 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ }
+};
+
+static struct sst_dev_effects_resource_map mrfld_effs_res_map[] = {
+ {
+ {0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45}, /* uuid */
+ 0x50, /* Flags */
+ 0x00, /* Cpu load */
+ 0x01, /* Memory Usage */
+ }
+};
+
+/* Point the shared sst_platform_pdata at the Merrifield stream and
+ * effects tables defined above. */
+static void set_mrfld_platform_config(void)
+{
+	sst_platform_pdata.pdev_strm_map = mrfld_strm_map;
+	sst_platform_pdata.strm_map_size = ARRAY_SIZE(mrfld_strm_map);
+	sst_platform_pdata.pdev_effs.effs_map = mrfld_effs_map;
+	sst_platform_pdata.pdev_effs.effs_res_map = mrfld_effs_res_map;
+	sst_platform_pdata.pdev_effs.effs_num_map = ARRAY_SIZE(mrfld_effs_map);
+}
+
+/* Per-SoC SST platform data selection hook; only Merrifield here. */
+static void populate_platform_data(void)
+{
+	set_mrfld_platform_config();
+}
+
+/*
+ * Allocate and register the "sst-platform" device carrying the stream
+ * and effects maps.  Returns 0 on success, -EINVAL on any failure.
+ */
+int add_sst_platform_device(void)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	populate_platform_data();
+
+	pdev = platform_device_alloc("sst-platform", -1);
+	if (!pdev) {
+		pr_err("failed to allocate audio platform device\n");
+		return -EINVAL;
+	}
+
+	/* the data is copied, so the static pdata need not persist */
+	ret = platform_device_add_data(pdev, &sst_platform_pdata,
+				       sizeof(sst_platform_pdata));
+	if (ret) {
+		pr_err("failed to add sst platform data\n");
+		goto err_put;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add audio platform device\n");
+		goto err_put;
+	}
+
+	return 0;
+
+err_put:
+	platform_device_put(pdev);
+	return -EINVAL;
+}
--- /dev/null
+/*
+ * platform_tc35876x.c: tc35876x platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c/tc35876x.h>
+#include <asm/intel-mid.h>
+#include "platform_tc35876x.h"
+
+/*tc35876x DSI_LVDS bridge chip and panel platform data*/
+/*
+ * Resolve the bridge/panel control GPIOs from the firmware GPIO table.
+ * Returns a pointer to static platform data (not reentrant).
+ */
+void *tc35876x_platform_data(void *data)
+{
+	static struct tc35876x_platform_data pdata;
+	pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
+	pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
+	pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
+	return &pdata;
+}
--- /dev/null
+/*
+ * platform_tc35876x.h: tc35876x platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_TC35876X_H_
+#define _PLATFORM_TC35876X_H_
+
+extern void *tc35876x_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_tca6416.c: tca6416 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/i2c/pca953x.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_tca6416.h"
+
+/*
+ * Build pca953x platform data for the TCA6416 I2C GPIO expander.  The
+ * GPIO base and interrupt pins come from the firmware GPIO table; the
+ * i2c_board_info type and irq fields are rewritten in place.  Returns
+ * NULL when the base pin is absent, otherwise a pointer to static data
+ * (not reentrant).
+ */
+void *tca6416_platform_data(void *info)
+{
+	static struct pca953x_platform_data tca6416;
+	struct i2c_board_info *i2c_info = info;
+	int gpio_base, intr;
+	char base_pin_name[SFI_NAME_LEN + 1];
+	char intr_pin_name[SFI_NAME_LEN + 1];
+
+	strcpy(i2c_info->type, TCA6416_NAME);
+	strcpy(base_pin_name, TCA6416_BASE);
+	strcpy(intr_pin_name, TCA6416_INTR);
+
+	gpio_base = get_gpio_by_name(base_pin_name);
+	intr = get_gpio_by_name(intr_pin_name);
+
+	/* -1 from get_gpio_by_name() means the pin is not listed */
+	if (gpio_base == -1)
+		return NULL;
+	tca6416.gpio_base = gpio_base;
+	if (intr != -1) {
+		i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+		tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	} else {
+		/* no interrupt pin: disable IRQ support */
+		i2c_info->irq = -1;
+		tca6416.irq_base = -1;
+	}
+	return &tca6416;
+}
--- /dev/null
+/*
+ * platform_tca6416.h: tca6416 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_TCA6416_H_
+#define _PLATFORM_TCA6416_H_
+
+#define TCA6416_NAME "tca6416"
+#define TCA6416_BASE "tca6416_base"
+#define TCA6416_INTR "tca6416_int"
+
+extern void *tca6416_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_bcm43xx.c: bcm43xx platform data initilization file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/wlan_plat.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include "pci/platform_sdhci_pci.h"
+#include "platform_wifi.h"
+
+/* WLAN interrupt resource; start/end (-1 = unset) are filled in later
+ * from either the GPIO table or the SFI fast-IRQ entry. */
+static struct resource wifi_res[] = {
+	{
+		.name = "wlan_irq",
+		.start = -1,
+		.end = -1,
+		.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING ,
+	},
+};
+
+static struct wifi_platform_data pdata;
+
+static struct platform_device wifi_device = {
+	.name = "wlan",
+	.dev = {
+		.platform_data = &pdata,
+	},
+	.num_resources = ARRAY_SIZE(wifi_res),
+	.resource = wifi_res,
+};
+
+/* SDHCI host quirks applied for this WLAN card (see fast-IRQ path) */
+static const unsigned int sdhci_quirk = SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8
+		| SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY
+		| SDHCI_QUIRK2_ADVERTISE_3V0_FORCE_1V8
+		| SDHCI_QUIRK2_NON_STD_CIS;
+
+/*
+ * Fast-IRQ setup helper: take the interrupt number straight from the
+ * SFI device table entry (no GPIO lookup needed on this path) and
+ * optionally register the wifi platform device.
+ * @pentry:          SFI table entry carrying the WLAN IRQ.
+ * @should_register: false when the GPIO path already registered the
+ *                   device, to avoid a duplicate registration.
+ */
+static void __init wifi_platform_data_init_sfi_fastirq(struct sfi_device_table_entry *pentry,
+						       bool should_register)
+{
+	/* If the GPIO mode was previously called, this code overloads
+	   the IRQ anyway */
+	wifi_res[0].start = wifi_res[0].end = pentry->irq;
+	wifi_res[0].flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH;
+
+	pr_info("wifi_platform_data: IRQ == %d\n", pentry->irq);
+
+	if (should_register && platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/*
+ * Called if SFI device WLAN is present: switch the wifi platform data
+ * over to fast-IRQ mode.  Handles both orderings relative to the GPIO
+ * path - if the GPIO path already ran, the IRQ resource is simply
+ * overwritten and the device is not registered a second time.
+ */
+void __init wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+				       struct devs_id *dev)
+{
+	/* This is used in the driver to know if it is GPIO/FastIRQ */
+	pdata.use_fast_irq = true;
+
+	/* start == -1 means no path has filled the IRQ resource yet */
+	if (wifi_res[0].start == -1) {
+		pr_info("Using WiFi platform data (Fast IRQ)\n");
+
+		/* Set vendor specific SDIO quirks */
+		sdhci_pdata_set_quirks(sdhci_quirk);
+		wifi_platform_data_init_sfi_fastirq(pe, true);
+	} else {
+		pr_info("Using WiFi platform data (Fast IRQ, overloading GPIO mode set previously)\n");
+		/* We do not register platform device, as it's already been
+		   done by wifi_platform_data */
+		wifi_platform_data_init_sfi_fastirq(pe, false);
+	}
+
+}
+
+/*
+ * GPIO legacy code path: resolve the WLAN interrupt GPIO from the SFI
+ * table, store it as the wlan_irq resource and register the wifi
+ * platform device.  Bails out (no device registered) when the GPIO is
+ * not present in the table.
+ */
+static void __init wifi_platform_data_init_sfi_gpio(void)
+{
+	int wifi_irq_gpio = -1;
+
+	/*Get GPIO numbers from the SFI table*/
+	wifi_irq_gpio = get_gpio_by_name(WIFI_SFI_GPIO_IRQ_NAME);
+	if (wifi_irq_gpio < 0) {
+		pr_err("%s: Unable to find " WIFI_SFI_GPIO_IRQ_NAME
+		       " WLAN-interrupt GPIO in the SFI table\n",
+		       __func__);
+		return;
+	}
+
+	/* The resource holds the GPIO number; the driver converts it */
+	wifi_res[0].start = wifi_res[0].end = wifi_irq_gpio;
+	pr_info("wifi_platform_data: GPIO == %d\n", wifi_irq_gpio);
+
+	if (platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/*
+ * Called from board.c: generic SFI callback for the WLAN device.
+ * Sets the vendor SDIO quirks and, on SFI (non-ACPI) systems, runs the
+ * legacy GPIO setup.  Returns the wifi platform device, or NULL when
+ * the fast-IRQ path has already configured the IRQ resource.
+ */
+void __init *wifi_platform_data(void *info)
+{
+	/* When fast IRQ platform data has been called first, don't pursue */
+	if (wifi_res[0].start != -1)
+		return NULL;
+
+	pr_info("Using generic wifi platform data\n");
+
+	/* Set vendor specific SDIO quirks */
+#ifdef CONFIG_MMC_SDHCI_PCI
+	sdhci_pdata_set_quirks(sdhci_quirk);
+#endif
+
+#ifndef CONFIG_ACPI
+	/* We are SFI here, register platform device */
+	wifi_platform_data_init_sfi_gpio();
+#endif
+
+	return &wifi_device;
+}
--- /dev/null
+/*
+ * platform_wifi.h: WiFi platform data header file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_WIFI_H_
+#define _PLATFORM_WIFI_H_
+
+#define WIFI_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
+#define WIFI_SFI_GPIO_ENABLE_NAME "WLAN-enable"
+
+extern void __init *wifi_platform_data(void *info) __attribute__((weak));
+extern void wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+ struct devs_id *dev) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_wl12xx.c: wl12xx platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/wl12xx.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <asm/intel-mid.h>
+#include "platform_wl12xx.h"
+
+static int wl12xx_platform_init(struct wl12xx_platform_data *platform_data);
+static void wl12xx_platform_deinit(struct wl12xx_platform_data *platform_data);
+
+static struct wl12xx_platform_data mid_wifi_control = {
+ .board_ref_clock = 1,
+ .irq = 2,
+ .gpio = -EINVAL,
+ .board_tcxo_clock = 1,
+ .platform_quirks = WL12XX_PLATFORM_QUIRK_EDGE_IRQ,
+ .hw_init = wl12xx_platform_init,
+ .hw_deinit = wl12xx_platform_deinit,
+};
+
+/*
+ * dev_name is patched at runtime by wl12xx_platform_data_init() with
+ * the PCI address taken from the SFI table, so it must live in a
+ * writable buffer: pointing .dev_name at a string literal and then
+ * sprintf()ing into it writes read-only memory (undefined behaviour).
+ * 16 bytes leaves headroom over the "0000:00:xx.x" default.
+ */
+static char wl12xx_vmmc3_dev_name[16] = "0000:00:00.0"; /*default value*/
+
+static struct regulator_consumer_supply wl12xx_vmmc3_supply = {
+	.supply		= "vmmc",
+	.dev_name	= wl12xx_vmmc3_dev_name,
+};
+
+static struct regulator_init_data wl12xx_vmmc3 = {
+	.constraints = {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies = 1,
+	.consumer_supplies = &wl12xx_vmmc3_supply,
+};
+
+static struct fixed_voltage_config wl12xx_vwlan = {
+ .supply_name = "vwl1271",
+ .microvolts = 1800000,
+ .gpio = 75,
+ .startup_delay = 70000,
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &wl12xx_vmmc3,
+};
+
+static struct platform_device wl12xx_vwlan_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &wl12xx_vwlan,
+ },
+};
+
+/*
+ * Pull the wl12xx GPIO and reference-clock configuration out of the
+ * SFI tables, hand it to the wl12xx core via
+ * wl12xx_set_platform_data(), and register the fixed regulator the
+ * MMC stack uses to power the SDIO card via runtime PM.
+ * @info: struct sd_board_info from the SFI SD device entry.
+ */
+void __init wl12xx_platform_data_init(void *info)
+{
+	struct sd_board_info *sd_info = info;
+	int err;
+
+	/*Get GPIO numbers from the SFI table*/
+	mid_wifi_control.gpio = get_gpio_by_name(WL12XX_SFI_GPIO_IRQ_NAME);
+	if (mid_wifi_control.gpio == -1) {
+		pr_err("%s: Unable to find WLAN-interrupt GPIO in the SFI table\n",
+		       __func__);
+		return;
+	}
+
+	/* Set our board_ref_clock from SFI SD board info */
+	if (sd_info->board_ref_clock == ICDK_BOARD_REF_CLK)
+		/*iCDK board*/
+		/*26Mhz TCXO clock ref*/
+		mid_wifi_control.board_ref_clock = 1;
+	else if (sd_info->board_ref_clock == NCDK_BOARD_REF_CLK)
+		/*nCDK board*/
+		/*38,4Mhz TCXO clock ref*/
+		mid_wifi_control.board_ref_clock = 2;
+	err = wl12xx_set_platform_data(&mid_wifi_control);
+	if (err < 0)
+		pr_err("error setting wl12xx data\n");
+
+	/* this is the fake regulator that mmc stack use to power of the
+	   wifi sdio card via runtime_pm apis */
+	wl12xx_vwlan.gpio = get_gpio_by_name(WL12XX_SFI_GPIO_ENABLE_NAME);
+	if (wl12xx_vwlan.gpio == -1) {
+		pr_err("%s: Unable to find WLAN-enable GPIO in the SFI table\n",
+		       __func__);
+		return;
+	}
+	/* format vmmc reg address from sfi table */
+	/* NOTE(review): if dev_name still points at its string-literal
+	   default, sprintf() here writes read-only memory (UB); it
+	   should target a writable buffer, and a bounded snprintf()
+	   would be safer - confirm against the supply definition */
+	sprintf((char *)wl12xx_vmmc3_supply.dev_name, "0000:00:%02x.%01x",
+		(sd_info->addr)>>8, sd_info->addr&0xFF);
+
+	err = platform_device_register(&wl12xx_vwlan_device);
+	if (err < 0)
+		pr_err("error platform_device_register\n");
+
+}
+
+/*
+ * SFI device-table callback for the wl12xx: initialise the platform
+ * data from the SFI tables and return it for attachment to the device.
+ */
+void __init *wl12xx_platform_data(void *info)
+{
+	wl12xx_platform_data_init(info);
+
+	return &mid_wifi_control;
+}
+
+/*
+ * hw_init hook handed to the wl12xx core: request the interrupt GPIO,
+ * configure it as an input, derive the IRQ from it and poke the sdhci
+ * regulator machinery.
+ * Returns 0 on success or a negative errno.  The requested GPIO is
+ * released again on every failure path so it is not leaked (the
+ * original code leaked it when gpio_direction_input()/gpio_to_irq()
+ * failed, and also returned 0 on gpio_to_irq() failure).
+ */
+static int wl12xx_platform_init(struct wl12xx_platform_data *platform_data)
+{
+	int err = 0;
+
+	if (IS_ERR(platform_data)) {
+		err = PTR_ERR(platform_data);
+		pr_err("%s: missing wlan platform data: %d\n", __func__, err);
+		goto out;
+	}
+
+	/* gpio must be set to -EINVAL by platform code if
+	   gpio based irq is not used*/
+
+	if (gpio_is_valid(platform_data->gpio)) {
+		if (!platform_data->gpio)
+			pr_warn("using GPIO %d for wl12xx\n",
+				platform_data->gpio);
+
+		/* Request gpio */
+		err = gpio_request(platform_data->gpio, "wl12xx");
+		if (err < 0) {
+			pr_err("%s: Unable to request GPIO:%d, err:%d\n",
+			       __func__, platform_data->gpio, err);
+			goto out;
+		}
+
+		/* set gpio direction */
+		err = gpio_direction_input(platform_data->gpio);
+		if (err < 0) {
+			pr_err("%s: Unable to set GPIO:%d direction, err:%d\n",
+			       __func__, platform_data->gpio, err);
+			goto out_free;
+		}
+
+		/* convert gpio to irq */
+		platform_data->irq = gpio_to_irq(platform_data->gpio);
+		if (platform_data->irq < 0) {
+			pr_err("%s: Error gpio_to_irq:%d->%d\n", __func__,
+			       platform_data->gpio,
+			       platform_data->irq);
+			err = platform_data->irq;
+			goto out_free;
+		}
+	}
+
+	sdhci_pci_request_regulators();
+
+	pr_info("%s done\n", __func__);
+	return 0;
+
+out_free:
+	gpio_free(platform_data->gpio);
+out:
+	return err;
+}
+
+/*
+ * hw_deinit hook: release the interrupt GPIO acquired by
+ * wl12xx_platform_init(), if any.
+ */
+static void wl12xx_platform_deinit(struct wl12xx_platform_data *pdata)
+{
+	/* get platform data and free the gpio */
+	if (IS_ERR(pdata)) {
+		pr_err("%s: missing wlan platform data\n", __func__);
+		goto out;
+	}
+
+	if (gpio_is_valid(pdata->gpio)) {
+		/* NOTE(review): this warns only when the GPIO is 0 -
+		   the condition looks inverted; confirm intent */
+		if (!pdata->gpio)
+			pr_warn("using GPIO %d for wl12xx\n", pdata->gpio);
+		gpio_free(pdata->gpio);
+	}
+out:
+	return ;
+}
--- /dev/null
+/*
+ * platform_wl12xx.h: wl12xx platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_WL12XX_H_
+#define _PLATFORM_WL12XX_H_
+
+#define WL12XX_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
+#define WL12XX_SFI_GPIO_ENABLE_NAME "WLAN-enable"
+#define ICDK_BOARD_REF_CLK 26000000
+#define NCDK_BOARD_REF_CLK 38400000
+
+extern void __init *wl12xx_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * platform_wm8994.c: wm8994 platform data initilization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "platform_wm8994.h"
+
+/***********WM89941 REGUATOR platform data*************/
+static struct regulator_consumer_supply vwm89941_consumer[] = {
+ REGULATOR_SUPPLY("DBVDD", "1-001a"),
+ REGULATOR_SUPPLY("DBVDD1", "1-001a"),
+ REGULATOR_SUPPLY("DBVDD2", "1-001a"),
+ REGULATOR_SUPPLY("DBVDD3", "1-001a"),
+ REGULATOR_SUPPLY("AVDD2", "1-001a"),
+ REGULATOR_SUPPLY("CPVDD", "1-001a"),
+};
+
+static struct regulator_init_data vwm89941_data = {
+ .constraints = {
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vwm89941_consumer),
+ .consumer_supplies = vwm89941_consumer,
+};
+
+static struct fixed_voltage_config vwm89941_config = {
+ .supply_name = "VCC_1.8V_PDA",
+ .microvolts = 1800000,
+ .gpio = -EINVAL,
+ .init_data = &vwm89941_data,
+};
+
+static struct platform_device vwm89941_device = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &vwm89941_config,
+ },
+};
+
+/***********WM89942 REGUATOR platform data*************/
+static struct regulator_consumer_supply vwm89942_consumer[] = {
+ REGULATOR_SUPPLY("SPKVDD1", "1-001a"),
+ REGULATOR_SUPPLY("SPKVDD2", "1-001a"),
+};
+
+static struct regulator_init_data vwm89942_data = {
+ .constraints = {
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vwm89942_consumer),
+ .consumer_supplies = vwm89942_consumer,
+};
+
+static struct fixed_voltage_config vwm89942_config = {
+ .supply_name = "V_BAT",
+ .microvolts = 3700000,
+ .gpio = -EINVAL,
+ .init_data = &vwm89942_data,
+};
+
+static struct platform_device vwm89942_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &vwm89942_config,
+ },
+};
+
+static struct platform_device wm8994_ldo1_device;
+static struct platform_device wm8994_ldo2_device;
+static struct platform_device *wm1811a_reg_devices[] __initdata = {
+ &vwm89941_device,
+ &vwm89942_device,
+ &wm8994_ldo1_device,
+ &wm8994_ldo2_device
+};
+
+static struct platform_device *wm8958_reg_devices[] __initdata = {
+ &vwm89941_device,
+ &vwm89942_device
+};
+
+static struct regulator_consumer_supply wm8994_avdd1_supply =
+ REGULATOR_SUPPLY("AVDD1", "1-001a");
+
+static struct regulator_consumer_supply wm8994_dcvdd_supply =
+ REGULATOR_SUPPLY("DCVDD", "1-001a");
+
+static struct regulator_init_data wm8994_ldo1_data = {
+ .constraints = {
+ .always_on = 1,
+ .name = "AVDD1_3.0V",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &wm8994_avdd1_supply,
+};
+
+static struct fixed_voltage_config wm8994_ldo1_config = {
+ .supply_name = "V_BAT_X",
+ .microvolts = 3700000,
+ .gpio = -EINVAL,
+ .init_data = &wm8994_ldo1_data,
+};
+
+static struct platform_device wm8994_ldo1_device = {
+ .name = "reg-fixed-voltage",
+ .id = 2,
+ .dev = {
+ .platform_data = &wm8994_ldo1_config,
+ },
+};
+
+
+static struct regulator_init_data wm8994_ldo2_data = {
+ .constraints = {
+ .always_on = 1,
+ .name = "DCVDD_1.0V",
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &wm8994_dcvdd_supply,
+};
+
+static struct fixed_voltage_config wm8994_ldo2_config = {
+ .supply_name = "V_BAT_Y",
+ .microvolts = 3700000,
+ .gpio = -EINVAL,
+ .init_data = &wm8994_ldo2_data,
+};
+
+static struct platform_device wm8994_ldo2_device = {
+ .name = "reg-fixed-voltage",
+ .id = 3,
+ .dev = {
+ .platform_data = &wm8994_ldo2_config,
+ },
+};
+
+static struct wm8958_custom_config custom_config = {
+ .format = 6,
+ .rate = 48000,
+ .channels = 2,
+};
+
+static struct wm8994_pdata wm8994_pdata = {
+ /* configure gpio1 function: 0x0001(Logic level input/output) */
+ .gpio_defaults[0] = 0x0003,
+ .irq_flags = IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ /* FIXME: Below are 1811A specfic, we need to use SPID for these */
+
+ /* configure gpio3/4/5/7 function for AIF2 voice */
+ .gpio_defaults[2] = 0x8100,
+ .gpio_defaults[3] = 0x8100,
+ .gpio_defaults[4] = 0x8100,
+ .gpio_defaults[6] = 0x0100,
+ /* configure gpio8/9/10/11 function for AIF3 BT */
+ /* gpio7 is codec intr pin for GV M2 */
+ .gpio_defaults[7] = 0x0003,
+ .gpio_defaults[8] = 0x0105,
+ .gpio_defaults[9] = 0x0100,
+ .gpio_defaults[10] = 0x0100,
+ .ldo[0] = { 0, &wm8994_ldo1_data }, /* set actual value at wm8994_platform_data() */
+ .ldo[1] = { 0, &wm8994_ldo2_data },
+ .ldo_ena_always_driven = 1,
+
+ .mic_id_delay = 300, /*300ms delay*/
+ .micdet_delay = 500,
+ .micb_en_delay = 5000, /* Keeps MICBIAS2 high for 5sec during jack insertion/removal */
+
+ .custom_cfg = &custom_config,
+};
+
+/*
+ * Look up the codec interrupt GPIO @name in the SFI table, point the
+ * I2C board info's IRQ at it (offset by INTEL_MID_IRQ_OFFSET) and set
+ * an unused IRQ base for the codec's own irqchip.
+ * Returns the codec GPIO number (>= 0) on success, -EINVAL when the
+ * GPIO is not present in the SFI table.
+ */
+static int wm8994_get_irq_data(struct wm8994_pdata *pdata,
+		struct i2c_board_info *i2c_info, char *name)
+{
+	int codec_gpio;
+
+	/* alek tells me that since driver is registering a new chip
+	 * irq we need to give it a base which is unused so put
+	 * 256+192 here */
+	pdata->irq_base = (256 + 192);
+	codec_gpio = get_gpio_by_name(name);
+	if (codec_gpio < 0) {
+		pr_err("%s failed for : %d\n", __func__, codec_gpio);
+		return -EINVAL;
+	}
+	i2c_info->irq = codec_gpio + INTEL_MID_IRQ_OFFSET;
+	return codec_gpio;
+}
+
+/*
+ * SFI callback for the wm8994 codec: register its fixed regulators and
+ * resolve the "audiocodec_int" interrupt GPIO into the I2C board info.
+ * Returns the shared wm8994_pdata, or NULL when the interrupt GPIO
+ * lookup fails.
+ */
+void __init *wm8994_platform_data(void *info)
+{
+	struct i2c_board_info *i2c_info = (struct i2c_board_info *)info;
+	int irq = 0;
+
+	platform_add_devices(wm8958_reg_devices,
+			ARRAY_SIZE(wm8958_reg_devices));
+
+	irq = wm8994_get_irq_data(&wm8994_pdata, i2c_info,
+			"audiocodec_int");
+	if (irq < 0)
+		return NULL;
+
+	return &wm8994_pdata;
+}
+
+static struct i2c_board_info wm8958_info = {
+ I2C_BOARD_INFO("wm8958", 0x1a),
+};
+
+/*
+ * Register the wm8958 codec directly on I2C bus 1: add its fixed
+ * regulators, resolve the "audiocodec_int" GPIO and register the I2C
+ * board info.  Always returns NULL - the board info is registered
+ * here, nothing is handed back to the SFI layer.
+ * (Fixes: the original fell off the end of a void * function with a
+ * bare "return;", and "if (irq)" treated the -EINVAL error as success
+ * while rejecting valid GPIO 0.)
+ */
+void __init *wm8958_platform_data(void *info)
+{
+	int irq = 0, bus_num = 1;
+
+	platform_add_devices(wm8958_reg_devices,
+			ARRAY_SIZE(wm8958_reg_devices));
+
+	irq = wm8994_get_irq_data(&wm8994_pdata, &wm8958_info,
+			"audiocodec_int");
+
+	/* wm8994_get_irq_data() returns the GPIO (>= 0) or -EINVAL */
+	if (irq >= 0)
+		wm8958_info.platform_data = &wm8994_pdata;
+
+	i2c_register_board_info(bus_num, &wm8958_info, 1);
+
+	return NULL;
+}
--- /dev/null
+#ifndef _PLATFORM_WM8994_H_
+#define _PLATFORM_WM8994_H_
+
+extern void *wm8994_platform_data(void *info) __attribute__((weak));
+extern void *wm8958_platform_data(void *info) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * early_printk_intel_mid.c - early consoles for Intel MID platforms
+ *
+ * Copyright (c) 2008-2010, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/*
+ * Currently we have 3 types of early printk consoles: PTI, HSU and
+ * MAX3110 SPI-UART.
+ * PTI is available for mdfld, clv and mrfld.
+ * HSU is available for mdfld, clv and mrfld. But it depends on board design.
+ * Some boards don't have HSU UART pins routed to the connector so we can't
+ * use it.
+ * Max3110 SPI-UART is a stand-alone chip with SPI interface located in the
+ * debug card. Drivers can access to this chip via Soc's SPI controller or SSP
+ * controller(working in SPI mode).
+ * Max3110 is available for mrst, mdfld, clv and mrfld. But for mrst, mdfld
+ * and clv, MAX3110 is connected to SPI controller, for mrfld, MAX3110 is
+ * connected to SSP controller.
+ */
+
+#include <linux/serial_reg.h>
+#include <linux/serial_mfd.h>
+#include <linux/kmsg_dump.h>
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/pti.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/intel-mid.h>
+
+#define MRST_SPI_TIMEOUT 0x200000
+#define MRST_REGBASE_SPI0 0xff128000
+#define MRST_REGBASE_SPI1 0xff128400
+#define CLV_REGBASE_SPI1 0xff135000
+#define MRST_CLK_SPI0_REG 0xff11d86c
+#define MRFLD_SSP_TIMEOUT 0x200000
+#define MRFLD_REGBASE_SSP5 0xff189000
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET 0
+
+#define SPI_FRF_OFFSET 4
+#define SPI_FRF_SPI 0x0
+#define SPI_FRF_SSP 0x1
+#define SPI_FRF_MICROWIRE 0x2
+#define SPI_FRF_RESV 0x3
+
+#define SPI_MODE_OFFSET 6
+#define SPI_SCPH_OFFSET 6
+#define SPI_SCOL_OFFSET 7
+#define SPI_TMOD_OFFSET 8
+#define SPI_TMOD_TR 0x0 /* xmit & recv */
+#define SPI_TMOD_TO 0x1 /* xmit only */
+#define SPI_TMOD_RO 0x2 /* recv only */
+#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET 10
+#define SPI_SRL_OFFSET 11
+#define SPI_CFS_OFFSET 12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK 0x7f /* cover 7 bits */
+#define SR_BUSY (1 << 0)
+#define SR_TF_NOT_FULL (1 << 1)
+#define SR_TF_EMPT (1 << 2)
+#define SR_RF_NOT_EMPT (1 << 3)
+#define SR_RF_FULL (1 << 4)
+#define SR_TX_ERR (1 << 5)
+#define SR_DCOL (1 << 6)
+
+/* SR bit fields for SSP*/
+#define SSP_SR_TF_NOT_FULL (1 << 2)
+
+static int ssp_timing_wr; /* Tangier A0 SSP timing workaround */
+
+static unsigned int early_pti_console_channel;
+static unsigned int early_pti_control_channel;
+
+/* SPI controller registers */
+struct dw_spi_reg {
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 ssienr;
+ u32 mwcr;
+ u32 ser;
+ u32 baudr;
+ u32 txfltr;
+ u32 rxfltr;
+ u32 txflr;
+ u32 rxflr;
+ u32 sr;
+ u32 imr;
+ u32 isr;
+ u32 risr;
+ u32 txoicr;
+ u32 rxoicr;
+ u32 rxuicr;
+ u32 msticr;
+ u32 icr;
+ u32 dmacr;
+ u32 dmatdlr;
+ u32 dmardlr;
+ u32 idr;
+ u32 version;
+
+ /* Currently operates as 32 bits, though only the low 16 bits matter */
+ u32 dr;
+} __packed;
+
+/* SSP controler registers */
+struct dw_ssp_reg {
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 sr;
+ u32 ssitr;
+ u32 dr;
+} __packed;
+
+#define dw_readl(dw, name) __raw_readl(&(dw)->name)
+#define dw_writel(dw, name, val) __raw_writel((val), &(dw)->name)
+
+/* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */
+static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
+
+static u32 *pclk_spi0;
+/* Always contains an accessible address, start with 0 */
+static struct dw_spi_reg *pspi;
+static struct dw_ssp_reg *pssp;
+
+static struct kmsg_dumper dw_dumper;
+static int dumper_registered;
+
+/*
+ * kmsg dumper callback: on a dump event, re-initialise the early SPI
+ * console hardware (its state is unknown at this point) and replay the
+ * kernel log buffer line by line through the MAX3110 console.
+ */
+static void dw_kmsg_dump(struct kmsg_dumper *dumper,
+			enum kmsg_dump_reason reason)
+{
+	/* static: called in dump context, keep it off the stack */
+	static char line[1024];
+	size_t len;
+
+	/* When run to this, we'd better re-init the HW */
+	mrst_early_console_init();
+
+	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+		early_mrst_console.write(&early_mrst_console, line, len);
+}
+
+/* Set the ratio rate to 115200, 8n1, IRQ disabled */
+static void max3110_spi_write_config(void)
+{
+ u16 config;
+
+ config = 0xc001;
+ dw_writel(pspi, dr, config);
+}
+
+/* Translate char to a eligible word and send to max3110 */
+static void max3110_spi_write_data(char c)
+{
+ u16 data;
+
+ data = 0x8000 | c;
+ dw_writel(pspi, dr, data);
+}
+
+/* similar to max3110_spi_write_config, but via SSP controller */
+static void max3110_ssp_write_config(void)
+{
+ u16 config;
+
+ config = 0xc001;
+ dw_writel(pssp, dr, config);
+ dw_readl(pssp, dr);
+ udelay(10);
+ return;
+}
+
+/* similar to max3110_spi_write_data, but via SSP controller */
+static void max3110_ssp_write_data(char c)
+{
+ u16 data;
+
+ data = 0x8000 | c;
+ dw_writel(pssp, dr, data);
+ dw_readl(pssp, dr);
+ udelay(10);
+ return;
+}
+
+void mrst_early_console_init(void)
+{
+ u32 ctrlr0 = 0;
+ u32 spi0_cdiv;
+ u32 freq; /* Freqency info only need be searched once */
+
+ /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
+ pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+ MRST_CLK_SPI0_REG);
+ spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
+ freq = 100000000 / (spi0_cdiv + 1);
+
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL)
+ mrst_spi_paddr = MRST_REGBASE_SPI1;
+ else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+ mrst_spi_paddr = CLV_REGBASE_SPI1;
+
+ pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+ mrst_spi_paddr);
+
+ /* Disable SPI controller */
+ dw_writel(pspi, ssienr, 0);
+
+ /* Set control param, 8 bits, transmit only mode */
+ ctrlr0 = dw_readl(pspi, ctrl0);
+
+ ctrlr0 &= 0xfcc0;
+ ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
+ | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
+ dw_writel(pspi, ctrl0, ctrlr0);
+
+ /*
+ * Change the spi0 clk to comply with 115200 bps, use 100000 to
+ * calculate the clk dividor to make the clock a little slower
+ * than real baud rate.
+ */
+ dw_writel(pspi, baudr, freq/100000);
+
+ /* Disable all INT for early phase */
+ dw_writel(pspi, imr, 0x0);
+
+ /* Set the cs to spi-uart */
+ dw_writel(pspi, ser, 0x2);
+
+ /* Enable the HW, the last step for HW init */
+ dw_writel(pspi, ssienr, 0x1);
+
+ /* Set the default configuration */
+ max3110_spi_write_config();
+
+ /* Register the kmsg dumper */
+ if (!dumper_registered) {
+ dw_dumper.dump = dw_kmsg_dump;
+ kmsg_dump_register(&dw_dumper);
+ dumper_registered = 1;
+ }
+}
+
+/*
+ * Slave select should be called in the read/write function.
+ * Emit one character through the MAX3110 via the SPI controller,
+ * polling the TX-FIFO-not-full flag with a bounded busy-wait.
+ */
+static void early_mrst_spi_putc(char c)
+{
+	unsigned int timeout;
+	u32 sr;
+
+	timeout = MRST_SPI_TIMEOUT;
+	/* Early putc needs to make sure the TX FIFO is not full */
+	while (--timeout) {
+		sr = dw_readl(pspi, sr);
+		if (!(sr & SR_TF_NOT_FULL))
+			cpu_relax();
+		else
+			break;
+	}
+
+	/* timeout == 0 means the FIFO never drained; drop the char */
+	if (!timeout)
+		pr_warn("MRST earlycon: timed out\n");
+	else
+		max3110_spi_write_data(c);
+}
+
+/* Early SPI only uses polling mode */
+static void early_mrst_spi_write(struct console *con, const char *str,
+ unsigned n)
+{
+ int i;
+
+ for (i = 0; i < n && *str; i++) {
+ if (*str == '\n')
+ early_mrst_spi_putc('\r');
+ early_mrst_spi_putc(*str);
+ str++;
+ }
+}
+
+struct console early_mrst_console = {
+ .name = "earlymrst",
+ .write = early_mrst_spi_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+void mrfld_early_console_init(void)
+{
+ u32 ctrlr0 = 0;
+
+ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRFLD_REGBASE_SSP5);
+
+ pssp = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
+ (MRFLD_REGBASE_SSP5 & (PAGE_SIZE - 1)));
+
+ if (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_NONE)
+ ssp_timing_wr = 1;
+
+ /* mask interrupts, clear enable and set DSS config */
+ /* SSPSCLK on active transfers only */
+ if (ssp_timing_wr) {
+ dw_writel(pssp, ctrl0, 0xc12c0f);
+ dw_writel(pssp, ctrl1, 0x0);
+ } else {
+ dw_writel(pssp, ctrl0, 0xc0000f);
+ dw_writel(pssp, ctrl1, 0x10000000);
+ }
+
+ dw_readl(pssp, sr);
+
+ /* enable port */
+ ctrlr0 = dw_readl(pssp, ctrl0);
+ ctrlr0 |= 0x80;
+ dw_writel(pssp, ctrl0, ctrlr0);
+}
+
+/*
+ * slave select should be called in the read/write function.
+ * Emit one character through the MAX3110 via the SSP controller.
+ * Returns 0 on success, -1 when the TX FIFO never drained.
+ */
+static int early_mrfld_putc(char c)
+{
+	unsigned int timeout;
+	u32 sr;
+
+	timeout = MRFLD_SSP_TIMEOUT;
+	/* early putc need make sure the TX FIFO is not full*/
+	while (timeout--) {
+		sr = dw_readl(pssp, sr);
+		if (ssp_timing_wr) {
+			/* Tangier A0 workaround: poll the FIFO-level
+			   bits instead of the not-full flag */
+			if (sr & 0xF00)
+				cpu_relax();
+			else
+				break;
+		} else {
+			if (!(sr & SSP_SR_TF_NOT_FULL))
+				cpu_relax();
+			else
+				break;
+		}
+	}
+
+	/* the post-decrement wraps timeout to 0xffffffff when the loop
+	   exhausts - that is the "FIFO never drained" case */
+	if (timeout == 0xffffffff) {
+		pr_info("SSP: waiting timeout\n");
+		return -1;
+	}
+
+	max3110_ssp_write_data(c);
+	return 0;
+}
+
+static void early_mrfld_write(struct console *con,
+ const char *str, unsigned n)
+{
+ int i;
+
+ for (i = 0; i < n && *str; i++) {
+ if (*str == '\n')
+ early_mrfld_putc('\r');
+ early_mrfld_putc(*str);
+
+ str++;
+ }
+}
+
+struct console early_mrfld_console = {
+ .name = "earlymrfld",
+ .write = early_mrfld_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+void mrfld_early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ n = vscnprintf(buf, 512, fmt, ap);
+ va_end(ap);
+
+ early_mrfld_console.write(&early_mrfld_console, buf, n);
+}
+
+/*
+ * Following is the early console based on High Speed UART device.
+ */
+#define MERR_HSU_PORT_BASE 0xff010180
+#define MERR_HSU_CLK_CTL 0xff00b830
+#define MFLD_HSU_PORT_BASE 0xffa28080
+
+static void __iomem *phsu;
+
+void hsu_early_console_init(const char *s)
+{
+ unsigned long paddr, port = 0;
+ u8 lcr;
+ int *clkctl;
+
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+ paddr = MERR_HSU_PORT_BASE;
+ clkctl = (int *)set_fixmap_offset_nocache(FIX_CLOCK_CTL,
+ MERR_HSU_CLK_CTL);
+ } else {
+ paddr = MFLD_HSU_PORT_BASE;
+ clkctl = NULL;
+ }
+
+ /*
+ * Select the early HSU console port if specified by user in the
+ * kernel command line.
+ */
+ if (*s && !kstrtoul(s, 10, &port))
+ port = clamp_val(port, 0, 2);
+
+ paddr += port * 0x80;
+ phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
+
+ /* Disable FIFO */
+ writeb(0x0, phsu + UART_FCR);
+
+ /* Set to default 115200 bps, 8n1 */
+ lcr = readb(phsu + UART_LCR);
+ writeb((0x80 | lcr), phsu + UART_LCR);
+ writeb(0x01, phsu + UART_DLL);
+ writeb(0x00, phsu + UART_DLM);
+ writeb(lcr, phsu + UART_LCR);
+ writel(0x0010, phsu + UART_ABR * 4);
+ writel(0x0010, phsu + UART_PS * 4);
+
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+ /* detect HSU clock is 50M or 19.2M */
+ if (clkctl && *clkctl & (1 << 16))
+ writel(0x0120, phsu + UART_MUL * 4); /* for 50M */
+ else
+ writel(0x05DC, phsu + UART_MUL * 4); /* for 19.2M */
+ } else
+ writel(0x0240, phsu + UART_MUL * 4);
+
+ writel(0x3D09, phsu + UART_DIV * 4);
+
+ writeb(0x8, phsu + UART_MCR);
+ writeb(0x7, phsu + UART_FCR);
+ writeb(0x3, phsu + UART_LCR);
+
+ /* Clear IRQ status */
+ readb(phsu + UART_LSR);
+ readb(phsu + UART_RX);
+ readb(phsu + UART_IIR);
+ readb(phsu + UART_MSR);
+
+ /* Enable FIFO */
+ writeb(0x7, phsu + UART_FCR);
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+/*
+ * Emit one character on the HSU UART, waiting up to ~10ms for both the
+ * transmitter and the holding register to empty (BOTH_EMPTY).
+ */
+static void early_hsu_putc(char ch)
+{
+	unsigned int timeout = 10000; /* 10ms */
+	u8 status;
+
+	while (--timeout) {
+		status = readb(phsu + UART_LSR);
+		if (status & BOTH_EMPTY)
+			break;
+		udelay(1);
+	}
+
+	/* Only write the char when there was no timeout */
+	if (timeout)
+		writeb(ch, phsu + UART_TX);
+}
+
+static void early_hsu_write(struct console *con, const char *str, unsigned n)
+{
+ int i;
+
+ for (i = 0; i < n && *str; i++) {
+ if (*str == '\n')
+ early_hsu_putc('\r');
+ early_hsu_putc(*str);
+ str++;
+ }
+}
+
+struct console early_hsu_console = {
+ .name = "earlyhsu",
+ .write = early_hsu_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+void hsu_early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ n = vscnprintf(buf, 512, fmt, ap);
+ va_end(ap);
+
+ early_hsu_console.write(&early_hsu_console, buf, n);
+}
+
+#define PTI_ADDRESS 0xfd800000
+#define CONTROL_FRAME_LEN 32 /* PTI control frame maximum size */
+
+static void early_pti_write_to_aperture(struct pti_masterchannel *mc,
+ u8 *buf, int len)
+{
+ int dwordcnt, final, i;
+ u32 ptiword;
+ u8 *p ;
+ u32 pti_phys_address ;
+ u32 __iomem *aperture;
+
+ p = buf;
+
+ /*
+ calculate the aperture offset from the base using the master and
+ channel id's.
+ */
+ pti_phys_address = PTI_ADDRESS +
+ (mc->master << 15) + (mc->channel << 8);
+
+ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, pti_phys_address);
+ aperture = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
+ (pti_phys_address & (PAGE_SIZE - 1)));
+
+ dwordcnt = len >> 2;
+ final = len - (dwordcnt << 2); /* final = trailing bytes */
+ if (final == 0 && dwordcnt != 0) { /* always have a final dword */
+ final += 4;
+ dwordcnt--;
+ }
+
+ for (i = 0; i < dwordcnt; i++) {
+ ptiword = be32_to_cpu(*(u32 *)p);
+ p += 4;
+ iowrite32(ptiword, aperture);
+ }
+
+ aperture += PTI_LASTDWORD_DTS; /* adding DTS signals that is EOM */
+ ptiword = 0;
+
+ for (i = 0; i < final; i++)
+ ptiword |= *p++ << (24-(8*i));
+
+ iowrite32(ptiword, aperture);
+
+ return;
+}
+
+static int pti_early_console_init(void)
+{
+ early_pti_console_channel = 0;
+ early_pti_control_channel = 0;
+ return 0;
+}
+
+/*
+ * PTI early console write: emit a control frame of the form
+ * "<master> <channel> <task name>" on the control master, then the
+ * console payload itself.  Console and control channels each advance
+ * round-robin through 0..127 per call.
+ */
+static void early_pti_write(struct console *con,
+			const char *str, unsigned n)
+{
+	static struct pti_masterchannel mccontrol = {.master = 72,
+						     .channel = 0};
+	static struct pti_masterchannel mcconsole = {.master = 73,
+						     .channel = 0};
+	const char *control_format = "%3d %3d %s";
+
+	/*
+	 * Since we access the comm member in current's task_struct,
+	 * we only need to be as large as what 'comm' in that
+	 * structure is.
+	 */
+	char comm[TASK_COMM_LEN];
+	/* NOTE(review): control_frame is u8[] but is passed to
+	   snprintf()/strlen() which expect char * - relies on implicit
+	   conversion; consider declaring it char[] */
+	u8 control_frame[CONTROL_FRAME_LEN];
+
+	/* task information */
+	if (in_irq())
+		strncpy(comm, "hardirq", sizeof(comm));
+	else if (in_softirq())
+		strncpy(comm, "softirq", sizeof(comm));
+	else
+		strncpy(comm, current->comm, sizeof(comm));
+
+	/* Absolutely ensure our buffer is zero terminated */
+	comm[TASK_COMM_LEN-1] = 0;
+
+	mccontrol.channel = early_pti_control_channel;
+	early_pti_control_channel = (early_pti_control_channel + 1) & 0x7f;
+
+	mcconsole.channel = early_pti_console_channel;
+	early_pti_console_channel = (early_pti_console_channel + 1) & 0x7f;
+
+	snprintf(control_frame, CONTROL_FRAME_LEN, control_format,
+		mcconsole.master, mcconsole.channel, comm);
+
+	early_pti_write_to_aperture(&mccontrol, control_frame,
+			strlen(control_frame));
+	early_pti_write_to_aperture(&mcconsole, (u8 *)str, n);
+
+}
+
+struct console early_pti_console = {
+ .name = "earlypti",
+ .early_setup = pti_early_console_init,
+ .write = early_pti_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+void pti_early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ n = vscnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ early_pti_console.write(&early_pti_console, buf, n);
+}
--- /dev/null
+/*
+ * intel-mid.c: Intel MID platform setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ * Author: Sathyanarayanan KN(sathyanarayanan.kuppuswamy@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#define SFI_SIG_OEM0 "OEM0"
+#define pr_fmt(fmt) "intel_mid: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+#include "intel_mid_weak_decls.h"
+#include "intel_soc_pmu.h"
+
+/*
+ * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
+ * cmdline option x86_intel_mid_timer can be used to override the configuration
+ * to prefer one or the other.
+ * at runtime, there are basically three timer configurations:
+ * 1. per cpu apbt clock only
+ * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
+ * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
+ *
+ * by default (without cmdline option), platform code first detects cpu type
+ * to see if we are on lincroft or penwell, then set up both lapic or apbt
+ * clocks accordingly.
+ * i.e. by default, medfield uses configuration #2, moorestown uses #1.
+ * config #3 is supported but not recommended on medfield.
+ *
+ * rating and feature summary:
+ * lapic (with C3STOP) --------- 100
+ * apbt (always-on) ------------ 110
+ * lapic (always-on,ARAT) ------ 150
+ */
+__cpuinitdata enum intel_mid_timer_options intel_mid_timer_options;
+
+/* intel_mid_ops to store sub arch ops */
+struct intel_mid_ops *intel_mid_ops;
+/* getter function for sub arch ops*/
+static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
+enum intel_mid_cpu_type __intel_mid_cpu_chip;
+EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
+
+static int force_cold_boot;
+module_param(force_cold_boot, int, 0644);
+MODULE_PARM_DESC(force_cold_boot,
+ "Set to Y to force a COLD BOOT instead of a COLD RESET "
+ "on the next reboot system call.");
+u32 nbr_hsi_clients = 2;
+static void intel_mid_power_off(void)
+{
+ pmu_power_off();
+}
+
+static void intel_mid_reboot(void)
+{
+ if (intel_scu_ipc_fw_update()) {
+ pr_debug("intel_scu_fw_update: IFWI upgrade failed...\n");
+ }
+ if (force_cold_boot)
+ rpmsg_send_generic_simple_command(IPCMSG_COLD_BOOT, 0);
+ else
+ rpmsg_send_generic_simple_command(IPCMSG_COLD_RESET, 0);
+}
+
+static unsigned long __init intel_mid_calibrate_tsc(void)
+{
+ return 0;
+}
+
+static void __init intel_mid_time_init(void)
+{
+
+#ifdef CONFIG_SFI
+ sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
+#endif
+ switch (intel_mid_timer_options) {
+ case INTEL_MID_TIMER_APBT_ONLY:
+ break;
+ case INTEL_MID_TIMER_LAPIC_APBT:
+ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+ break;
+ default:
+ if (!boot_cpu_has(X86_FEATURE_ARAT))
+ break;
+ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+ return;
+ }
+ /* we need at least one APB timer */
+ pre_init_apic_IRQ0();
+ apbt_time_init();
+}
+
+static void __cpuinit intel_mid_arch_setup(void)
+{
+ if (boot_cpu_data.x86 != 6) {
+ pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ return;
+ }
+ switch (boot_cpu_data.x86_model) {
+ case 0x35:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW;
+ break;
+ case 0x3C:
+ case 0x4A:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+ break;
+ case 0x5A:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_ANNIEDALE;
+ break;
+ case 0x5D:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CARBONCANYON;
+ break;
+ case 0x27:
+ default:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ break;
+ }
+
+ if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
+ intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
+ else {
+ intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
+		pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+ }
+
+ if (intel_mid_ops->arch_setup)
+ intel_mid_ops->arch_setup();
+}
+
+/* MID systems don't have i8042 controller */
+static int intel_mid_i8042_detect(void)
+{
+ return 0;
+}
+
+/*
+ * Moorestown does not have external NMI source nor port 0x61 to report
+ * NMI status. The possible NMI sources are from pmu as a result of NMI
+ * watchdog or lock debug. Reading io port 0x61 results in 0xff which
+ * misled NMI handler.
+ */
+static unsigned char intel_mid_get_nmi_reason(void)
+{
+ return 0;
+}
+
+/*
+ * Moorestown specific x86_init function overrides and early setup
+ * calls.
+ */
+void __init x86_intel_mid_early_setup(void)
+{
+	x86_init.resources.probe_roms = x86_init_noop;
+	x86_init.resources.reserve_resources = x86_init_noop;
+
+	x86_init.oem.arch_setup = intel_mid_arch_setup;
+
+	x86_init.timers.timer_init = intel_mid_time_init;
+	x86_init.timers.setup_percpu_clockev = x86_init_noop;
+	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
+
+	x86_init.irqs.pre_vector_init = x86_init_noop;
+
+	x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
+	x86_platform.i8042_detect = intel_mid_i8042_detect;
+	x86_init.timers.wallclock_init = intel_mid_rtc_init;
+	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
+
+ x86_init.pci.init = intel_mid_pci_init;
+ x86_init.pci.fixup_irqs = x86_init_noop;
+
+ legacy_pic = &null_legacy_pic;
+
+ pm_power_off = intel_mid_power_off;
+ machine_ops.emergency_restart = intel_mid_reboot;
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+ set_bit(MP_BUS_ISA, mp_bus_not_pci);
+}
+
+/*
+ * if user does not want to use per CPU apb timer, just give it a lower rating
+ * than local apic timer and skip the late per cpu timer init.
+ */
+static int __init setup_x86_intel_mid_timer(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp("apbt_only", arg) == 0)
+ intel_mid_timer_options = INTEL_MID_TIMER_APBT_ONLY;
+ else if (strcmp("lapic_and_apbt", arg) == 0)
+ intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
+ else {
+		pr_warn("X86 INTEL_MID timer option %s not recognised,"
+ " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+ arg);
+ return -EINVAL;
+ }
+ return 0;
+}
+__setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
--- /dev/null
+#include <linux/export.h>
+#include <linux/pci.h>
+
+#include <asm/intel_mid_pcihelpers.h>
+
+/* Unified message bus read/write operation */
+static DEFINE_SPINLOCK(msgbus_lock);
+
+static struct pci_dev *pci_root;
+
+static int intel_mid_msgbus_init(void)
+{
+ pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (!pci_root) {
+		printk(KERN_ALERT "%s: Error: msgbus PCI handle NULL\n",
+			__func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+fs_initcall(intel_mid_msgbus_init);
+
+u32 intel_mid_msgbus_read32_raw(u32 cmd)
+{
+ unsigned long irq_flags;
+ u32 data;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+ return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32_raw);
+
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32_raw);
+
+u32 intel_mid_msgbus_read32(u8 port, u32 addr)
+{
+ unsigned long irq_flags;
+ u32 data;
+ u32 cmd;
+ u32 cmdext;
+
+ cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
+ ((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+ cmdext = addr & 0xffffff00;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+
+ if (cmdext) {
+ /* This resets to 0 automatically, no need to write 0 */
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+ cmdext);
+ }
+
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+	return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32);
+
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
+{
+ unsigned long irq_flags;
+ u32 cmd;
+ u32 cmdext;
+
+ cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
+ ((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+ cmdext = addr & 0xffffff00;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+
+ if (cmdext) {
+ /* This resets to 0 automatically, no need to write 0 */
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+ cmdext);
+ }
+
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32);
+
+/* Called only from code that runs later than fs_initcall */
+u32 intel_mid_soc_stepping(void)
+{
+ return pci_root->revision;
+}
+EXPORT_SYMBOL(intel_mid_soc_stepping);
+
--- /dev/null
+/*
+ * intel_mid_scu.c: Intel MID SCU platform initialization code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/irq.h>
+#include <linux/rpmsg.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+struct rpmsg_ns_list nslist = {
+ .list = LIST_HEAD_INIT(nslist.list),
+ .lock = __MUTEX_INITIALIZER(nslist.lock),
+};
+
+static struct intel_mid_rproc_pdata intel_scu_pdata = {
+ .name = "intel_rproc_scu",
+ .firmware = "intel_mid/intel_mid_remoteproc.fw",
+ .nslist = &nslist,
+};
+
+static u64 intel_scu_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device intel_scu_device = {
+ .name = "intel_rproc_scu",
+ .id = -1,
+ .dev = {
+ .platform_data = &intel_scu_pdata,
+ .dma_mask = &intel_scu_dmamask,
+ },
+};
+
+void register_rpmsg_service(char *name, int id, u32 addr)
+{
+ struct rpmsg_ns_info *info;
+ info = rpmsg_ns_alloc(name, id, addr);
+ rpmsg_ns_add_to_list(info, &nslist);
+}
+
+int intel_mid_rproc_init(void)
+{
+ int err;
+
+ /* generic rpmsg channels */
+ register_rpmsg_service("rpmsg_ipc_command", RPROC_SCU, RP_IPC_COMMAND);
+ register_rpmsg_service("rpmsg_ipc_simple_command",
+ RPROC_SCU, RP_IPC_SIMPLE_COMMAND);
+ register_rpmsg_service("rpmsg_ipc_raw_command",
+ RPROC_SCU, RP_IPC_RAW_COMMAND);
+
+ register_rpmsg_service("rpmsg_pmic", RPROC_SCU, RP_PMIC_ACCESS);
+ register_rpmsg_service("rpmsg_mip", RPROC_SCU, RP_MIP_ACCESS);
+ register_rpmsg_service("rpmsg_fw_update",
+ RPROC_SCU, RP_FW_ACCESS);
+ register_rpmsg_service("rpmsg_ipc_util",
+ RPROC_SCU, RP_IPC_UTIL);
+ register_rpmsg_service("rpmsg_flis", RPROC_SCU, RP_FLIS_ACCESS);
+ register_rpmsg_service("rpmsg_watchdog", RPROC_SCU, RP_SET_WATCHDOG);
+ register_rpmsg_service("rpmsg_umip", RPROC_SCU, RP_UMIP_ACCESS);
+ register_rpmsg_service("rpmsg_osip", RPROC_SCU, RP_OSIP_ACCESS);
+ register_rpmsg_service("rpmsg_vrtc", RPROC_SCU, RP_VRTC);
+ register_rpmsg_service("rpmsg_fw_logging", RPROC_SCU, RP_FW_LOGGING);
+ register_rpmsg_service("rpmsg_kpd_led", RPROC_SCU,
+ RP_MSIC_KPD_LED);
+ register_rpmsg_service("rpmsg_modem_nvram", RPROC_SCU,
+ RP_IPC_RAW_COMMAND);
+ register_rpmsg_service("rpmsg_mid_pwm", RPROC_SCU,
+ RP_MSIC_PWM);
+
+	err = platform_device_register(&intel_scu_device);
+	if (err < 0)
+		pr_err("Failed to register intel-mid-rproc platform device\n");
+
+	return err;
+}
+arch_initcall_sync(intel_mid_rproc_init);
--- /dev/null
+/*
+ * intel_mid_scu.h: SCU initialization header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _INTEL_MID_SCU_H_
+#define _INTEL_MID_SCU_H_
+extern int intel_mid_rproc_init(void) __attribute__((weak));
+#endif
--- /dev/null
+/*
+ * intel_mid_sfi.c: Intel MID SFI initialization code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Sathyanarayanan KN
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/skbuff.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/blkdev.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+#include "intel_mid_weak_decls.h"
+
+#define SFI_SIG_OEM0 "OEM0"
+#define MAX_IPCDEVS 24
+#define MAX_SCU_SPI 24
+#define MAX_SCU_I2C 24
+
+static struct platform_device *ipc_devs[MAX_IPCDEVS];
+static struct spi_board_info *spi_devs[MAX_SCU_SPI];
+static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
+static struct sfi_gpio_table_entry *gpio_table;
+static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
+static int ipc_next_dev;
+static int spi_next_dev;
+static int i2c_next_dev;
+static int i2c_bus[MAX_SCU_I2C];
+static int gpio_num_entry;
+static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
+int sfi_mrtc_num;
+int sfi_mtimer_num;
+
+struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
+EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+
+struct blocking_notifier_head intel_scu_notifier =
+ BLOCKING_NOTIFIER_INIT(intel_scu_notifier);
+EXPORT_SYMBOL_GPL(intel_scu_notifier);
+
+/* parse all the mtimer info to a static mtimer array */
+int __init sfi_parse_mtmr(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_timer_table_entry *pentry;
+ struct mpc_intsrc mp_irq;
+ int totallen;
+
+ sb = (struct sfi_table_simple *)table;
+ if (!sfi_mtimer_num) {
+ sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
+ struct sfi_timer_table_entry);
+ pentry = (struct sfi_timer_table_entry *) sb->pentry;
+ totallen = sfi_mtimer_num * sizeof(*pentry);
+ memcpy(sfi_mtimer_array, pentry, totallen);
+ }
+
+ pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
+ pentry = sfi_mtimer_array;
+ for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
+ pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n",
+ totallen, (u32)pentry->phys_addr,
+ pentry->freq_hz, pentry->irq);
+ if (!pentry->irq)
+ continue;
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
+ mp_irq.irqflag = 5;
+ mp_irq.srcbus = MP_BUS_ISA;
+ mp_irq.srcbusirq = pentry->irq; /* IRQ */
+ mp_irq.dstapic = MP_APIC_ALL;
+ mp_irq.dstirq = pentry->irq;
+ mp_save_irq(&mp_irq);
+ }
+
+ return 0;
+}
+
+struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
+{
+ int i;
+ if (hint < sfi_mtimer_num) {
+ if (!sfi_mtimer_usage[hint]) {
+ pr_debug("hint taken for timer %d irq %d\n",
+ hint, sfi_mtimer_array[hint].irq);
+ sfi_mtimer_usage[hint] = 1;
+ return &sfi_mtimer_array[hint];
+ }
+ }
+ /* take the first timer available */
+ for (i = 0; i < sfi_mtimer_num;) {
+ if (!sfi_mtimer_usage[i]) {
+ sfi_mtimer_usage[i] = 1;
+ return &sfi_mtimer_array[i];
+ }
+ i++;
+ }
+ return NULL;
+}
+
+void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
+{
+ int i;
+ for (i = 0; i < sfi_mtimer_num;) {
+ if (mtmr->irq == sfi_mtimer_array[i].irq) {
+ sfi_mtimer_usage[i] = 0;
+ return;
+ }
+ i++;
+ }
+}
+
+/* parse all the mrtc info to a global mrtc array */
+int __init sfi_parse_mrtc(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_rtc_table_entry *pentry;
+ struct mpc_intsrc mp_irq;
+
+ int totallen;
+
+ sb = (struct sfi_table_simple *)table;
+ if (!sfi_mrtc_num) {
+ sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
+ struct sfi_rtc_table_entry);
+ pentry = (struct sfi_rtc_table_entry *)sb->pentry;
+ totallen = sfi_mrtc_num * sizeof(*pentry);
+ memcpy(sfi_mrtc_array, pentry, totallen);
+ }
+
+ pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
+ pentry = sfi_mrtc_array;
+ for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
+ pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
+ totallen, (u32)pentry->phys_addr, pentry->irq);
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+ mp_irq.irqflag = 0xf; /* level trigger and active low */
+ mp_irq.srcbus = MP_BUS_ISA;
+ mp_irq.srcbusirq = pentry->irq; /* IRQ */
+ mp_irq.dstapic = MP_APIC_ALL;
+ mp_irq.dstirq = pentry->irq;
+ mp_save_irq(&mp_irq);
+ }
+ return 0;
+}
+
+
+/*
+ * Parsing GPIO table first, since the DEVS table will need this table
+ * to map the pin name to the actual pin.
+ */
+static int __init sfi_parse_gpio(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_gpio_table_entry *pentry;
+ int num, i;
+
+ if (gpio_table)
+ return 0;
+ sb = (struct sfi_table_simple *)table;
+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
+ pentry = (struct sfi_gpio_table_entry *)sb->pentry;
+
+	gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
+	if (!gpio_table)
+		return -ENOMEM;
+ memcpy(gpio_table, pentry, num * sizeof(*pentry));
+ gpio_num_entry = num;
+
+ pr_debug("GPIO pin info:\n");
+ for (i = 0; i < num; i++, pentry++)
+ pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
+ " pin = %d\n", i,
+ pentry->controller_name,
+ pentry->pin_name,
+ pentry->pin_no);
+ return 0;
+}
+
+int get_gpio_by_name(const char *name)
+{
+ struct sfi_gpio_table_entry *pentry = gpio_table;
+ int i;
+
+ if (!pentry)
+ return -1;
+ for (i = 0; i < gpio_num_entry; i++, pentry++) {
+ if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
+ return pentry->pin_no;
+ }
+ return -1;
+}
+
+void __init intel_scu_device_register(struct platform_device *pdev)
+{
+ if (ipc_next_dev == MAX_IPCDEVS)
+		pr_err("too many SCU IPC devices\n");
+ else
+ ipc_devs[ipc_next_dev++] = pdev;
+}
+
+static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
+{
+ struct spi_board_info *new_dev;
+
+ if (spi_next_dev == MAX_SCU_SPI) {
+		pr_err("too many SCU SPI devices\n");
+ return;
+ }
+
+ new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!new_dev) {
+ pr_err("failed to alloc mem for delayed spi dev %s\n",
+ sdev->modalias);
+ return;
+ }
+ memcpy(new_dev, sdev, sizeof(*sdev));
+
+ spi_devs[spi_next_dev++] = new_dev;
+}
+
+static void __init intel_scu_i2c_device_register(int bus,
+ struct i2c_board_info *idev)
+{
+ struct i2c_board_info *new_dev;
+
+ if (i2c_next_dev == MAX_SCU_I2C) {
+		pr_err("too many SCU I2C devices\n");
+ return;
+ }
+
+ new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
+ if (!new_dev) {
+ pr_err("failed to alloc mem for delayed i2c dev %s\n",
+ idev->type);
+ return;
+ }
+ memcpy(new_dev, idev, sizeof(*idev));
+
+ i2c_bus[i2c_next_dev] = bus;
+ i2c_devs[i2c_next_dev++] = new_dev;
+}
+
+/* Called by IPC driver */
+void intel_scu_devices_create(void)
+{
+ int i;
+
+ for (i = 0; i < ipc_next_dev; i++)
+ platform_device_add(ipc_devs[i]);
+
+ for (i = 0; i < spi_next_dev; i++)
+ spi_register_board_info(spi_devs[i], 1);
+
+ for (i = 0; i < i2c_next_dev; i++) {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ adapter = i2c_get_adapter(i2c_bus[i]);
+ if (adapter) {
+ client = i2c_new_device(adapter, i2c_devs[i]);
+ if (!client)
+ pr_err("can't create i2c device %s\n",
+ i2c_devs[i]->type);
+ } else
+ i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
+ }
+ intel_scu_notifier_post(SCU_AVAILABLE, NULL);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_create);
+
+/* Called by IPC driver */
+void intel_scu_devices_destroy(void)
+{
+ int i;
+
+ intel_scu_notifier_post(SCU_DOWN, NULL);
+
+ for (i = 0; i < ipc_next_dev; i++)
+ platform_device_del(ipc_devs[i]);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
+
+static struct platform_device *psh_ipc;
+void intel_psh_devices_create(void)
+{
+ psh_ipc = platform_device_alloc("intel_psh_ipc", 0);
+ if (psh_ipc == NULL) {
+ pr_err("out of memory for platform device psh_ipc.\n");
+ return;
+ }
+
+ platform_device_add(psh_ipc);
+}
+EXPORT_SYMBOL_GPL(intel_psh_devices_create);
+
+void intel_psh_devices_destroy(void)
+{
+ if (psh_ipc)
+ platform_device_del(psh_ipc);
+}
+EXPORT_SYMBOL_GPL(intel_psh_devices_destroy);
+
+void __init install_irq_resource(struct platform_device *pdev, int irq)
+{
+ /* Single threaded */
+ static struct resource __initdata res = {
+ .name = "IRQ",
+ .flags = IORESOURCE_IRQ,
+ };
+ res.start = irq;
+ platform_device_add_resources(pdev, &res, 1);
+}
+
+static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct platform_device *pdev;
+ void *pdata = NULL;
+ pr_info("IPC bus, name = %16.16s, irq = 0x%2x\n",
+ pentry->name, pentry->irq);
+ pdata = dev->get_platform_data(pentry);
+ pdev = platform_device_alloc(pentry->name, 0);
+ if (pdev == NULL) {
+ pr_err("out of memory for SFI platform device '%s'.\n",
+ pentry->name);
+ return;
+ }
+ install_irq_resource(pdev, pentry->irq);
+
+ pdev->dev.platform_data = pdata;
+ intel_scu_device_register(pdev);
+}
+
+static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct spi_board_info spi_info;
+ void *pdata = NULL;
+
+ memset(&spi_info, 0, sizeof(spi_info));
+ strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
+ spi_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+ spi_info.bus_num = pentry->host_num;
+ spi_info.chip_select = pentry->addr;
+ spi_info.max_speed_hz = pentry->max_freq;
+ pr_info("SPI bus=%d, name=%16.16s, irq=0x%2x, max_freq=%d, cs=%d\n",
+ spi_info.bus_num,
+ spi_info.modalias,
+ spi_info.irq,
+ spi_info.max_speed_hz,
+ spi_info.chip_select);
+
+ pdata = dev->get_platform_data(&spi_info);
+
+ spi_info.platform_data = pdata;
+ if (dev->delay)
+ intel_scu_spi_device_register(&spi_info);
+ else
+ spi_register_board_info(&spi_info, 1);
+}
+
+static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct i2c_board_info i2c_info;
+ void *pdata = NULL;
+
+ memset(&i2c_info, 0, sizeof(i2c_info));
+ strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
+ i2c_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+ i2c_info.addr = pentry->addr;
+ pr_info("I2C bus = %d, name = %16.16s, irq = 0x%2x, addr = 0x%x\n",
+ pentry->host_num,
+ i2c_info.type,
+ i2c_info.irq,
+ i2c_info.addr);
+ pdata = dev->get_platform_data(&i2c_info);
+ i2c_info.platform_data = pdata;
+
+ if (dev->delay)
+ intel_scu_i2c_device_register(pentry->host_num, &i2c_info);
+ else
+ i2c_register_board_info(pentry->host_num, &i2c_info, 1);
+}
+
+static void __init sfi_handle_sd_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct sd_board_info sd_info;
+ void *pdata = NULL;
+
+ memset(&sd_info, 0, sizeof(sd_info));
+	strncpy(sd_info.name, pentry->name, SFI_NAME_LEN);
+ sd_info.bus_num = pentry->host_num;
+ sd_info.board_ref_clock = pentry->max_freq;
+ sd_info.addr = pentry->addr;
+ pr_info("SDIO bus = %d, name = %16.16s, "
+ "ref_clock = %d, addr =0x%x\n",
+ sd_info.bus_num,
+ sd_info.name,
+ sd_info.board_ref_clock,
+ sd_info.addr);
+ pdata = dev->get_platform_data(&sd_info);
+ sd_info.platform_data = pdata;
+}
+
+struct devs_id __init *get_device_id(u8 type, char *name)
+{
+ struct devs_id *dev = device_ids;
+
+ if (device_ids == NULL)
+ return NULL;
+
+ while (dev->name[0]) {
+ if (dev->type == type &&
+ !strncmp(dev->name, name, SFI_NAME_LEN)) {
+ return dev;
+ }
+ dev++;
+ }
+
+ return NULL;
+}
+
+static int __init sfi_parse_devs(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_device_table_entry *pentry;
+ struct devs_id *dev = NULL;
+ int num, i;
+ int ioapic;
+ struct io_apic_irq_attr irq_attr;
+
+ sb = (struct sfi_table_simple *)table;
+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
+ pentry = (struct sfi_device_table_entry *)sb->pentry;
+
+ for (i = 0; i < num; i++, pentry++) {
+ int irq = pentry->irq;
+
+ if (irq != (u8)0xff) { /* native RTE case */
+ /* these SPI2 devices are not exposed to system as PCI
+ * devices, but they have separate RTE entry in IOAPIC
+ * so we have to enable them one by one here
+ */
+ ioapic = mp_find_ioapic(irq);
+ if (ioapic >= 0) {
+ irq_attr.ioapic = ioapic;
+ irq_attr.ioapic_pin = irq;
+ irq_attr.trigger = 1;
+ if (intel_mid_identify_cpu() ==
+ INTEL_MID_CPU_CHIP_TANGIER) {
+ if (!strncmp(pentry->name,
+ "r69001-ts-i2c", 13))
+ /* active low */
+ irq_attr.polarity = 1;
+ else if (!strncmp(pentry->name,
+ "synaptics_3202", 14))
+ /* active low */
+ irq_attr.polarity = 1;
+ else if (irq == 41)
+ /* fast_int_1 */
+ irq_attr.polarity = 1;
+ else
+ /* active high */
+ irq_attr.polarity = 0;
+ } else {
+ /* PNW and CLV go with active low */
+ irq_attr.polarity = 1;
+ }
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
+ } else
+ printk(KERN_INFO "APIC entry not found for: name=%s, irq=%d, ioapic=%d\n",
+ pentry->name, irq, ioapic);
+ }
+ dev = get_device_id(pentry->type, pentry->name);
+
+ if ((dev == NULL) || (dev->get_platform_data == NULL))
+ continue;
+
+ if (dev->device_handler) {
+ dev->device_handler(pentry, dev);
+ } else {
+ switch (pentry->type) {
+ case SFI_DEV_TYPE_IPC:
+ sfi_handle_ipc_dev(pentry, dev);
+ break;
+ case SFI_DEV_TYPE_SPI:
+ sfi_handle_spi_dev(pentry, dev);
+ break;
+ case SFI_DEV_TYPE_I2C:
+ sfi_handle_i2c_dev(pentry, dev);
+ break;
+ case SFI_DEV_TYPE_SD:
+ sfi_handle_sd_dev(pentry, dev);
+ break;
+ case SFI_DEV_TYPE_HSI:
+ case SFI_DEV_TYPE_UART:
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int __init sfi_parse_oemb(struct sfi_table_header *table)
+{
+ struct sfi_table_oemb *oemb;
+ u32 board_id;
+ u8 sig[SFI_SIGNATURE_SIZE + 1] = {'\0'};
+ u8 oem_id[SFI_OEM_ID_SIZE + 1] = {'\0'};
+ u8 oem_table_id[SFI_OEM_TABLE_ID_SIZE + 1] = {'\0'};
+
+ oemb = (struct sfi_table_oemb *) table;
+ if (!oemb) {
+ pr_err("%s: fail to read SFI OEMB Layout\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ board_id = oemb->board_id | (oemb->board_fab << 4);
+
+ snprintf(sig, (SFI_SIGNATURE_SIZE + 1), "%s", oemb->header.sig);
+ snprintf(oem_id, (SFI_OEM_ID_SIZE + 1), "%s", oemb->header.oem_id);
+ snprintf(oem_table_id, (SFI_OEM_TABLE_ID_SIZE + 1), "%s",
+ oemb->header.oem_table_id);
+ pr_info("SFI OEMB Layout\n");
+ pr_info("\tOEMB signature : %s\n"
+ "\tOEMB length : %d\n"
+ "\tOEMB revision : %d\n"
+ "\tOEMB checksum : 0x%X\n"
+ "\tOEMB oem_id : %s\n"
+ "\tOEMB oem_table_id : %s\n"
+ "\tOEMB board_id : 0x%02X\n"
+ "\tOEMB iafw version : %03d.%03d\n"
+ "\tOEMB val_hooks version : %03d.%03d\n"
+ "\tOEMB ia suppfw version : %03d.%03d\n"
+ "\tOEMB scu runtime version : %03d.%03d\n"
+ "\tOEMB ifwi version : %03d.%03d\n",
+ sig,
+ oemb->header.len,
+ oemb->header.rev,
+ oemb->header.csum,
+ oem_id,
+ oem_table_id,
+ board_id,
+ oemb->iafw_major_version,
+ oemb->iafw_main_version,
+ oemb->val_hooks_major_version,
+ oemb->val_hooks_minor_version,
+ oemb->ia_suppfw_major_version,
+ oemb->ia_suppfw_minor_version,
+ oemb->scu_runtime_major_version,
+ oemb->scu_runtime_minor_version,
+ oemb->ifwi_major_version,
+ oemb->ifwi_minor_version
+ );
+ return 0;
+}
+
+static int __init intel_mid_platform_init(void)
+{
+ /* Get SFI OEMB Layout */
+ sfi_table_parse(SFI_SIG_OEMB, NULL, NULL, sfi_parse_oemb);
+ sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
+ sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
+
+ return 0;
+}
+arch_initcall(intel_mid_platform_init);
/*
- * vrtc.c: Driver for virtual RTC device on Intel MID platform
+ * intel_mid_vrtc.c: Driver for virtual RTC device on Intel MID platform
*
* (C) Copyright 2009 Intel Corporation
*
#include <linux/sfi.h>
#include <linux/platform_device.h>
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
#include <asm/time.h>
#include <asm/fixmap.h>
/* vRTC YEAR reg contains the offset to 1972 */
year += 1972;
- printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
+ pr_info("vRTC: sec: %d min: %d hour: %d day: %d "
"mon: %d year: %d\n", sec, min, hour, mday, mon, year);
return mktime(year, mon, mday, hour, min, sec);
}
+/* Only care about the minutes and seconds */
int vrtc_set_mmss(unsigned long nowtime)
{
+ int real_sec, real_min;
unsigned long flags;
- struct rtc_time tm;
- int year;
- int retval = 0;
-
- rtc_time_to_tm(nowtime, &tm);
- if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
- /*
- * tm.year is the number of years since 1900, and the
- * vrtc need the years since 1972.
- */
- year = tm.tm_year - 72;
- spin_lock_irqsave(&rtc_lock, flags);
- vrtc_cmos_write(year, RTC_YEAR);
- vrtc_cmos_write(tm.tm_mon, RTC_MONTH);
- vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH);
- vrtc_cmos_write(tm.tm_hour, RTC_HOURS);
- vrtc_cmos_write(tm.tm_min, RTC_MINUTES);
- vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
- spin_unlock_irqrestore(&rtc_lock, flags);
- } else {
- printk(KERN_ERR
- "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
- __FUNCTION__, nowtime);
- retval = -EINVAL;
- }
- return retval;
+ int vrtc_min;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ vrtc_min = vrtc_cmos_read(RTC_MINUTES);
+
+ real_sec = nowtime % 60;
+ real_min = nowtime / 60;
+ if (((abs(real_min - vrtc_min) + 15)/30) & 1)
+ real_min += 30;
+ real_min %= 60;
+
+ vrtc_cmos_write(real_sec, RTC_SECONDS);
+ vrtc_cmos_write(real_min, RTC_MINUTES);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return 0;
}
-void __init mrst_rtc_init(void)
+void __init intel_mid_rtc_init(void)
{
unsigned long vrtc_paddr;
};
/* Register the RTC device if appropriate */
-static int __init mrst_device_create(void)
+static int __init intel_mid_device_create(void)
{
/* No Moorestown, no device */
- if (!mrst_identify_cpu())
+ if (!intel_mid_identify_cpu())
return -ENODEV;
/* No timer, no device */
if (!sfi_mrtc_num)
return platform_device_register(&vrtc_device);
}
-module_init(mrst_device_create);
+module_init(intel_mid_device_create);
--- /dev/null
+/*
+ * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+/* __attribute__((weak)) makes these declarations overridable */
+extern struct devs_id __initconst device_ids[] __attribute__((weak));
+/* For every CPU addition a new get_<cpuname>_ops interface needs
+ * to be added.
+ */
+extern void * __init get_penwell_ops(void) __attribute__((weak));
+extern void * __init get_cloverview_ops(void) __attribute__((weak));
+extern void * __init get_tangier_ops(void) __attribute__((weak));
+extern void * __init get_anniedale_ops(void) __attribute__((weak));
--- /dev/null
+/*
+ * intel_soc_clv.c - This driver provides utility APIs for
+ * Cloverview platform
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+
+static unsigned short fastonoff_flag;
+
+static ssize_t fastonoff_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%hu\n", fastonoff_flag);
+}
+
+static ssize_t fastonoff_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t n)
+{
+ unsigned short value;
+ if (sscanf(buf, "%hu", &value) != 1 ||
+ (value != 0 && value != 1)) {
+ printk(KERN_ERR "fastonoff_store: Invalid value\n");
+ return -EINVAL;
+ }
+ fastonoff_flag = value;
+ return n;
+}
+
+static struct kobj_attribute fast_onoff_attr =
+ __ATTR(fastonoff, 0644, fastonoff_show, fastonoff_store);
+
+
+static void clv_init_sysfsfs(void)
+{
+ int error;
+ error = sysfs_create_file(power_kobj, &fast_onoff_attr.attr);
+ if (error)
+ printk(KERN_ERR "sysfs_create_file failed: %d\n", error);
+}
+
+static int clv_pmu_init(void)
+{
+ mid_pmu_cxt->s3_hint = C6_HINT;
+ clv_init_sysfsfs();
+ return 0;
+}
+
+static bool clv_pmu_enter(int s0ix_state)
+{
+ u32 s0ix_value;
+ int num_retry = PMU_MISC_SET_TIMEOUT;
+
+ if (fastonoff_flag && (s0ix_state == MID_S3_STATE))
+ s0ix_value = get_s0ix_val_set_pm_ssc(MID_FAST_ON_OFF_STATE);
+ else
+ s0ix_value = get_s0ix_val_set_pm_ssc(s0ix_state);
+
+ /* issue a command to SCU */
+ pmu_set_interrupt_enable();
+ writel(s0ix_value, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+ do {
+ if (readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+ break;
+ udelay(1);
+ } while (--num_retry);
+
+ if (!num_retry && !readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+ WARN(1, "%s: pm_msic not set.\n", __func__);
+
+ mid_pmu_cxt->s0ix_entered = s0ix_state;
+
+ return true;
+}
+
+static void clv_pmu_remove(void)
+{
+ /* Place holder */
+}
+
+static void clv_pmu_wakeup(void)
+{
+
+	/* Wake up all other CPUs */
+ if (mid_pmu_cxt->s0ix_entered)
+ apic->send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
+static pci_power_t clv_pmu_choose_state(int device_lss)
+{
+ pci_power_t state;
+
+ switch (device_lss) {
+ case PMU_SECURITY_LSS_04:
+ state = PCI_D2;
+ break;
+
+ case PMU_USB_OTG_LSS_06:
+ case PMU_USB_HSIC_LSS_07:
+ case PMU_UART2_LSS_41:
+ state = PCI_D1;
+ break;
+
+ default:
+ state = PCI_D3hot;
+ break;
+ }
+
+ return state;
+}
+
+/**
+ * platform_set_pmu_ops - Set the global pmu method table.
+ * @ops: Pointer to ops structure.
+ */
+void platform_set_pmu_ops(void)
+{
+ pmu_ops = &clv_pmu_ops;
+}
+
+struct platform_pmu_ops clv_pmu_ops = {
+ .init = clv_pmu_init,
+ .enter = clv_pmu_enter,
+ .wakeup = clv_pmu_wakeup,
+ .remove = clv_pmu_remove,
+ .pci_choose_state = clv_pmu_choose_state,
+ .set_power_state_ops = pmu_set_s0ix_possible,
+ .set_s0ix_complete = s0ix_complete,
+ .nc_set_power_state = mdfld_clv_nc_set_power_state,
+};
--- /dev/null
+/*
+ * intel_soc_clv.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER
+
+#define PM_SUPPORT 0x21
+
+#define ISP_POS 7
+#define ISP_SUB_CLASS 0x80
+#define PMU_MISC_SET_TIMEOUT 15000
+
+#define PMU1_MAX_DEVS 8
+#define PMU2_MAX_DEVS 55
+
+#define GFX_LSS_INDEX 1
+#define PMU_SDIO0_LSS_00 0
+#define PMU_EMMC0_LSS_01 1
+#define PMU_AONT_LSS_02 2
+#define PMU_HSI_LSS_03 3
+#define PMU_SECURITY_LSS_04 4
+#define PMU_EMMC1_LSS_05 5
+#define PMU_USB_OTG_LSS_06 6
+#define PMU_USB_HSIC_LSS_07 7
+#define PMU_AUDIO_ENGINE_LSS_08 8
+#define PMU_AUDIO_DMA_LSS_09 9
+#define PMU_SRAM_LSS_10 10
+#define PMU_SRAM_LSS_11 11
+#define PMU_SRAM_LSS_12 12
+#define PMU_SRAM_LSS_13 13
+#define PMU_SDIO2_LSS_14 14
+#define PMU_PTI_DAFCA_LSS_15 15
+#define PMU_SC_DMA_LSS_16 16
+#define PMU_SPIO_LSS_17 17
+#define PMU_SPI1_LSS_18 18
+#define PMU_SPI2_LSS_19 19
+#define PMU_I2C0_LSS_20 20
+#define PMU_I2C1_LSS_21 21
+#define PMU_HPET_LSS_22 22
+#define PMU_EXTTMR_LSS_23 23
+#define PMU_SC_FABRIC_LSS_24 24
+#define PMU_AUDIO_RAM_LSS_25 25
+#define PMU_SCU_ROM_LSS_26 26
+#define PMU_I2C2_LSS_27 27
+#define PMU_SSC_LSS_28 28
+#define PMU_SECURITY_LSS_29 29
+#define PMU_SDIO1_LSS_30 30
+#define PMU_vRTC_LSS_31 31
+#define PMU_SEC_TIMER_LSS_32 32
+#define PMU_I2C3_LSS_33 33
+#define PMU_I2C4_LSS_34 34
+#define PMU_I2C5_LSS_35 35
+#define PMU_SPI3_LSS_36 36
+#define PMU_GPIO1_LSS_37 37
+#define PMU_PWR_BUTTON_LSS_38 38
+#define PMU_GPIO0_LSS_39 39
+#define PMU_KEYBRD_LSS_40 40
+#define PMU_UART2_LSS_41 41
+#define PMU_ADC_LSS_42 42
+#define PMU_CHARGER_LSS_43 43
+#define PMU_SEC_TAPC_LSS_44 44
+#define PMU_RTC_LSS_45 45
+#define PMU_GPI_LSS_46 46
+#define PMU_BCU_LSS_47 47
+#define PMU_SSP2_LSS_48 48
+#define PMU_AUDIO_SLIM1_LSS_49 49
+#define PMU_AUDIO_SLIM2_LSS_50 50
+#define PMU_AUDIO_SSP0_LSS_51 51
+#define PMU_AUDIO_SSP1_LSS_52 52
+#define PMU_IOSF_OCP_BRG_LSS_53 53
+#define PMU_GP_DMA_LSS_54 54
+#define PMU_MSIC_RESET_LSS_55 55
+#define PMU_SOC_FUSE_LSS_56 56
+#define PMU_RSVD3_LSS_57 57
+#define PMU_SSP4_LSS_58 58
+#define PMU_RSVD5_LSS_59 59
+#define PMU_RSVD6_LSS_60 60
+#define PMU_RSVD7_LSS_61 61
+#define PMU_RSVD8_LSS_62 62
+#define PMU_RSVD9_LSS_63 63
+
+#define PMU_MAX_LSS 63
+#define PMU_LSS_IN_FIRST_DWORD 32
+
+#define EMMC0_LSS PMU_EMMC0_LSS_01
+
+#define S0IX_TARGET_SSS0_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I3_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+#define S0IX_TARGET_SSS2_MASK ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3_MASK ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define S0IX_TARGET_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0IX_TARGET_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I3_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2_MASK ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3_MASK ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I0_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define IGNORE_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_10) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_11) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_12) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_13) | \
+ SSMSK(D0I3_MASK, PMU_PTI_DAFCA_LSS_15))
+
+#define IGNORE_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SC_DMA_LSS_16-16) | \
+ SSMSK(D0I3_MASK, PMU_SPIO_LSS_17-16) | \
+ SSMSK(D0I3_MASK, PMU_HPET_LSS_22-16) | \
+ SSMSK(D0I3_MASK, PMU_EXTTMR_LSS_23-16) | \
+ SSMSK(D0I3_MASK, PMU_SC_FABRIC_LSS_24-16) | \
+ SSMSK(D0I3_MASK, PMU_SCU_ROM_LSS_26-16) | \
+ SSMSK(D0I3_MASK, PMU_SSC_LSS_28-16) | \
+ SSMSK(D0I3_MASK, PMU_SECURITY_LSS_29-16) | \
+ SSMSK(D0I3_MASK, PMU_vRTC_LSS_31-16))
+
+#define IGNORE_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_SEC_TIMER_LSS_32-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+ SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO0_LSS_39-32) | \
+ SSMSK(D0I3_MASK, PMU_ADC_LSS_42-32) | \
+ SSMSK(D0I3_MASK, PMU_CHARGER_LSS_43-32) | \
+ SSMSK(D0I3_MASK, PMU_SEC_TAPC_LSS_44-32) | \
+ SSMSK(D0I3_MASK, PMU_RTC_LSS_45-32) | \
+ SSMSK(D0I3_MASK, PMU_GPI_LSS_46-32) | \
+ SSMSK(D0I3_MASK, PMU_BCU_LSS_47-32))
+
+#define IGNORE_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_IOSF_OCP_BRG_LSS_53-48) | \
+ SSMSK(D0I3_MASK, PMU_MSIC_RESET_LSS_55-48) | \
+ SSMSK(D0I3_MASK, PMU_SOC_FUSE_LSS_56-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD3_LSS_57-48) | \
+ SSMSK(D0I3_MASK, PMU_SSP4_LSS_58-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD5_LSS_59-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD6_LSS_60-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD7_LSS_61-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD8_LSS_62-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD9_LSS_63-48))
+
+#define IGNORE_S3_WKC0 SSWKC(PMU_AONT_LSS_02)
+#define IGNORE_S3_WKC1 SSWKC(PMU_ADC_LSS_42-32)
+
+/* FIXME: CLV platform gives an SRAM error if SRAM is put in D0i3 */
+#define S0I3_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0I3_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_RAM_LSS_25-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0I3_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+ SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+ SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0I3_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SLIM2_LSS_50-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+ SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+#define S0I1_SSS0 S0I3_SSS0
+#define S0I1_SSS1 S0I3_SSS1
+#define S0I1_SSS2 S0I3_SSS2
+#define S0I1_SSS3 S0I3_SSS3
+
+#define LPMP3_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+ SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+ SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SLIM2_LSS_50-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+ SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+extern void pmu_set_s0ix_possible(int state);
+extern void log_wakeup_irq(void);
+extern void s0ix_complete(void);
+extern int mdfld_clv_nc_set_power_state(int, int, int, int *);
+
+#endif
--- /dev/null
+/*
+ * intel_soc_debug.c - This driver provides utility debug APIs
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_soc_debug.h>
+
+/* This module currently only supports Intel Tangier
+ * and Anniedale SOCs (CONFIG_INTEL_DEBUG_FEATURE will
+ * only be set in i386_mrfl_defconfig and i386_moor_defconfig).
+ * In addition, a platform check is done in soc_debug_init()
+ * to make sure that this module is only used by appropriate
+ * platforms.
+ */
+#define PGRR_BASE 0xff03a0bc
+#define MAX_MODE_NUMBER 9
+#define MAX_DEBUG_NUMBER 5
+
+static struct dentry *dfs_entry;
+
+enum pgrr_mode {
+ manufacturing_mode = 0x0F,
+ production_mode = 0x07,
+ intel_production_mode = 0x04,
+ oem_production_mode = 0x05,
+ gfx_production_mode = 0x0E,
+ end_user_mode = 0x0B,
+ intel_end_user_mode = 0x08,
+ rma_mode = 0x03,
+ permanent_mode = 0x00
+};
+
+static struct debug_mode {
+ enum pgrr_mode mode;
+ u32 bitmask;
+ char *name;
+} asset_array[] = {
+ { manufacturing_mode,
+ DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "ManufacturingMode",
+ },
+ { production_mode,
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "ProductionMode",
+ },
+ { intel_production_mode,
+ DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "IntelProductionMode",
+ },
+ { oem_production_mode,
+ DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "OemProductionMode",
+ },
+ { gfx_production_mode,
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "GfxProductionMode",
+ },
+ { intel_end_user_mode,
+ DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "IntelEndUserMode",
+ },
+ { end_user_mode,
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "EndUserMode",
+ },
+ { rma_mode,
+ DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "RmaMode",
+ },
+ { permanent_mode,
+ DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+ "PermanentMode",
+ }
+};
+
+static int debug_mode_idx; /* index in asset_array */
+
+static struct debug_feature {
+ u32 bit;
+ char *name;
+} debug_feature_array[] = {
+ { DEBUG_FEATURE_PTI,
+ "PTI",
+ },
+ { DEBUG_FEATURE_RTIT,
+ "RTIT",
+ },
+ { DEBUG_FEATURE_LAKEMORE,
+ "LAKERMORE",
+ },
+ { DEBUG_FEATURE_SOCHAPS,
+ "SOCHAPS",
+ },
+ { DEBUG_FEATURE_USB3DFX,
+ "USB3DFX",
+ },
+};
+
+int cpu_has_debug_feature(u32 bit)
+{
+ if (asset_array[debug_mode_idx].bitmask & bit)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(cpu_has_debug_feature);
+
+static int show_debug_feature(struct seq_file *s, void *unused)
+{
+ int i = 0;
+
+ if (debug_mode_idx >= 0 && (debug_mode_idx < MAX_MODE_NUMBER)) {
+ seq_printf(s, "Profile: %s\n",
+ asset_array[debug_mode_idx].name);
+
+ for (i = 0; i < MAX_DEBUG_NUMBER; i++)
+ if (cpu_has_debug_feature(debug_feature_array[i].bit))
+ seq_printf(s, "%s\n",
+ debug_feature_array[i].name);
+ }
+
+ return 0;
+}
+
+static int debug_feature_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_debug_feature, NULL);
+}
+
+static const struct file_operations debug_feature_ops = {
+ .open = debug_feature_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int __init soc_debug_init(void)
+{
+ u32 __iomem *pgrr;
+ int i = 0;
+ enum pgrr_mode soc_debug_setting = 0;
+
+ if ((intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) &&
+ (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_ANNIEDALE))
+ return -EINVAL;
+
+ /* Read Policy Generator Result Register */
+ pgrr = ioremap_nocache(PGRR_BASE, sizeof(u32));
+ if (pgrr == NULL)
+ return -EFAULT;
+
+ pr_info("pgrr = %08x\n", *pgrr);
+ soc_debug_setting = *pgrr & 0x0F;
+ iounmap(pgrr);
+
+ for (i = 0; i < MAX_MODE_NUMBER; i++)
+ if (asset_array[i].mode == soc_debug_setting)
+ break;
+
+ if (i == MAX_MODE_NUMBER)
+ return -EFAULT;
+
+ debug_mode_idx = i;
+
+ dfs_entry = debugfs_create_file("debug_feature", S_IFREG | S_IRUGO,
+ NULL, NULL, &debug_feature_ops);
+
+ return 0;
+}
+arch_initcall(soc_debug_init);
+
+void __exit soc_debug_exit(void)
+{
+ debugfs_remove(dfs_entry);
+}
+module_exit(soc_debug_exit);
--- /dev/null
+/*
+ * intel_soc_dump.c - This driver provides a debugfs interface to read or
+ * write any registers inside the SoC. Supported access methods are:
+ * mmio, msg_bus, pci and i2c.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ * Author: Bin Gao <bin.gao@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Two files are created in debugfs root folder: dump_cmd and dump_output.
+ * Echo a dump command to the file dump_cmd, and then cat the file dump_output.
+ * Even for write command, you still have to run "cat dump_output", otherwise
+ * the data will not be really written.
+ *
+ * It works like this:
+ * $ echo "dump command" > dump_cmd
+ * $ cat dump_output
+ *
+ * I/O memory read: echo "r[1|2|4] mmio <addr> [<len>]" > dump_cmd
+ * e.g. echo "r mmio 0xff180000" > dump_cmd
+ *
+ * I/O memory write: echo "w[1|2|4] <addr> <val>" > dump_cmd
+ * e.g. echo "w mmio 0xff190000 0xf0107a08" > dump_cmd
+ *
+ * I/O port read: echo "r[1|2|4] port <port>" > dump_cmd
+ * e.g. echo "r port 0xcf8" > dump_cmd
+ *
+ * I/O port write: echo "w[1|2|4] <port> <val>" > dump_cmd
+ * e.g. echo "w4 port 0xcfc 0x80002188" > dump_cmd
+ *
+ * message bus read: echo "r msg_bus <port> <addr> [<len>]" > dump_cmd
+ * e.g. echo "r msg_bus 0x02 0x30" > dump_cmd
+ *
+ * message bus write: echo "w msg_bus <port> <addr> <val>" > dump_cmd
+ * e.g. echo "w msg_bus 0x02 0x30 0x1020003f" > dump_cmd
+ *
+ * pci config read: echo "r[1|2|4] pci <bus> <dev> <func> <reg> [<len>]" >
+ * dump_cmd
+ * e.g. echo "r1 pci 0 2 0 0x20" > dump_cmd
+ *
+ * pci config write: echo "w[1|2|4] pci <bus> <dev> <func> <reg> <value>" >
+ * dump_cmd
+ * e.g. echo "w pci 0 2 0 0x20 0x380020f3" > dump_cmd
+ *
+ * msr read: echo "r[4|8] msr [<cpu>|all] <reg>" > dump_cmd
+ * read can be 32bit(r4) or 64bit(r8), default is r8 (=r)
+ * cpu can be 0, 1, 2, 3, ... or all, default is all
+ * e.g. echo "r msr 0 0xcd" > dump_cmd
+ * (read all cpu's msr reg 0xcd in 64bit mode)
+ *
+ * msr write: echo "w[4|8] msr [<cpu>|all] <reg> <val>" > dump_cmd
+ * write can be 32bit(w4) or 64bit(w8), default is w8 (=w)
+ * cpu can be 0, 1, 2, 3, ... or all, default is all
+ * e.g. echo "w msr 1 289 0xf03090a0cc73be64" > dump_cmd
+ * (write value 0xf03090a0cc73be64 to cpu 1's msr reg 289 in 64bit mode)
+ *
+ * i2c read: echo "r i2c <bus> <addr>" > dump_cmd
+ * e.g. echo "r i2c 1 0x3e" > dump_cmd
+ *
+ * i2c write: echo "w i2c <bus> <addr> <val>" > dump_cmd
+ * e.g. echo "w i2c 2 0x70 0x0f" > dump_cmd
+ *
+ * SCU indirect memory read: echo "r[4] scu <addr>" > dump_cmd
+ * e.g. echo "r scu 0xff108194" > dump_cmd
+ *
+ * SCU indirect memory write: echo "w[4] scu <addr> <val>" > dump_cmd
+ * e.g. echo "w scu 0xff108194 0x03000001" > dump_cmd
+ *
+ * SCU indirect read/write is limited to those addresses in
+ * IndRdWrValidAddrRange array in SCU FW.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <asm/uaccess.h>
+#include <asm/intel-mid.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#define MAX_CMDLEN 96
+#define MAX_ERRLEN 255
+#define MIN_ARGS_NUM 3
+#define MAX_ARGS_NUM 8
+#define MAX_MMIO_PCI_LEN 4096
+#define MAX_MSG_BUS_LEN 64
+
+#define ACCESS_WIDTH_DEFAULT 0
+#define ACCESS_WIDTH_8BIT 1
+#define ACCESS_WIDTH_16BIT 2
+#define ACCESS_WIDTH_32BIT 4
+#define ACCESS_WIDTH_64BIT 8
+
+#define ACCESS_BUS_MMIO 1 /* I/O memory */
+#define ACCESS_BUS_PORT 2 /* I/O port */
+#define ACCESS_BUS_MSG_BUS 3 /* message bus */
+#define ACCESS_BUS_PCI 4 /* PCI bus */
+#define ACCESS_BUS_MSR 5 /* MSR registers */
+#define ACCESS_BUS_I2C 6 /* I2C bus */
+#define ACCESS_BUS_SCU_INDRW 7 /* SCU indirect read/write */
+
+#define ACCESS_DIR_READ 1
+#define ACCESS_DIR_WRITE 2
+
+#define RP_INDIRECT_READ 0x02 /* MSG_ID for indirect read via SCU */
+#define RP_INDIRECT_WRITE 0x05 /* MSG_ID for indirect write via SCU */
+
+#define SHOW_NUM_PER_LINE (32 / access_width)
+#define LINE_WIDTH (access_width * SHOW_NUM_PER_LINE)
+#define IS_WHITESPACE(c) ((c) == ' ' || (c) == '\t' || (c) == '\n')
+#define ADDR_RANGE(start, size, addr) \
+ ((addr >= start) && (addr < (start + size)))
+
+/* mmio <--> device map */
+struct mmio_pci_map {
+ u32 start;
+ size_t size;
+ u32 pci_bus:8;
+ u32 pci_dev:8;
+ u32 pci_func:8;
+ char name[24];
+};
+
+static struct dentry *dump_cmd_dentry, *dump_output_dentry;
+static int dump_cmd_was_set;
+static char dump_cmd_buf[MAX_CMDLEN], err_buf[MAX_ERRLEN + 1];
+
+static int access_dir, access_width, access_bus, access_len;
+static u32 access_value;
+static u64 access_value_64;
+
+/* I/O memory */
+static u32 mmio_addr;
+
+/* I/O port */
+static unsigned port_addr;
+
+/* msg_bus */
+static u8 msg_bus_port;
+static u32 msg_bus_addr;
+
+/* pci */
+static u8 pci_bus, pci_dev, pci_func;
+static u16 pci_reg;
+
+/* msr */
+static int msr_cpu;
+static u32 msr_reg;
+
+/* i2c */
+static u8 i2c_bus;
+static u32 i2c_addr;
+
+/* scu */
+static u32 scu_addr;
+
+static const struct mmio_pci_map soc_pnw_map[] = {
+ { 0xff128000, 0x400, 0, 0, 1, "SPI0" },
+ { 0xff128400, 0x400, 0, 0, 2, "SPI1" },
+ { 0xff128800, 0x400, 0, 2, 4, "SPI2" },
+
+ { 0xff12a000, 0x400, 0, 0, 3, "I2C0" },
+ { 0xff12a400, 0x400, 0, 0, 4, "I2C1" },
+ { 0xff12a800, 0x400, 0, 0, 5, "I2C2" },
+ { 0xff12ac00, 0x400, 0, 3, 2, "I2C3" },
+ { 0xff12b000, 0x400, 0, 3, 3, "I2C4" },
+ { 0xff12b400, 0x400, 0, 3, 4, "I2C5" },
+
+ { 0xffae5800, 0x400, 0, 2, 7, "SSP0" },
+ { 0xffae6000, 0x400, 0, 1, 4, "SSP1" },
+ { 0xffae6400, 0x400, 0, 1, 3, "SSP2" },
+ { 0xffaf0000, 0x800, 0, 2, 6, "LPE DMA1" },
+
+ { 0xff0d0000, 0x10000, 0, 1, 5, "SEP SECURITY" },
+ { 0xff11c000, 0x400, 0, 1, 7, "SCU IPC1" },
+
+ { 0xdff00000, 0x100000, 0, 2, 0, "GVD BAR0" },
+ { 0x40000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+ { 0xdfec0000, 0x40000, 0, 2, 0, "GVD BAR3" },
+
+ { 0xff11d000, 0x1000, 0, 2, 2, "PMU" },
+ { 0xffa60000, 0x20000, 0, 2, 3, "USB OTG" },
+
+ { 0xdf800000, 0x400000, 0, 3, 0, "ISP" },
+
+ { 0xff12c000, 0x800, 0, 2, 1, "GPIO0" },
+ { 0xff12c800, 0x800, 0, 3, 5, "GPIO1" },
+ { 0xff12b800, 0x800, 0, 2, 5, "GP DMA" },
+
+ { 0xffa58000, 0x100, 0, 4, 0, "SDIO0(HC2)" },
+ { 0xffa5c000, 0x100, 0, 4, 1, "SDIO1(HC1a)" },
+ { 0xffa2a000, 0x100, 0, 4, 2, "SDIO3(HC1b)" },
+ { 0xffa50000, 0x100, 0, 1, 0, "SDIO3/eMMC0(HC0a)" },
+ { 0xffa54000, 0x100, 0, 1, 1, "SDIO4/eMMC1(HC0b)" },
+
+ { 0xffa28080, 0x80, 0, 5, 0, "UART0" },
+ { 0xffa28100, 0x80, 0, 5, 1, "UART1" },
+ { 0xffa28180, 0x80, 0, 5, 2, "UART2" },
+ { 0xffa28400, 0x400, 0, 5, 3, "UART DMA" },
+
+ { 0xffa2e000, 0x400, 0, 6, 0, "PTI" },
+
+ /* no address assigned: { 0x0, 0, 0, 6, 1, "xx" }, */
+
+ { 0xffa29000, 0x800, 0, 6, 3, "HSI" },
+ { 0xffa29800, 0x800, 0, 6, 4, "HSI DMA" },
+};
+
+static const struct mmio_pci_map soc_clv_map[] = {
+ { 0xff138000, 0x400, 0, 0, 3, "I2C0" },
+ { 0xff139000, 0x400, 0, 0, 4, "I2C1" },
+ { 0xff13a000, 0x400, 0, 0, 5, "I2C2" },
+ { 0xff13b000, 0x400, 0, 3, 2, "I2C3" },
+ { 0xff13c000, 0x400, 0, 3, 3, "I2C4" },
+ { 0xff13d000, 0x400, 0, 3, 4, "I2C5" },
+
+ { 0xff128000, 0x400, 0, 0, 1, "SPI0/MSIC" },
+ { 0xff135000, 0x400, 0, 0, 2, "SPI1" },
+ { 0xff136000, 0x400, 0, 2, 4, "SPI2" },
+ /* invisible to IA: { 0xff137000, 0, -1, -1, -1, "SPI3" }, */
+
+ { 0xffa58000, 0x100, 0, 4, 0, "SDIO0 (HC2)" },
+ { 0xffa48000, 0x100, 0, 4, 1, "SDIO1 (HC1a)" },
+ { 0xffa4c000, 0x100, 0, 4, 2, "SDIO2 (HC1b)" },
+ { 0xffa50000, 0x100, 0, 1, 0, "SDIO3/eMMC0 (HC0a)" },
+ { 0xffa54000, 0x100, 0, 1, 1, "SDIO4/eMMC1 (HC0b)" },
+
+ { 0xff119000, 0x800, 0, 2, 1, "GPIO0" },
+ { 0xff13f000, 0x800, 0, 3, 5, "GPIO1" },
+ { 0xff13e000, 0x800, 0, 2, 5, "GP DMA" },
+
+ { 0xffa20000, 0x400, 0, 2, 7, "SSP0" },
+ { 0xffa21000, 0x400, 0, 1, 4, "SSP1" },
+ { 0xffa22000, 0x400, 0, 1, 3, "SSP2" },
+ /* invisible to IA: { 0xffa23000, 0, -1, -1, -1, "SSP3" }, */
+
+ /* invisible to IA: { 0xffaf8000, 0, -1, -1, -1, "LPE DMA0" }, */
+ { 0xffaf0000, 0x800, 0, 2, 6, "LPE DMA1" },
+ { 0xffae8000, 0x1000, 0, 1, 3, "LPE SHIM" },
+ /* { 0xffae9000, 0, 0, 6, 5, "VIBRA" }, LPE SHIM BASE + 0x1000 */
+
+ { 0xffa28080, 0x80, 0, 5, 0, "UART0" },
+ { 0xffa28100, 0x80, 0, 5, 1, "UART1" },
+ { 0xffa28180, 0x80, 0, 5, 2, "UART2" },
+ { 0xffa28400, 0x400, 0, 5, 3, "UART DMA" },
+
+ { 0xffa29000, 0x800, 0, 6, 3, "HSI" },
+ { 0xffa2a000, 0x800, 0, 6, 4, "HSI DMA" },
+
+ { 0xffa60000, 0x20000, 0, 2, 3, "USB OTG" },
+ { 0xffa80000, 0x60000, 0, 6, 5, "USB SPH" },
+
+ { 0xff0d0000, 0x10000, 0, 1, 5, "SEP SECURITY" },
+
+ { 0xdff00000, 0x100000, 0, 2, 0, "GVD BAR0" },
+ { 0x40000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+ { 0xdfec0000, 0x40000, 0, 2, 0, "GVD BAR3" },
+ /* No address assigned: { 0x0, 0, 0, 6, 1, "HDMI HOTPLUG" }, */
+
+ { 0xdf800000, 0x400000, 0, 3, 0, "ISP" },
+
+ { 0xffa2e000, 0x400, 0, 6, 0, "PTI" },
+ { 0xff11c000, 0x400, 0, 1, 7, "SCU IPC1" },
+ { 0xff11d000, 0x1000, 0, 2, 2, "PMU" },
+};
+
+static const struct mmio_pci_map soc_tng_map[] = {
+ /* I2C0 is reserved for SCU<-->PMIC communication */
+ { 0xff18b000, 0x400, 0, 8, 0, "I2C1" },
+ { 0xff18c000, 0x400, 0, 8, 1, "I2C2" },
+ { 0xff18d000, 0x400, 0, 8, 2, "I2C3" },
+ { 0xff18e000, 0x400, 0, 8, 3, "I2C4" },
+ { 0xff18f000, 0x400, 0, 9, 0, "I2C5" },
+ { 0xff190000, 0x400, 0, 9, 1, "I2C6" },
+ { 0xff191000, 0x400, 0, 9, 2, "I2C7" },
+
+ /* SDIO controllers number: 4 (compared to 5 of PNW/CLV) */
+ { 0xff3fa000, 0x100, 0, 1, 2, "SDIO0 (HC2)" },
+ { 0xff3fb000, 0x100, 0, 1, 3, "SDIO1 (HC1a)" },
+ { 0xff3fc000, 0x100, 0, 1, 0, "SDIO3/eMMC0 (HC0a)" },
+ { 0xff3fd000, 0x100, 0, 1, 1, "SDIO4/eMMC1 (HC0b)" },
+
+ /* GPIO0 and GPIO1 are merged to one GPIO controller in TNG */
+ { 0xff008000, 0x1000, 0, 12, 0, "GPIO" },
+ { 0xff192000, 0x1000, 0, 21, 0, "GP DMA" },
+
+ /* SSP Audio: SSP0: Modem, SSP1: Audio Codec, SSP2: Bluetooth */
+
+ /* LPE */
+ { 0xff340000, 0x4000, 0, 13, 0, "LPE SHIM" },
+ { 0xff344000, 0x1000, 0, 13, 0, "MAILBOX RAM" },
+ { 0xff2c0000, 0x14000, 0, 13, 0, "ICCM" },
+ { 0xff300000, 0x28000, 0, 13, 0, "DCCM" },
+ { 0xff298000, 0x4000, 0, 14, 0, "LPE DMA0" },
+ /* invisible to IA: { 0xff29c000, 0x4000, -1, -1, -1, "LPE DMA1" }, */
+
+
+ /* SSP SC: SSP4: used by SCU for SPI Debug Card */
+ /* invisible to IA: { 0xff00e000, 0x1000, -1, -1, -1, "SSP SC" }, */
+
+ /* SSP General Purpose */
+ { 0xff188000, 0x1000, 0, 7, 0, "SSP3" },
+ { 0xff189000, 0x1000, 0, 7, 1, "SSP5" },
+ { 0xff18a000, 0x1000, 0, 7, 2, "SSP6" },
+
+ /* UART */
+ { 0xff010080, 0x80, 0, 4, 1, "UART0" },
+ { 0xff011000, 0x80, 0, 4, 2, "UART1" },
+ { 0xff011080, 0x80, 0, 4, 3, "UART2" },
+ { 0xff011400, 0x400, 0, 5, 0, "UART DMA" },
+
+ /* HSI */
+ { 0xff3f8000, 0x1000, 0, 10, 0, "HSI" },
+
+ /* USB */
+ { 0xf9040000, 0x20000, 0, 15, 0, "USB2 OTG" },
+ { 0xf9060000, 0x20000, 0, 16, 0, "USB2 MPH/HSIC" },
+ { 0xf9100000, 0x100000, 0, 17, 0, "USB3 OTG" },
+ /* { 0xf90f0000, 0x1000, -1, -1, -1, "USB3 PHY" }, */
+ /* { 0xf90a0000, 0x10000, -1, -1, -1, "USB3 DMA FETCH" }, */
+
+ /* Security/Chaabi */
+ { 0xf9030000, 0x1000, 0, 11, 0, "SEP SECURITY" },
+
+ /* Graphics/Display */
+ { 0xc0000000, 0x2000000, 0, 2, 0, "GVD BAR0" },
+ { 0x80000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+
+ /* ISP */
+ { 0xc2000000, 0x400000, 0, 3, 0, "ISP" },
+
+ /* PTI */
+ { 0xf9009000, 0x1000, 0, 18, 0, "PTI STM" },
+ { 0xf90a0000, 0x10000, 0, 18, 0, "PTI USB3 DMA FETCH" },
+ { 0xfa000000, 0x1000000, 0, 18, 0, "PTI APERTURE A" },
+
+ { 0xff009000, 0x1000, 0, 19, 0, "SCU-IA IPC" },
+ { 0xff00b000, 0x1000, 0, 20, 0, "PMU" },
+};
+
+static struct pci_dev *mmio_to_pci(u32 addr, char **name)
+{
+ int i, count;
+ struct mmio_pci_map *map;
+
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL) {
+ count = ARRAY_SIZE(soc_pnw_map);
+ map = (struct mmio_pci_map *) &soc_pnw_map[0];
+ } else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+ count = ARRAY_SIZE(soc_clv_map);
+ map = (struct mmio_pci_map *) &soc_clv_map[0];
+ } else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+ count = ARRAY_SIZE(soc_tng_map);
+ map = (struct mmio_pci_map *) &soc_tng_map[0];
+ } else {
+ return NULL;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (ADDR_RANGE(map[i].start, map[i].size, addr))
+ break;
+ }
+
+ if (i >= count)
+ return NULL;
+
+ *name = &map[i].name[0];
+ return pci_get_bus_and_slot(map[i].pci_bus,
+ PCI_DEVFN(map[i].pci_dev, map[i].pci_func));
+}
+
+static int parse_argument(char *input, char **args)
+{
+ int count, located;
+ char *p = input;
+ int input_len = strlen(input);
+
+ count = 0;
+ located = 0;
+ while (*p != 0) {
+ if (p - input >= input_len)
+ break;
+
+		/* Locate the first character of an argument */
+ if (!IS_WHITESPACE(*p)) {
+ if (!located) {
+ located = 1;
+ args[count++] = p;
+ if (count > MAX_ARGS_NUM)
+ break;
+ }
+ } else {
+ if (located) {
+ *p = 0;
+ located = 0;
+ }
+ }
+ p++;
+ }
+
+ return count;
+}
+
+static int dump_cmd_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, dump_cmd_buf);
+ return 0;
+}
+
+static int dump_cmd_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dump_cmd_show, NULL);
+}
+
+static int parse_mmio_args(char **arg_list, int arg_num)
+{
+ int ret;
+
+ if (arg_num < 3) {
+ snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+ "usage: r[1|2|4] <mmio> <addr> [<len>]\n"
+ " w[1|2|4] <mmio> <addr> <val>\n");
+ goto failed;
+ }
+
+ if (access_width == ACCESS_WIDTH_DEFAULT)
+ access_width = ACCESS_WIDTH_32BIT;
+
+ ret = kstrtou32(arg_list[2], 0, &mmio_addr);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid mmio address %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+
+ if ((access_width == ACCESS_WIDTH_32BIT) &&
+ (mmio_addr % 4)) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "addr %x is not 4 bytes aligned!\n",
+ mmio_addr);
+ goto failed;
+ }
+
+ if ((access_width == ACCESS_WIDTH_16BIT) &&
+ (mmio_addr % 2)) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "addr %x is not 2 bytes aligned!\n",
+ mmio_addr);
+ goto failed;
+ }
+
+ if (access_dir == ACCESS_DIR_READ) {
+ if (arg_num == 4) {
+ ret = kstrtou32(arg_list[3], 0, &access_len);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid mmio read length %s\n",
+ arg_list[3]);
+ goto failed;
+ }
+ } else if (arg_num > 4) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "usage: r[1|2|4] mmio <addr> "
+ "[<len>]\n");
+ goto failed;
+ }
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ if (arg_num != 4) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "need exact 4 arguments for "
+ "mmio write.\n");
+ goto failed;
+ }
+ ret = kstrtou32(arg_list[3], 0, &access_value);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid mmio address %s\n",
+ arg_list[3]);
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+static int parse_port_args(char **arg_list, int arg_num)
+{
+ int ret;
+
+ if (arg_num < 2) {
+ snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+ "usage: r[1|2|4] port <port>\n"
+ " w[1|2|4] port <port> <val>\n");
+ goto failed;
+ }
+
+ if (access_width == ACCESS_WIDTH_DEFAULT)
+ access_width = ACCESS_WIDTH_8BIT;
+
+ ret = kstrtou16(arg_list[2], 0, (u16 *)&port_addr);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid port address %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+
+ if ((access_width == ACCESS_WIDTH_32BIT) &&
+ (port_addr % ACCESS_WIDTH_32BIT)) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "port %x is not 4 bytes aligned!\n", port_addr);
+ goto failed;
+ }
+
+ if ((access_width == ACCESS_WIDTH_16BIT) &&
+ (port_addr % ACCESS_WIDTH_16BIT)) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "port %x is not 2 bytes aligned!\n", port_addr);
+ goto failed;
+ }
+
+ if (access_dir == ACCESS_DIR_READ) {
+ if (arg_num != 3) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "usage: r[1|2|4] port <port>\n");
+ goto failed;
+ }
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ if (arg_num != 4) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "need exact 4 arguments for port write.\n");
+ goto failed;
+ }
+ ret = kstrtou32(arg_list[3], 0, &access_value);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid value %s\n", arg_list[3]);
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+static int parse_msg_bus_args(char **arg_list, int arg_num)
+{
+ int ret;
+
+ if (arg_num < 4) {
+ snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+ "usage: r msg_bus <port> <addr> [<len>]\n"
+ " w msg_bus <port> <addr> <val>\n");
+ goto failed;
+ }
+
+ if (access_width == ACCESS_WIDTH_DEFAULT)
+ access_width = ACCESS_WIDTH_32BIT;
+
+ if (access_width != ACCESS_WIDTH_32BIT) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "only 32bit read/write are supported.\n");
+ goto failed;
+ }
+
+ ret = kstrtou8(arg_list[2], 0, &msg_bus_port);
+ if (ret || msg_bus_port > 255) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid msg_bus port %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+
+ ret = kstrtou32(arg_list[3], 0, &msg_bus_addr);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid msg_bus address %s\n",
+ arg_list[3]);
+ goto failed;
+ }
+
+ if (access_dir == ACCESS_DIR_READ) {
+ if (arg_num == 5) {
+ ret = kstrtou32(arg_list[4], 0, &access_len);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid msg_bus read length %s\n",
+ arg_list[4]);
+ goto failed;
+ }
+ } else if (arg_num > 5) {
+ snprintf(err_buf, MAX_ERRLEN, "too many arguments\n"
+ "usage: r[1|2|4] msg_bus "
+ "<port> <addr> [<len>]\n");
+ goto failed;
+ }
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ if (arg_num != 5) {
+			snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+				"usage: w msg_bus <port> <addr> <val>\n");
+ goto failed;
+ }
+ ret = kstrtou32(arg_list[4], 0, &access_value);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid value for msg_bus write %s\n",
+ arg_list[4]);
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+static int parse_pci_args(char **arg_list, int arg_num)
+{
+ int ret;
+
+ if (arg_num < 6) {
+ snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+ "usage: r[1|2|4] pci <bus> <dev> <func> <reg> [<len>]\n"
+ " w[1|2|4] pci <bus> <dev> <func> <reg> <val>\n");
+ goto failed;
+ }
+
+ if (access_width == ACCESS_WIDTH_DEFAULT)
+ access_width = ACCESS_WIDTH_32BIT;
+
+ ret = kstrtou8(arg_list[2], 0, &pci_bus);
+ if (ret || pci_bus > 255) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid pci bus %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+
+ ret = kstrtou8(arg_list[3], 0, &pci_dev);
+ if (ret || pci_dev > 255) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid pci device %s\n",
+ arg_list[3]);
+ goto failed;
+ }
+
+ ret = kstrtou8(arg_list[4], 0, &pci_func);
+ if (ret || pci_func > 255) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid pci function %s\n",
+ arg_list[4]);
+ goto failed;
+ }
+
+ ret = kstrtou16(arg_list[5], 0, &pci_reg);
+ if (ret || pci_reg > 4 * 1024) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid pci register %s\n",
+ arg_list[5]);
+ goto failed;
+ }
+
+ if ((access_width == ACCESS_WIDTH_32BIT) && (pci_reg % 4)) {
+ snprintf(err_buf, MAX_ERRLEN, "reg %x is not 4 bytes aligned!\n"
+ , (u32) pci_reg);
+ goto failed;
+ }
+
+ if ((access_width == ACCESS_WIDTH_16BIT) && (pci_reg % 2)) {
+ snprintf(err_buf, MAX_ERRLEN, "reg %x is not 2 bytes aligned\n",
+ pci_reg);
+ goto failed;
+ }
+
+ if (access_dir == ACCESS_DIR_READ) {
+ if (arg_num == 7) {
+ ret = kstrtou32(arg_list[6], 0, &access_len);
+ if (ret || access_len > 4 * 1024) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid pci read length %s\n",
+ arg_list[6]);
+ return ret;
+ }
+ } else if (arg_num > 7) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "max 7 args are allowed for pci read\n"
+ "usage: r[1|2|4] pci <bus> <dev> <func> "
+ "<reg> [<len>]\n");
+ goto failed;
+ }
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ if (arg_num != 7) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "need exact 7 args for pci write.\n");
+ goto failed;
+ }
+ ret = kstrtou32(arg_list[6], 0, &access_value);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid value for pci write %s\n",
+ arg_list[6]);
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+static int parse_msr_args(char **arg_list, int arg_num)
+{
+ int ret, arg_reg, arg_val;
+
+ if (((access_dir == ACCESS_DIR_READ) && (arg_num < 3)) ||
+ ((access_dir == ACCESS_DIR_WRITE) && (arg_num < 4))) {
+ snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[4|8] msr [<cpu> | all] <reg>\n"
+			"       w[4|8] msr [<cpu> | all] <reg> <val>\n");
+ goto failed;
+ }
+
+ if (((access_dir == ACCESS_DIR_READ) && (arg_num > 4)) ||
+ ((access_dir == ACCESS_DIR_WRITE) && (arg_num > 5))) {
+ snprintf(err_buf, MAX_ERRLEN, "too many arguments\n"
+			"usage: r[4|8] msr [<cpu> | all] <reg>\n"
+			"       w[4|8] msr [<cpu> | all] <reg> <val>\n");
+ goto failed;
+ }
+
+ if (access_width == ACCESS_WIDTH_DEFAULT)
+ access_width = ACCESS_WIDTH_64BIT;
+
+ if (!strncmp(arg_list[2], "all", 3)) {
+ msr_cpu = -1;
+ arg_reg = 3;
+ arg_val = 4;
+ } else if ((access_dir == ACCESS_DIR_READ && arg_num == 4) ||
+ (access_dir == ACCESS_DIR_WRITE && arg_num == 5)) {
+ ret = kstrtou32(arg_list[2], 0, &msr_cpu);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid cpu: %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+ arg_reg = 3;
+ arg_val = 4;
+ } else {
+ /* Default cpu for msr read is all, for msr write is 0 */
+ if (access_dir == ACCESS_DIR_READ)
+ msr_cpu = -1;
+ else
+ msr_cpu = 0;
+ arg_reg = 2;
+ arg_val = 3;
+ }
+
+
+ ret = kstrtou32(arg_list[arg_reg], 0, &msr_reg);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid msr reg: %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+ if (access_dir == ACCESS_DIR_WRITE) {
+ if (access_width == ACCESS_WIDTH_32BIT)
+ ret = kstrtou32(arg_list[arg_val], 0, &access_value);
+ else
+ ret = kstrtou64(arg_list[arg_val], 0, &access_value_64);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid value: %s\n",
+ arg_list[arg_val]);
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+static int parse_i2c_args(char **arg_list, int arg_num)
+{
+ int ret;
+
+ if ((access_dir == ACCESS_DIR_READ && arg_num != 4) ||
+ (access_dir == ACCESS_DIR_WRITE && arg_num != 5)) {
+ snprintf(err_buf, MAX_ERRLEN, "usage: r i2c <bus> <addr>\n"
+ " w i2c <bus> <addr> <val>\n");
+ goto failed;
+ }
+
+ if (access_width == ACCESS_WIDTH_DEFAULT)
+ access_width = ACCESS_WIDTH_8BIT;
+
+ if (access_width != ACCESS_WIDTH_8BIT) {
+ snprintf(err_buf, MAX_ERRLEN, "only 8bit access is allowed\n");
+ goto failed;
+ }
+
+ ret = kstrtou8(arg_list[2], 0, &i2c_bus);
+ if (ret || i2c_bus > 9) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid i2c bus %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+
+ ret = kstrtou32(arg_list[3], 0, &i2c_addr);
+
+ pr_err("ret = %d, i2c_addr is 0x%x\n", ret, i2c_addr);
+ if (ret || (i2c_addr > 1024)) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid i2c address %s\n",
+ arg_list[3]);
+ goto failed;
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ ret = kstrtou32(arg_list[4], 0, &access_value);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid value for i2c write %s\n",
+ arg_list[4]);
+ goto failed;
+ }
+ }
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+static int parse_scu_args(char **arg_list, int arg_num)
+{
+ int ret;
+
+ if (access_width != ACCESS_WIDTH_32BIT)
+ access_width = ACCESS_WIDTH_32BIT;
+
+ ret = kstrtou32(arg_list[2], 0, &scu_addr);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN, "invalid scu address %s\n",
+ arg_list[2]);
+ goto failed;
+ }
+
+ if (scu_addr % 4) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "addr %x is not 4 bytes aligned!\n",
+ scu_addr);
+ goto failed;
+ }
+
+ if (access_dir == ACCESS_DIR_READ) {
+ if (arg_num != 3) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "usage: r[4] scu <addr>\n");
+ goto failed;
+ }
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ if (arg_num != 4) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "usage: w[4] scu <addr> <val>\n");
+ goto failed;
+ }
+ ret = kstrtou32(arg_list[3], 0, &access_value);
+ if (ret) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid scu write value %s\n",
+ arg_list[3]);
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+static ssize_t dump_cmd_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *offset)
+{
+ char cmd[MAX_CMDLEN];
+ char *arg_list[MAX_ARGS_NUM];
+ int arg_num, ret = -EINVAL;
+
+ err_buf[0] = 0;
+
+ if (len >= MAX_CMDLEN) {
+ snprintf(err_buf, MAX_ERRLEN, "input command is too long.\n"
+ "max allowed input length is %d\n",
+ MAX_CMDLEN);
+ goto done;
+ }
+
+ if (copy_from_user(cmd, buf, len)) {
+ snprintf(err_buf, MAX_ERRLEN, "copy_from_user() failed.\n");
+ goto done;
+ }
+ cmd[len] = 0;
+
+ dump_cmd_buf[0] = 0;
+ strncpy(dump_cmd_buf, cmd, len);
+ dump_cmd_buf[len] = 0;
+
+ arg_num = parse_argument(cmd, arg_list);
+ if (arg_num < MIN_ARGS_NUM) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid command(too few arguments): "
+ "%s\n", dump_cmd_buf);
+ goto done;
+ }
+ if (arg_num > MAX_ARGS_NUM) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "invalid command(too many arguments): "
+ "%s\n", dump_cmd_buf);
+ goto done;
+ }
+
+ /* arg 1: direction(read/write) and mode (8/16/32/64 bit) */
+ if (!strncmp(arg_list[0], "r8", 2)) {
+ access_dir = ACCESS_DIR_READ;
+ access_width = ACCESS_WIDTH_64BIT;
+ } else if (!strncmp(arg_list[0], "r4", 2)) {
+ access_dir = ACCESS_DIR_READ;
+ access_width = ACCESS_WIDTH_32BIT;
+ } else if (!strncmp(arg_list[0], "r2", 2)) {
+ access_dir = ACCESS_DIR_READ;
+ access_width = ACCESS_WIDTH_16BIT;
+ } else if (!strncmp(arg_list[0], "r1", 2)) {
+ access_dir = ACCESS_DIR_READ;
+ access_width = ACCESS_WIDTH_8BIT;
+ } else if (!strncmp(arg_list[0], "r", 1)) {
+ access_dir = ACCESS_DIR_READ;
+ access_width = ACCESS_WIDTH_DEFAULT;
+ } else if (!strncmp(arg_list[0], "w8", 2)) {
+ access_dir = ACCESS_DIR_WRITE;
+ access_width = ACCESS_WIDTH_64BIT;
+ } else if (!strncmp(arg_list[0], "w4", 2)) {
+ access_dir = ACCESS_DIR_WRITE;
+ access_width = ACCESS_WIDTH_32BIT;
+ } else if (!strncmp(arg_list[0], "w2", 2)) {
+ access_dir = ACCESS_DIR_WRITE;
+ access_width = ACCESS_WIDTH_16BIT;
+ } else if (!strncmp(arg_list[0], "w1", 2)) {
+ access_dir = ACCESS_DIR_WRITE;
+ access_width = ACCESS_WIDTH_8BIT;
+ } else if (!strncmp(arg_list[0], "w", 1)) {
+ access_dir = ACCESS_DIR_WRITE;
+ access_width = ACCESS_WIDTH_DEFAULT;
+ } else {
+ snprintf(err_buf, MAX_ERRLEN, "unknown argument: %s\n",
+ arg_list[0]);
+ goto done;
+ }
+
+ /* arg2: bus type(mmio, msg_bus, pci or i2c) */
+ access_len = 1;
+ if (!strncmp(arg_list[1], "mmio", 4)) {
+ access_bus = ACCESS_BUS_MMIO;
+ ret = parse_mmio_args(arg_list, arg_num);
+ } else if (!strncmp(arg_list[1], "port", 4)) {
+ access_bus = ACCESS_BUS_PORT;
+ ret = parse_port_args(arg_list, arg_num);
+ } else if (!strncmp(arg_list[1], "msg_bus", 7)) {
+ access_bus = ACCESS_BUS_MSG_BUS;
+ ret = parse_msg_bus_args(arg_list, arg_num);
+ } else if (!strncmp(arg_list[1], "pci", 3)) {
+ access_bus = ACCESS_BUS_PCI;
+ ret = parse_pci_args(arg_list, arg_num);
+ } else if (!strncmp(arg_list[1], "msr", 3)) {
+ access_bus = ACCESS_BUS_MSR;
+ ret = parse_msr_args(arg_list, arg_num);
+ } else if (!strncmp(arg_list[1], "i2c", 3)) {
+ access_bus = ACCESS_BUS_I2C;
+ ret = parse_i2c_args(arg_list, arg_num);
+ } else if (!strncmp(arg_list[1], "scu", 3)) {
+ access_bus = ACCESS_BUS_SCU_INDRW;
+ ret = parse_scu_args(arg_list, arg_num);
+ } else {
+ snprintf(err_buf, MAX_ERRLEN, "unknown argument: %s\n",
+ arg_list[1]);
+ }
+
+ if (access_len == 0) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "access length must be larger than 0\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((access_bus == ACCESS_BUS_MMIO || access_bus == ACCESS_BUS_PCI) &&
+ (access_len > MAX_MMIO_PCI_LEN)) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "%d exceeds max mmio/pci read length(%d)\n",
+ access_len, MAX_MMIO_PCI_LEN);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((access_bus == ACCESS_BUS_MSG_BUS) &&
+ (access_len > MAX_MSG_BUS_LEN)) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "%d exceeds max msg_bus read length(%d)\n",
+ access_len, MAX_MSG_BUS_LEN);
+ ret = -EINVAL;
+ }
+
+ if (access_bus == ACCESS_BUS_MSR) {
+ if ((access_width != ACCESS_WIDTH_32BIT) &&
+ (access_width != ACCESS_WIDTH_64BIT) &&
+ (access_width != ACCESS_WIDTH_DEFAULT)) {
+ snprintf(err_buf, MAX_ERRLEN,
+ "only 32bit or 64bit is allowed for msr\n");
+ ret = -EINVAL;
+ }
+ }
+
+done:
+ dump_cmd_was_set = ret ? 0 : 1;
+ return ret ? ret : len;
+}
+
+static int dump_output_show_mmio(struct seq_file *s)
+{
+ void __iomem *base;
+ int i, comp1, comp2;
+ u32 start, end, end_natural;
+ struct pci_dev *pdev;
+ char *name;
+
+ pdev = mmio_to_pci(mmio_addr, &name);
+ if (pdev && pm_runtime_get_sync(&pdev->dev) < 0) {
+ seq_printf(s, "can't put device %s into D0i0 state\n", name);
+ return 0;
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ base = ioremap_nocache(mmio_addr, access_width);
+ if (!base) {
+ seq_printf(s, "can't map physical address: %x\n",
+ mmio_addr);
+ if (pdev)
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+ }
+ switch (access_width) {
+ case ACCESS_WIDTH_8BIT:
+ iowrite8((u8) access_value, base);
+ break;
+ case ACCESS_WIDTH_16BIT:
+ iowrite16((u16) access_value, base);
+ break;
+ case ACCESS_WIDTH_32BIT:
+ case ACCESS_WIDTH_DEFAULT:
+ iowrite32(access_value, base);
+ break;
+ default:
+ break; /* never happen */
+ }
+ seq_printf(s, "write succeeded\n");
+ } else {
+ start = (mmio_addr / LINE_WIDTH) * LINE_WIDTH;
+ end_natural = mmio_addr + (access_len - 1) * access_width;
+ end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+ access_width;
+ comp1 = (mmio_addr - start) / access_width;
+ comp2 = (end - end_natural) / access_width;
+
+ base = ioremap_nocache(start, (comp1 + comp2 +
+ access_len) * access_width);
+ if (!base) {
+ seq_printf(s, "can't map physical address: %x\n",
+ mmio_addr);
+ if (pdev)
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+ }
+
+ for (i = 0; i < comp1 + comp2 + access_len; i++) {
+ if ((i % SHOW_NUM_PER_LINE) == 0)
+ seq_printf(s, "[%08x]", start + i * 4);
+
+ if (i < comp1 || i >= access_len + comp1) {
+ switch (access_width) {
+ case ACCESS_WIDTH_32BIT:
+ seq_printf(s, " ");
+ break;
+ case ACCESS_WIDTH_16BIT:
+ seq_printf(s, " ");
+ break;
+ case ACCESS_WIDTH_8BIT:
+ seq_printf(s, " ");
+ break;
+ }
+
+ } else {
+ switch (access_width) {
+ case ACCESS_WIDTH_32BIT:
+ seq_printf(s, " %08x",
+ ioread32(base + i * 4));
+ break;
+ case ACCESS_WIDTH_16BIT:
+ seq_printf(s, " %04x",
+ (u16) ioread16(base + i * 2));
+ break;
+ case ACCESS_WIDTH_8BIT:
+ seq_printf(s, " %02x",
+ (u8) ioread8(base + i));
+ break;
+ }
+ }
+
+ if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+ seq_printf(s, "\n");
+ }
+ }
+
+ iounmap(base);
+ if (pdev)
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+}
+
+static int dump_output_show_port(struct seq_file *s)
+{
+ if (access_dir == ACCESS_DIR_WRITE) {
+ switch (access_width) {
+ case ACCESS_WIDTH_8BIT:
+ case ACCESS_WIDTH_DEFAULT:
+ outb((u8) access_value, port_addr);
+ break;
+ case ACCESS_WIDTH_16BIT:
+ outw((u16) access_value, port_addr);
+ break;
+ case ACCESS_WIDTH_32BIT:
+ outl(access_value, port_addr);
+ break;
+ default:
+ break; /* never happen */
+ }
+ seq_printf(s, "write succeeded\n");
+ } else {
+ switch (access_width) {
+ case ACCESS_WIDTH_32BIT:
+ seq_printf(s, " %08x\n", inl(port_addr));
+ break;
+ case ACCESS_WIDTH_16BIT:
+ seq_printf(s, " %04x\n", (u16) inw(port_addr));
+ break;
+ case ACCESS_WIDTH_8BIT:
+ seq_printf(s, " %02x\n", (u8) inb(port_addr));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int dump_output_show_msg_bus(struct seq_file *s)
+{
+ int i, comp1, comp2;
+ u32 start, end, end_natural;
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ intel_mid_msgbus_write32(msg_bus_port,
+ msg_bus_addr, access_value);
+ seq_printf(s, "write succeeded\n");
+ } else {
+ start = (msg_bus_addr / LINE_WIDTH) * LINE_WIDTH;
+ end_natural = msg_bus_addr + (access_len - 1) * access_width;
+ end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+ access_width;
+ comp1 = (msg_bus_addr - start) / access_width;
+ comp2 = (end - end_natural) / access_width;
+
+ for (i = 0; i < comp1 + comp2 + access_len; i++) {
+ if ((i % SHOW_NUM_PER_LINE) == 0)
+ seq_printf(s, "[%08x]", start + i * 4);
+
+ if (i < comp1 || i >= access_len + comp1)
+ seq_printf(s, " ");
+
+ else
+ seq_printf(s, " %08x", intel_mid_msgbus_read32(
+ msg_bus_port, msg_bus_addr + i));
+
+ if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+ seq_printf(s, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static int dump_output_show_pci(struct seq_file *s)
+{
+ int i, comp1, comp2;
+ u32 start, end, end_natural, val;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_bus_and_slot(pci_bus, PCI_DEVFN(pci_dev, pci_func));
+ if (!pdev) {
+ seq_printf(s, "pci bus %d:%d:%d doesn't exist\n",
+ pci_bus, pci_dev, pci_func);
+ return 0;
+ }
+
+ if (pm_runtime_get_sync(&pdev->dev) < 0) {
+ seq_printf(s, "can't put pci device %d:%d:%d into D0i0 state\n",
+ pci_bus, pci_dev, pci_func);
+ return 0;
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ switch (access_width) {
+ case ACCESS_WIDTH_8BIT:
+ pci_write_config_byte(pdev, (int)pci_reg,
+ (u8)access_value);
+ break;
+ case ACCESS_WIDTH_16BIT:
+ pci_write_config_word(pdev, (int)pci_reg,
+ (u16)access_value);
+ break;
+ case ACCESS_WIDTH_32BIT:
+ case ACCESS_WIDTH_DEFAULT:
+ pci_write_config_dword(pdev, (int)pci_reg,
+ access_value);
+ break;
+ default:
+ break; /* never happen */
+ }
+ seq_printf(s, "write succeeded\n");
+ } else {
+ start = (pci_reg / LINE_WIDTH) * LINE_WIDTH;
+ end_natural = pci_reg + (access_len - 1) * access_width;
+ end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+ access_width;
+ comp1 = (pci_reg - start) / access_width;
+ comp2 = (end - end_natural) / access_width;
+
+ for (i = 0; i < comp1 + comp2 + access_len; i++) {
+ if ((i % SHOW_NUM_PER_LINE) == 0)
+ seq_printf(s, "[%08x]", start + i * 4);
+
+ if (i < comp1 || i >= access_len + comp1) {
+ switch (access_width) {
+ case ACCESS_WIDTH_32BIT:
+ seq_printf(s, " ");
+ break;
+ case ACCESS_WIDTH_16BIT:
+ seq_printf(s, " ");
+ break;
+ case ACCESS_WIDTH_8BIT:
+ seq_printf(s, " ");
+ break;
+ }
+
+ } else {
+ switch (access_width) {
+ case ACCESS_WIDTH_32BIT:
+ pci_read_config_dword(pdev,
+ start + i * 4, &val);
+ seq_printf(s, " %08x", val);
+ break;
+ case ACCESS_WIDTH_16BIT:
+ pci_read_config_word(pdev,
+ start + i * 2, (u16 *) &val);
+ seq_printf(s, " %04x", (u16)val);
+ break;
+ case ACCESS_WIDTH_8BIT:
+ pci_read_config_byte(pdev,
+ start + i, (u8 *) &val);
+ seq_printf(s, " %04x", (u8)val);
+ break;
+ }
+ }
+
+ if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+ seq_printf(s, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static int dump_output_show_msr(struct seq_file *s)
+{
+ int ret, i, count;
+ u32 data[2];
+
+ if (access_dir == ACCESS_DIR_READ) {
+ if (msr_cpu < 0) {
+ /* loop for all cpus */
+ i = 0;
+ count = nr_cpu_ids;
+ } else if (msr_cpu >= nr_cpu_ids || msr_cpu < 0) {
+ seq_printf(s, "cpu should be between 0 - %d\n",
+ nr_cpu_ids - 1);
+ return 0;
+ } else {
+ /* loop for one cpu */
+ i = msr_cpu;
+ count = msr_cpu + 1;
+ }
+ for (; i < count; i++) {
+ ret = rdmsr_safe_on_cpu(i, msr_reg, &data[0], &data[1]);
+ if (ret) {
+ seq_printf(s, "msr read error: %d\n", ret);
+ return 0;
+ } else {
+ if (access_width == ACCESS_WIDTH_32BIT)
+ seq_printf(s, "[cpu %1d] %08x\n",
+ i, data[0]);
+ else
+ seq_printf(s, "[cpu %1d] %08x%08x\n",
+ i, data[1], data[0]);
+ }
+ }
+ } else {
+ if (access_width == ACCESS_WIDTH_32BIT) {
+ ret = rdmsr_safe_on_cpu(msr_cpu, msr_reg,
+ &data[0], &data[1]);
+ if (ret) {
+ seq_printf(s, "msr write error: %d\n", ret);
+ return 0;
+ }
+ data[0] = access_value;
+ } else {
+ data[0] = (u32)access_value_64;
+ data[1] = (u32)(access_value_64 >> 32);
+ }
+ if (msr_cpu < 0) {
+ /* loop for all cpus */
+ i = 0;
+ count = nr_cpu_ids;
+ } else {
+ if (msr_cpu >= nr_cpu_ids || msr_cpu < 0) {
+ seq_printf(s, "cpu should be between 0 - %d\n",
+ nr_cpu_ids - 1);
+ return 0;
+ }
+ /* loop for one cpu */
+ i = msr_cpu;
+ count = msr_cpu + 1;
+ }
+ for (; i < count; i++) {
+ ret = wrmsr_safe_on_cpu(i, msr_reg, data[0], data[1]);
+ if (ret) {
+ seq_printf(s, "msr write error: %d\n", ret);
+ return 0;
+ } else {
+ seq_printf(s, "write succeeded.\n");
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int dump_output_show_i2c(struct seq_file *s)
+{
+ int ret;
+ struct i2c_adapter *adap;
+ struct i2c_msg msg;
+ u8 val;
+
+ adap = i2c_get_adapter(i2c_bus);
+ if (!adap) {
+ seq_printf(s, "can't find bus adapter for i2c bus %d\n",
+ i2c_bus);
+ return 0;
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ msg.addr = i2c_addr;
+ msg.len = 1;
+ msg.buf = (u8 *) &access_value;
+ ret = i2c_transfer(adap, &msg, 1);
+ if (ret != 1)
+ seq_printf(s, "i2c write error: %d\n", ret);
+ else
+ seq_printf(s, "write succeeded.\n");
+ } else {
+ msg.flags |= I2C_M_RD;
+ msg.addr = i2c_addr;
+ msg.len = 1;
+ msg.buf = &val;
+ ret = i2c_transfer(adap, &msg, 1);
+ if (ret != 1)
+ seq_printf(s, "i2c read error: %d\n", ret);
+ else
+ seq_printf(s, "%02x\n", val);
+ }
+
+ return 0;
+}
+
+static int dump_output_show_scu(struct seq_file *s)
+{
+ struct pci_dev *pdev;
+ char *name;
+ int ret;
+ u32 cmd, sub = 0, dptr = 0, sptr = 0;
+ u8 wbuflen = 4, rbuflen = 4;
+ u8 wbuf[16];
+ u8 rbuf[16];
+
+ memset(wbuf, 0, 16);
+ memset(rbuf, 0, 16);
+
+ pdev = mmio_to_pci(scu_addr, &name);
+ if (pdev && pm_runtime_get_sync(&pdev->dev) < 0) {
+ seq_printf(s, "can't put device %s into D0i0 state\n", name);
+ return 0;
+ }
+
+ if (access_dir == ACCESS_DIR_WRITE) {
+ cmd = RP_INDIRECT_WRITE;
+ dptr = scu_addr;
+ wbuf[0] = (u8) (access_value & 0xff);
+ wbuf[1] = (u8) ((access_value >> 8) & 0xff);
+ wbuf[2] = (u8) ((access_value >> 16) & 0xff);
+ wbuf[3] = (u8) ((access_value >> 24) & 0xff);
+
+ ret = rpmsg_send_generic_raw_command(cmd, sub, wbuf, wbuflen,
+ (u32 *)rbuf, rbuflen, dptr, sptr);
+
+ if (ret) {
+ seq_printf(s,
+ "Indirect write failed (check dmesg): "
+ "[%08x]\n", scu_addr);
+ } else {
+ seq_printf(s, "write succeeded\n");
+ }
+ } else if (access_dir == ACCESS_DIR_READ) {
+ cmd = RP_INDIRECT_READ;
+ sptr = scu_addr;
+
+ ret = rpmsg_send_generic_raw_command(cmd, sub, wbuf, wbuflen,
+ (u32 *)rbuf, rbuflen, dptr, sptr);
+
+ if (ret) {
+ seq_printf(s,
+ "Indirect read failed (check dmesg): "
+ "[%08x]\n", scu_addr);
+ } else {
+ access_value = (rbuf[3] << 24) | (rbuf[2] << 16) |
+ (rbuf[1] << 8) | (rbuf[0]);
+ seq_printf(s, "[%08x] %08x\n", scu_addr, access_value);
+ }
+ }
+
+ if (pdev)
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static int dump_output_show(struct seq_file *s, void *unused)
+{
+ int ret = 0;
+
+ if (!dump_cmd_was_set) {
+ seq_printf(s, "%s", err_buf);
+ return 0;
+ }
+
+ switch (access_bus) {
+ case ACCESS_BUS_MMIO:
+ ret = dump_output_show_mmio(s);
+ break;
+ case ACCESS_BUS_PORT:
+ ret = dump_output_show_port(s);
+ break;
+ case ACCESS_BUS_MSG_BUS:
+ ret = dump_output_show_msg_bus(s);
+ break;
+ case ACCESS_BUS_PCI:
+ ret = dump_output_show_pci(s);
+ break;
+ case ACCESS_BUS_MSR:
+ ret = dump_output_show_msr(s);
+ break;
+ case ACCESS_BUS_I2C:
+ ret = dump_output_show_i2c(s);
+ break;
+ case ACCESS_BUS_SCU_INDRW:
+ ret = dump_output_show_scu(s);
+ break;
+ default:
+		seq_printf(s, "unknown bus type: %d\n", access_bus);
+ break;
+
+ }
+
+ return ret;
+}
+
+static const struct file_operations dump_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = dump_cmd_open,
+ .read = seq_read,
+ .write = dump_cmd_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dump_output_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dump_output_show, NULL);
+}
+
+static const struct file_operations dump_output_fops = {
+ .owner = THIS_MODULE,
+ .open = dump_output_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init intel_mid_dump_init(void)
+{
+ dump_cmd_dentry = debugfs_create_file("dump_cmd",
+ S_IFREG | S_IRUGO | S_IWUSR, NULL, NULL, &dump_cmd_fops);
+ dump_output_dentry = debugfs_create_file("dump_output",
+ S_IFREG | S_IRUGO, NULL, NULL, &dump_output_fops);
+ if (!dump_cmd_dentry || !dump_output_dentry) {
+ pr_err("intel_mid_dump: can't create debugfs node\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+module_init(intel_mid_dump_init);
+
+static void __exit intel_mid_dump_exit(void)
+{
+ if (dump_cmd_dentry)
+ debugfs_remove(dump_cmd_dentry);
+ if (dump_output_dentry)
+ debugfs_remove(dump_output_dentry);
+}
+module_exit(intel_mid_dump_exit);
+
+MODULE_DESCRIPTION("Intel Atom SoC register dump driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * intel_soc_mdfld.c - This driver provides utility APIs for the Medfield
+ * platform
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+/* To CLEAR C6 offload Bit(LSB) in MSR 120 */
+/* Read-modify-write: only the C6OFFLOAD mask bits are cleared. */
+static inline void clear_c6offload_bit(void)
+{
+ u32 msr_low, msr_high;
+
+ rdmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+ msr_low = msr_low & ~MSR_C6OFFLOAD_SET_LOW;
+ msr_high = msr_high & ~MSR_C6OFFLOAD_SET_HIGH;
+ wrmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+}
+
+/* To SET C6 offload Bit(LSB) in MSR 120 */
+/* Read-modify-write counterpart of clear_c6offload_bit(). */
+static inline void set_c6offload_bit(void)
+{
+ u32 msr_low, msr_high;
+
+ rdmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+ msr_low = msr_low | MSR_C6OFFLOAD_SET_LOW;
+ msr_high = msr_high | MSR_C6OFFLOAD_SET_HIGH;
+ wrmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+}
+
+/*
+ * mfld_pmu_enter - issue the S0ix entry command to the SCU.
+ * @s0ix_state: target MID_* state, translated to a command value by
+ *              get_s0ix_val_set_pm_ssc().
+ *
+ * Returns true when the command was committed; false only when the S0i3
+ * C6-offload handshake times out (stats are cleared in that case).
+ */
+static bool mfld_pmu_enter(int s0ix_state)
+{
+ u32 s0ix_value;
+ u32 ssw_val;
+ int num_retry = PMU_MISC_SET_TIMEOUT;
+
+ s0ix_value = get_s0ix_val_set_pm_ssc(s0ix_state);
+
+ clear_c6offload_bit();
+
+ /* issue a command to SCU */
+ pmu_set_interrupt_enable();
+ writel(s0ix_value, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+ pmu_log_command(s0ix_value, NULL);
+
+ /* busy-wait (bounded) for pm_msic to latch; warn but continue on timeout */
+ do {
+ if (readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+ break;
+ udelay(1);
+ } while (--num_retry);
+
+ if (!num_retry && !readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+ WARN(1, "%s: pm_msic not set.\n", __func__);
+
+ num_retry = PMU_C6OFFLOAD_ACCESS_TIMEOUT;
+
+ /* At this point we have committed an S0ix command
+ * will have to wait for the SCU s0ix complete
+ * interrupt to proceed further.
+ */
+ mid_pmu_cxt->s0ix_entered = s0ix_state;
+
+ if (s0ix_value == S0I3_VALUE) {
+ /* S0i3 additionally requires the SCU-side offload handshake */
+ do {
+ ssw_val = readl(mid_pmu_cxt->base_addr.offload_reg);
+ if ((ssw_val & C6OFFLOAD_BIT_MASK) == C6OFFLOAD_BIT) {
+ set_c6offload_bit();
+ break;
+ }
+
+ udelay(1);
+ } while (--num_retry);
+
+ if (unlikely(!num_retry)) {
+ WARN(1, "mid_pmu: error cpu offload bit not set.\n");
+ pmu_stat_clear();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Kick all other CPUs out of S0ix and undo the C6-offload MSR bit. */
+static void mfld_pmu_wakeup(void)
+{
+
+ /* Wake up all other CPUs */
+ if (mid_pmu_cxt->s0ix_entered)
+ apic->send_IPI_allbutself(RESCHEDULE_VECTOR);
+
+ clear_c6offload_bit();
+}
+
+/* Unmap the C6 offload register mapped in mfld_pmu_init(). */
+static void mfld_pmu_remove(void)
+{
+ /* Freeing up memory allocated for PMU1 & PMU2 */
+ iounmap(mid_pmu_cxt->base_addr.offload_reg);
+ mid_pmu_cxt->base_addr.offload_reg = NULL;
+
+}
+
+/*
+ * Map an LSS number to the deepest PCI power state it may enter:
+ * security goes to D2, USB/UART only to D1, everything else D3hot.
+ */
+static pci_power_t mfld_pmu_choose_state(int device_lss)
+{
+ pci_power_t state;
+
+ switch (device_lss) {
+ case PMU_SECURITY_LSS_04:
+ state = PCI_D2;
+ break;
+
+ case PMU_USB_OTG_LSS_06:
+ case PMU_USB_HSIC_LSS_07:
+ case PMU_UART2_LSS_41:
+ state = PCI_D1;
+ break;
+
+ default:
+ state = PCI_D3hot;
+ break;
+ }
+
+ return state;
+}
+
+/*
+ * Medfield-specific PMU setup: map the 4-byte C6 offload register and
+ * record the S3 hint. Returns PMU_SUCCESS or PMU_FAILED on ioremap error.
+ */
+static int mfld_pmu_init(void)
+{
+ int ret = PMU_SUCCESS;
+
+ /* Map the memory of offload_reg */
+ mid_pmu_cxt->base_addr.offload_reg =
+ ioremap_nocache(C6_OFFLOAD_REG_ADDR, 4);
+ if (mid_pmu_cxt->base_addr.offload_reg == NULL) {
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+ "Unable to map the offload_reg address space\n");
+ ret = PMU_FAILED;
+ goto out_err;
+ }
+
+ mid_pmu_cxt->s3_hint = C6_HINT;
+
+out_err:
+ return ret;
+}
+
+/**
+ * platform_set_pmu_ops - Install the Medfield pmu method table as the
+ * global pmu_ops. (Takes no arguments; the table is the file-scope
+ * mfld_pmu_ops.)
+ */
+void platform_set_pmu_ops(void)
+{
+ pmu_ops = &mfld_pmu_ops;
+}
+
+/* Medfield implementation of the platform PMU method table. */
+struct platform_pmu_ops mfld_pmu_ops = {
+ .init = mfld_pmu_init,
+ .enter = mfld_pmu_enter,
+ .wakeup = mfld_pmu_wakeup,
+ .remove = mfld_pmu_remove,
+ .pci_choose_state = mfld_pmu_choose_state,
+ .set_power_state_ops = pmu_set_s0ix_possible,
+ .set_s0ix_complete = s0ix_complete,
+ .nc_set_power_state = mdfld_clv_nc_set_power_state,
+};
--- /dev/null
+/*
+ * intel_soc_mdfld.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/* Kconfig symbol is REMOVEME_INTEL_ATOM_MDFLD_POWER; the previous doubled
+ * "REMOVEME_INTEL_REMOVEME_" spelling could never be defined, silently
+ * compiling this entire header out. */
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER
+
+#define PM_SUPPORT 0x21
+
+#define ISP_POS 7
+#define ISP_SUB_CLASS 0x80
+#define C6_OFFLOAD_REG_ADDR 0xffd01ffc
+#define PMU_MISC_SET_TIMEOUT 50 /* 50usec timeout */
+#define PMU_C6OFFLOAD_ACCESS_TIMEOUT 1500 /* 1.5msecs timeout */
+
+#define PMU1_MAX_DEVS 8
+#define PMU2_MAX_DEVS 55
+
+#define GFX_LSS_INDEX 1
+#define PMU_SDIO0_LSS_00 0
+#define PMU_EMMC0_LSS_01 1
+#define PMU_AONT_LSS_02 2
+#define PMU_HSI_LSS_03 3
+#define PMU_SECURITY_LSS_04 4
+#define PMU_EMMC1_LSS_05 5
+#define PMU_USB_OTG_LSS_06 6
+#define PMU_USB_HSIC_LSS_07 7
+#define PMU_AUDIO_ENGINE_LSS_08 8
+#define PMU_AUDIO_DMA_LSS_09 9
+#define PMU_SRAM_LSS_10 10
+#define PMU_SRAM_LSS_11 11
+#define PMU_SRAM_LSS_12 12
+#define PMU_SRAM_LSS_13 13
+#define PMU_SDIO2_LSS_14 14
+#define PMU_PTI_DAFCA_LSS_15 15
+#define PMU_SC_DMA_LSS_16 16
+#define PMU_SPIO_LSS_17 17
+#define PMU_SPI1_LSS_18 18
+#define PMU_SPI2_LSS_19 19
+#define PMU_I2C0_LSS_20 20
+#define PMU_I2C1_LSS_21 21
+#define PMU_MAIN_FABRIC_LSS_22 22
+#define PMU_SEC_FABRIC_LSS_23 23
+#define PMU_SC_FABRIC_LSS_24 24
+#define PMU_AUDIO_RAM_LSS_25 25
+#define PMU_SCU_ROM_LSS_26 26
+#define PMU_I2C2_LSS_27 27
+#define PMU_SSC_LSS_28 28
+#define PMU_SECURITY_LSS_29 29
+#define PMU_SDIO1_LSS_30 30
+#define PMU_SCU_RAM0_LSS_31 31
+#define PMU_SCU_RAM1_LSS_32 32
+#define PMU_I2C3_LSS_33 33
+#define PMU_I2C4_LSS_34 34
+#define PMU_I2C5_LSS_35 35
+#define PMU_SPI3_LSS_36 36
+#define PMU_GPIO1_LSS_37 37
+#define PMU_PWR_BUTTON_LSS_38 38
+#define PMU_GPIO0_LSS_39 39
+#define PMU_KEYBRD_LSS_40 40
+#define PMU_UART2_LSS_41 41
+#define PMU_ADC_LSS_42 42
+#define PMU_CHARGER_LSS_43 43
+#define PMU_SEC_TAPC_LSS_44 44
+#define PMU_RTC_LSS_45 45
+#define PMU_GPI_LSS_46 46
+#define PMU_HDMI_VREG_LSS_47 47
+#define PMU_RESERVED_LSS_48 48
+#define PMU_AUDIO_SLIM1_LSS_49 49
+#define PMU_RESET_LSS_50 50
+#define PMU_AUDIO_SSP0_LSS_51 51
+#define PMU_AUDIO_SSP1_LSS_52 52
+#define PMU_IOSF_OCP_BRG_LSS_53 53
+#define PMU_GP_DMA_LSS_54 54
+#define PMU_SVID_LSS_55 55
+#define PMU_SOC_FUSE_LSS_56 56
+#define PMU_RSVD3_LSS_57 57
+#define PMU_RSVD4_LSS_58 58
+#define PMU_RSVD5_LSS_59 59
+#define PMU_RSVD6_LSS_60 60
+#define PMU_RSVD7_LSS_61 61
+#define PMU_RSVD8_LSS_62 62
+#define PMU_RSVD9_LSS_63 63
+
+#define PMU_MAX_LSS 63
+#define PMU_LSS_IN_FIRST_DWORD 32
+
+#define EMMC0_LSS PMU_EMMC0_LSS_01
+
+#define S0IX_TARGET_SSS0_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+#define S0IX_TARGET_SSS2_MASK ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3_MASK ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define S0IX_TARGET_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0IX_TARGET_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1_MASK ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2_MASK ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3_MASK ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I0_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define IGNORE_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_10) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_11) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_12) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_13) | \
+ SSMSK(D0I3_MASK, PMU_PTI_DAFCA_LSS_15))
+
+#define IGNORE_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SC_DMA_LSS_16-16) | \
+ SSMSK(D0I3_MASK, PMU_SPIO_LSS_17-16) | \
+ SSMSK(D0I3_MASK, PMU_MAIN_FABRIC_LSS_22-16) | \
+ SSMSK(D0I3_MASK, PMU_SEC_FABRIC_LSS_23-16) | \
+ SSMSK(D0I3_MASK, PMU_SC_FABRIC_LSS_24-16) | \
+ SSMSK(D0I3_MASK, PMU_SCU_ROM_LSS_26-16) | \
+ SSMSK(D0I3_MASK, PMU_SSC_LSS_28-16) | \
+ SSMSK(D0I3_MASK, PMU_SECURITY_LSS_29-16) | \
+ SSMSK(D0I3_MASK, PMU_SCU_RAM0_LSS_31-16))
+
+#define IGNORE_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_SCU_RAM1_LSS_32-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+ SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO0_LSS_39-32) | \
+ SSMSK(D0I3_MASK, PMU_ADC_LSS_42-32) | \
+ SSMSK(D0I3_MASK, PMU_CHARGER_LSS_43-32) | \
+ SSMSK(D0I3_MASK, PMU_SEC_TAPC_LSS_44-32) | \
+ SSMSK(D0I3_MASK, PMU_RTC_LSS_45-32) | \
+ SSMSK(D0I3_MASK, PMU_GPI_LSS_46-32) | \
+ SSMSK(D0I3_MASK, PMU_HDMI_VREG_LSS_47-32))
+
+#define IGNORE_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_IOSF_OCP_BRG_LSS_53-48) | \
+ SSMSK(D0I3_MASK, PMU_SVID_LSS_55-48) | \
+ SSMSK(D0I3_MASK, PMU_SOC_FUSE_LSS_56-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD3_LSS_57-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD4_LSS_58-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD5_LSS_59-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD6_LSS_60-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD7_LSS_61-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD8_LSS_62-48) | \
+ SSMSK(D0I3_MASK, PMU_RSVD9_LSS_63-48))
+
+#define IGNORE_S3_WKC0 SSWKC(PMU_AONT_LSS_02)
+#define IGNORE_S3_WKC1 SSWKC(PMU_ADC_LSS_42-32)
+
+#define S0I3_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_12) | \
+ SSMSK(D0I3_MASK, PMU_SRAM_LSS_13) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0I3_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_RAM_LSS_25-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0I3_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+ SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+ SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0I3_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+ SSMSK(D0I3_MASK, PMU_RESET_LSS_50-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+ SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+#define S0I1_SSS0 S0I3_SSS0
+#define S0I1_SSS1 S0I3_SSS1
+#define S0I1_SSS2 S0I3_SSS2
+#define S0I1_SSS3 S0I3_SSS3
+
+#define LPMP3_SSS0 ( \
+ SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+ SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+ SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+ SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+ SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+ SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+ SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+ SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_SSS1 ( \
+ SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+ SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+ SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+ SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_SSS2 ( \
+ SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+ SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+ SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+ SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+ SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+ SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+ SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_SSS3 ( \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+ SSMSK(D0I3_MASK, PMU_RESET_LSS_50-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+ SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+ SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+extern void pmu_set_s0ix_possible(int state);
+extern void log_wakeup_irq(void);
+extern void s0ix_complete(void);
+extern int mdfld_clv_nc_set_power_state(int, int, int, int *);
+
+
+#endif
--- /dev/null
+/*
+ * intel_soc_mdfld_clv_common.c - This driver provides utility api's common for
+ * mdfld and clv platforms
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+/* Current user-selected extended C-state policy (module parameter). */
+static int extended_cstate_mode = MID_S0IX_STATE;
+/*
+ * Module-parameter setter: parse a 4-char mode string ("s0i1", "lmp3",
+ * "s0i3", "i1i3", "lpi1", "lpi3", "s0ix") into extended_cstate_mode;
+ * anything else selects mode 0 and echoes "none". Always returns 0.
+ * NOTE(review): memcpy reads 5 bytes from val unconditionally — assumes
+ * val is at least 4 chars + NUL; shorter input would overread. Confirm.
+ */
+int set_extended_cstate_mode(const char *val, struct kernel_param *kp)
+{
+ char valcp[5];
+ int cstate_mode;
+
+ memcpy(valcp, val, 5);
+ valcp[4] = '\0';
+
+ if (strcmp(valcp, "s0i1") == 0)
+ cstate_mode = MID_S0I1_STATE;
+ else if (strcmp(valcp, "lmp3") == 0)
+ cstate_mode = MID_LPMP3_STATE;
+ else if (strcmp(valcp, "s0i3") == 0)
+ cstate_mode = MID_S0I3_STATE;
+ else if (strcmp(valcp, "i1i3") == 0)
+ cstate_mode = MID_I1I3_STATE;
+ else if (strcmp(valcp, "lpi1") == 0)
+ cstate_mode = MID_LPI1_STATE;
+ else if (strcmp(valcp, "lpi3") == 0)
+ cstate_mode = MID_LPI3_STATE;
+ else if (strcmp(valcp, "s0ix") == 0)
+ cstate_mode = MID_S0IX_STATE;
+ else {
+ cstate_mode = 0;
+ strncpy(valcp, "none", 5);
+ }
+ /* s0ix is a global string buffer declared elsewhere in this driver */
+ memcpy(s0ix, valcp, 5);
+
+ /* serialize against SCU command issue before switching policy */
+ down(&mid_pmu_cxt->scu_ready_sem);
+ extended_cstate_mode = cstate_mode;
+ up(&mid_pmu_cxt->scu_ready_sem);
+
+ return 0;
+}
+
+/* Module-parameter getter: copy the current mode string; returns its
+ * length (always 4 — every mode name is four characters). */
+int get_extended_cstate_mode(char *buffer, struct kernel_param *kp)
+{
+ strcpy(buffer, s0ix);
+ return 4;
+}
+
+/*
+ * Decide which state the platform can go to based on user and
+ * platform inputs
+ *
+ * Combines the user policy (extended_cstate_mode) with what the hardware
+ * currently permits (mid_pmu_cxt->s0ix_possible) and the caller's hint
+ * (*eax). Returns the chosen MID_* state, or 0 for none.
+ */
+static int get_final_state(unsigned long *eax)
+{
+ int ret = 0;
+ int possible = mid_pmu_cxt->s0ix_possible;
+
+ switch (extended_cstate_mode) {
+ case MID_S0I1_STATE:
+ case MID_S0I3_STATE:
+ case MID_I1I3_STATE:
+ /* user asks s0i1/s0i3 then only
+ * do s0i1/s0i3, dont do lpmp3
+ */
+ if (possible == MID_S0IX_STATE)
+ ret = extended_cstate_mode & possible;
+ break;
+
+ case MID_LPMP3_STATE:
+ /* user asks lpmp3 then only
+ * do lpmp3
+ */
+ if (possible == MID_LPMP3_STATE)
+ ret = MID_LPMP3_STATE;
+ break;
+
+ case MID_LPI1_STATE:
+ case MID_LPI3_STATE:
+ /* user asks lpmp3/i1/i3 then only
+ * do lpmp3/i1/i3
+ */
+ if (possible == MID_LPMP3_STATE)
+ ret = MID_LPMP3_STATE;
+ else if (possible == MID_S0IX_STATE)
+ ret = extended_cstate_mode >> REMOVE_LP_FROM_LPIX;
+ break;
+
+ case MID_S0IX_STATE:
+ ret = possible;
+ break;
+ }
+
+ /* reconcile the chosen state with the caller's hint in *eax */
+ if ((ret == MID_S0IX_STATE) &&
+ (*eax == MID_LPMP3_STATE))
+ ret = MID_S0I1_STATE;
+ else if ((ret <= *eax ||
+ (ret == MID_S0IX_STATE)))
+ ret = ret & *eax;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/* True when every masked south-complex subsystem already sits at (or
+ * below) its S0ix target D-state in all four SSS words. */
+static bool check_s0ix_possible(struct pmu_ss_states *pmsss)
+{
+ if (((pmsss->pmu2_states[0] & S0IX_TARGET_SSS0_MASK) ==
+ S0IX_TARGET_SSS0) &&
+ ((pmsss->pmu2_states[1] & S0IX_TARGET_SSS1_MASK) ==
+ S0IX_TARGET_SSS1) &&
+ ((pmsss->pmu2_states[2] & S0IX_TARGET_SSS2_MASK) ==
+ S0IX_TARGET_SSS2) &&
+ ((pmsss->pmu2_states[3] & S0IX_TARGET_SSS3_MASK) ==
+ S0IX_TARGET_SSS3))
+ return true;
+
+ return false;
+}
+
+/* Same check as check_s0ix_possible(), but against the LPMP3 targets. */
+static bool check_lpmp3_possible(struct pmu_ss_states *pmsss)
+{
+ if (((pmsss->pmu2_states[0] & LPMP3_TARGET_SSS0_MASK) ==
+ LPMP3_TARGET_SSS0) &&
+ ((pmsss->pmu2_states[1] & LPMP3_TARGET_SSS1_MASK) ==
+ LPMP3_TARGET_SSS1) &&
+ ((pmsss->pmu2_states[2] & LPMP3_TARGET_SSS2_MASK) ==
+ LPMP3_TARGET_SSS2) &&
+ ((pmsss->pmu2_states[3] & LPMP3_TARGET_SSS3_MASK) ==
+ LPMP3_TARGET_SSS3))
+ return true;
+
+ return false;
+}
+
+/*
+ * Refresh mid_pmu_cxt->s0ix_possible from the live SSS registers:
+ * MID_S0IX_STATE, MID_LPMP3_STATE, or 0. A device entering D0
+ * immediately disqualifies S0ix.
+ */
+void pmu_set_s0ix_possible(int state)
+{
+ /* assume S0ix not possible */
+ mid_pmu_cxt->s0ix_possible = 0;
+
+ if (state != PCI_D0) {
+ struct pmu_ss_states cur_pmsss;
+
+ pmu_read_sss(&cur_pmsss);
+
+ if (likely(check_s0ix_possible(&cur_pmsss)))
+ mid_pmu_cxt->s0ix_possible = MID_S0IX_STATE;
+ else if (check_lpmp3_possible(&cur_pmsss))
+ mid_pmu_cxt->s0ix_possible = MID_LPMP3_STATE;
+ }
+}
+
+/*
+ * Pick the S0ix state the cpuidle path should request, or 0 when S0ix
+ * must be avoided (PMU not ready, suspend/shutdown in flight, or a
+ * north-complex device active). *eax is always rewritten to C6_HINT.
+ */
+int get_target_platform_state(unsigned long *eax)
+{
+ int ret = 0;
+
+ if (unlikely(!pmu_initialized))
+ goto ret;
+
+ /* dont do s0ix if suspend in progress */
+ if (unlikely(mid_pmu_cxt->suspend_started))
+ goto ret;
+
+ /* dont do s0ix if shutdown in progress */
+ if (unlikely(mid_pmu_cxt->shutdown_started))
+ goto ret;
+
+ if (nc_device_state())
+ goto ret;
+
+ ret = get_final_state(eax);
+
+ret:
+ *eax = C6_HINT;
+ return ret;
+}
+EXPORT_SYMBOL(get_target_platform_state);
+
+/*
+ * Program the four pm_ssc target words for the requested state, start
+ * the matching statistics bucket, and return the SCU command value.
+ * S3 and fast-on-off reuse the S0i3 subsystem targets. Unknown states
+ * dump the PMU logs and BUG.
+ */
+u32 get_s0ix_val_set_pm_ssc(int s0ix_state)
+{
+ u32 s0ix_value = 0;
+
+ switch (s0ix_state) {
+ case MID_S0I1_STATE:
+ writel(S0I1_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+ writel(S0I1_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+ writel(S0I1_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+ writel(S0I1_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+ pmu_stat_start(SYS_STATE_S0I1);
+ s0ix_value = S0I1_VALUE;
+ break;
+ case MID_LPMP3_STATE:
+ writel(LPMP3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+ writel(LPMP3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+ writel(LPMP3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+ writel(LPMP3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+ pmu_stat_start(SYS_STATE_S0I2);
+ s0ix_value = LPMP3_VALUE;
+ break;
+ case MID_S0I3_STATE:
+ writel(S0I3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+ writel(S0I3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+ writel(S0I3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+ writel(S0I3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+ pmu_stat_start(SYS_STATE_S0I3);
+ s0ix_value = S0I3_VALUE;
+ break;
+ case MID_S3_STATE:
+ writel(S0I3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+ writel(S0I3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+ writel(S0I3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+ writel(S0I3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+ pmu_stat_start(SYS_STATE_S3);
+ s0ix_value = S0I3_VALUE;
+ break;
+ case MID_FAST_ON_OFF_STATE:
+ writel(S0I3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+ writel(S0I3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+ writel(S0I3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+ writel(S0I3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+ pmu_stat_start(SYS_STATE_S3);
+ s0ix_value = FAST_ON_OFF_VALUE;
+ break;
+ default:
+ pmu_dump_logs();
+ BUG_ON(1);
+ }
+ return s0ix_value;
+}
+
+/*
+ * Force targeted-but-unenumerated LSSes into the S0ix target state and
+ * strip the "ignore" LSSes from the computed configuration.
+ */
+void platform_update_all_lss_states(struct pmu_ss_states *pmu_config,
+ int *PCIALLDEV_CFG)
+{
+ /* We shutdown devices that are in the target config, and that are
+ not in the pci table, some devices are indeed not advertised in pci
+ table for certain firmwares. This is the case for HSI firmwares,
+ SPI3 device is not advertised, and would then prevent s0i3. */
+ /* Also take IGNORE_CFG in account (for e.g. GPIO1)*/
+ pmu_config->pmu2_states[0] |= S0IX_TARGET_SSS0_MASK & ~PCIALLDEV_CFG[0];
+ pmu_config->pmu2_states[0] &= ~IGNORE_SSS0;
+ pmu_config->pmu2_states[1] |= S0IX_TARGET_SSS1_MASK & ~PCIALLDEV_CFG[1];
+ pmu_config->pmu2_states[1] &= ~IGNORE_SSS1;
+ pmu_config->pmu2_states[2] |= S0IX_TARGET_SSS2_MASK & ~PCIALLDEV_CFG[2];
+ pmu_config->pmu2_states[2] &= ~IGNORE_SSS2;
+ pmu_config->pmu2_states[3] |= S0IX_TARGET_SSS3_MASK & ~PCIALLDEV_CFG[3];
+ pmu_config->pmu2_states[3] &= ~IGNORE_SSS3;
+}
+
+/* Ack S0ix completion by clearing pm_msic, but only if we entered S0ix. */
+void s0ix_complete(void)
+{
+ if (unlikely(mid_pmu_cxt->s0ix_entered))
+ writel(0, &mid_pmu_cxt->pmu_reg->pm_msic);
+}
+
+/*
+ * Valid wake source: lss_number 0 to 63
+ * Returns true if 'lss_number' is wake source
+ * else false
+ * LSS 0-31 live in pm_wks[0], 32-63 in pm_wks[1].
+ */
+bool mid_pmu_is_wake_source(u32 lss_number)
+{
+ u32 wake = 0;
+ bool ret = false;
+
+ if (lss_number > PMU_MAX_LSS)
+ return ret;
+
+ if (lss_number < PMU_LSS_IN_FIRST_DWORD) {
+ wake = readl(&mid_pmu_cxt->pmu_reg->pm_wks[0]);
+ wake &= (1 << lss_number);
+ } else {
+ wake = readl(&mid_pmu_cxt->pmu_reg->pm_wks[1]);
+ wake &= (1 << (lss_number - PMU_LSS_IN_FIRST_DWORD));
+ }
+
+ if (wake)
+ ret = true;
+
+ return ret;
+}
+
+/*
+ * Record a wake event against the current system state and, for S3
+ * resume during an in-progress suspend, print a human-readable source.
+ * 'source' is an absolute index; subtracting pmu1_max_devs yields the
+ * south-complex LSS number.
+ */
+static void log_wakeup_source(int source)
+{
+ enum sys_state type = mid_pmu_cxt->pmu_current_state;
+
+ mid_pmu_cxt->num_wakes[source][type]++;
+
+ trace_printk("wake_from_lss%d\n",
+ source - mid_pmu_cxt->pmu1_max_devs);
+
+ if ((mid_pmu_cxt->pmu_current_state != SYS_STATE_S3)
+ || !mid_pmu_cxt->suspend_started)
+ return;
+
+ switch (source - mid_pmu_cxt->pmu1_max_devs) {
+ case PMU_USB_OTG_LSS_06:
+ pr_info("wakeup from USB.\n");
+ break;
+ case PMU_GPIO0_LSS_39:
+ pr_info("wakeup from GPIO.\n");
+ break;
+ case PMU_HSI_LSS_03:
+ pr_info("wakeup from HSI.\n");
+ break;
+ default:
+ pr_info("wakeup from LSS%02d.\n",
+ source - mid_pmu_cxt->pmu1_max_devs);
+ break;
+ }
+}
+
+/* return the last wake source id, and make statistics about wake sources */
+/* Scans both wake-status words MSB-first; when neither word has a bit
+ * set, falls back to IRQ-based logging and returns INVALID_WAKE_SRC. */
+int pmu_get_wake_source(void)
+{
+ u32 wake0, wake1;
+ int i;
+ int source = INVALID_WAKE_SRC;
+
+ wake0 = readl(&mid_pmu_cxt->pmu_reg->pm_wks[0]);
+ wake1 = readl(&mid_pmu_cxt->pmu_reg->pm_wks[1]);
+
+ if (!wake0 && !wake1) {
+ log_wakeup_irq();
+ goto out;
+ }
+
+ while (wake0) {
+ i = fls(wake0) - 1;
+ source = i + mid_pmu_cxt->pmu1_max_devs;
+ log_wakeup_source(source);
+ wake0 &= ~(1<<i);
+ }
+
+ while (wake1) {
+ i = fls(wake1) - 1;
+ source = i + 32 + mid_pmu_cxt->pmu1_max_devs;
+ log_wakeup_source(source);
+ wake1 &= ~(1<<i);
+ }
+out:
+ return source;
+}
+
+/*
+ * Poll the APM/OSPM status port until the islands reach the requested
+ * state: DOWN needs only the masked bits set, UP needs an exact match.
+ * Polls every 10us; warns once and returns -EBUSY after ~5s (500000
+ * iterations). Returns 0 on success, -EINVAL for an unknown reg_type.
+ */
+static int wait_for_nc_pmcmd_complete(int verify_mask, int state_type
+ , int reg_type)
+{
+ int pwr_sts;
+ int count = 0;
+ u32 addr;
+
+ switch (reg_type) {
+ case APM_REG_TYPE:
+ addr = mid_pmu_cxt->apm_base + APM_STS;
+ break;
+ case OSPM_REG_TYPE:
+ addr = mid_pmu_cxt->ospm_base + OSPM_PM_SSS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ while (true) {
+ pwr_sts = inl(addr);
+ if (state_type == OSPM_ISLAND_DOWN) {
+ if ((pwr_sts & verify_mask) == verify_mask)
+ break;
+ else
+ udelay(10);
+ } else if (state_type == OSPM_ISLAND_UP) {
+ if (pwr_sts == verify_mask)
+ break;
+ else
+ udelay(10);
+ }
+ count++;
+ if (WARN_ONCE(count > 500000, "Timed out waiting for P-Unit"))
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/*
+ * Move the requested north-complex islands UP or DOWN through the
+ * APM/OSPM command port and wait for completion.
+ * @islands: bitmask of island indices; @state_type: OSPM_ISLAND_UP/DOWN;
+ * @reg_type: APM_REG_TYPE or OSPM_REG_TYPE; @change: set to 1 only when
+ * a command was issued and completed. Returns 0 or a negative errno.
+ * No command is issued when the islands are already in the target state.
+ */
+int mdfld_clv_nc_set_power_state(int islands, int state_type,
+ int reg_type, int *change)
+{
+ u32 pwr_cnt = 0;
+ u32 pwr_mask = 0;
+ int i, lss, mask;
+ int ret = 0;
+
+ *change = 0;
+
+ switch (reg_type) {
+ case APM_REG_TYPE:
+ pwr_cnt = inl(mid_pmu_cxt->apm_base + APM_STS);
+ break;
+ case OSPM_REG_TYPE:
+ pwr_cnt = inl(mid_pmu_cxt->ospm_base + OSPM_PM_SSS);
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* build the new mask: set bits to power down, clear to power up */
+ pwr_mask = pwr_cnt;
+ for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+ lss = islands & (0x1 << i);
+ if (lss) {
+ mask = D0I3_MASK << (BITS_PER_LSS * i);
+ if (state_type == OSPM_ISLAND_DOWN)
+ pwr_mask |= mask;
+ else if (state_type == OSPM_ISLAND_UP)
+ pwr_mask &= ~mask;
+ }
+ }
+
+ if (pwr_mask != pwr_cnt) {
+ switch (reg_type) {
+ case APM_REG_TYPE:
+ outl(pwr_mask, mid_pmu_cxt->apm_base + APM_CMD);
+ break;
+ case OSPM_REG_TYPE:
+ outl(pwr_mask, mid_pmu_cxt->ospm_base + OSPM_PM_SSC);
+ break;
+ }
+
+ ret =
+ wait_for_nc_pmcmd_complete(pwr_mask, state_type, reg_type);
+ if (!ret)
+ *change = 1;
+ if (nc_report_power_state)
+ nc_report_power_state(pwr_mask, reg_type);
+ }
+
+unlock:
+ return ret;
+}
--- /dev/null
+/*
+ * intel_soc_mrfld.c - This driver provides utility api's for merrifield
+ * platform
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+/* Mapped SCU counters, indexed by system state (see mrfld_pmu_init). */
+u32 __iomem *residency[SYS_STATE_MAX];
+u32 __iomem *s0ix_counter[SYS_STATE_MAX];
+
+/* list of north complex devices */
+/* Order matches the per-island field order of the NC_PM_SSS register. */
+char *mrfl_nc_devices[] = {
+ "GFXSLC",
+ "GSDKCK",
+ "GRSCD",
+ "VED",
+ "VEC",
+ "DPA",
+ "DPB",
+ "DPC",
+ "VSP",
+ "ISP",
+ "MIO",
+ "HDMIO",
+ "GFXSLCLDO"
+};
+
+/* Element count of mrfl_nc_devices[]; ARRAY_SIZE is the kernel idiom
+ * for the sizeof(a)/sizeof(a[0]) pattern. */
+int mrfl_no_of_nc_devices = ARRAY_SIZE(mrfl_nc_devices);
+
+/*
+ * Merrifield PMU setup: park unused LSSes in D0i3, map the SCU S0ix
+ * residency and iteration counters, and mask the PMU interrupt. On any
+ * ioremap failure the already-mapped counters are unwound via the goto
+ * chain and PMU_FAILED is returned.
+ */
+static int mrfld_pmu_init(void)
+{
+ mid_pmu_cxt->s3_hint = MRFLD_S3_HINT;
+
+
+ /* Put all unused LSS in D0i3 */
+ mid_pmu_cxt->os_sss[0] = (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_03) |
+  SSMSK(D0I3_MASK, PMU_HSI_LSS_05) |
+  SSMSK(D0I3_MASK, PMU_SECURITY_LSS_06) |
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_07) |
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_11) |
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_12) |
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_13) |
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_14) |
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_15));
+
+ /* Put LSS8 as unused on Tangier */
+ mid_pmu_cxt->os_sss[0] |= \
+  SSMSK(D0I3_MASK, PMU_USB_MPH_LSS_08);
+
+ mid_pmu_cxt->os_sss[1] = (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_16-16)|
+  SSMSK(D0I3_MASK, PMU_SSP3_LSS_17-16)|
+  SSMSK(D0I3_MASK, PMU_SSP6_LSS_19-16)|
+  SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_28-16)|
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_29-16)|
+  SSMSK(D0I3_MASK, PMU_RESERVED_LSS_30-16));
+
+ /* Except for LSS 35 keep all in D0i3 */
+ mid_pmu_cxt->os_sss[2] = 0xFFFFFFFF;
+ mid_pmu_cxt->os_sss[3] = 0xFFFFFFFF;
+
+ mid_pmu_cxt->os_sss[2] &= ~SSMSK(D0I3_MASK, PMU_SSP4_LSS_35-32);
+
+ /* Map S0ix residency counters */
+ residency[SYS_STATE_S0I1] = ioremap_nocache(S0I1_RES_ADDR, sizeof(u64));
+ if (residency[SYS_STATE_S0I1] == NULL)
+  goto err1;
+ residency[SYS_STATE_LPMP3] = ioremap_nocache(LPMP3_RES_ADDR,
+       sizeof(u64));
+ if (residency[SYS_STATE_LPMP3] == NULL)
+  goto err2;
+ residency[SYS_STATE_S0I2] = ioremap_nocache(S0I2_RES_ADDR, sizeof(u64));
+ if (residency[SYS_STATE_S0I2] == NULL)
+  goto err3;
+ residency[SYS_STATE_S0I3] = ioremap_nocache(S0I3_RES_ADDR, sizeof(u64));
+ if (residency[SYS_STATE_S0I3] == NULL)
+  goto err4;
+
+ /* Map S0ix iteration counters */
+ s0ix_counter[SYS_STATE_S0I1] = ioremap_nocache(S0I1_COUNT_ADDR,
+       sizeof(u32));
+ if (s0ix_counter[SYS_STATE_S0I1] == NULL)
+  goto err5;
+ s0ix_counter[SYS_STATE_LPMP3] = ioremap_nocache(LPMP3_COUNT_ADDR,
+       sizeof(u32));
+ if (s0ix_counter[SYS_STATE_LPMP3] == NULL)
+  goto err6;
+ s0ix_counter[SYS_STATE_S0I2] = ioremap_nocache(S0I2_COUNT_ADDR,
+       sizeof(u32));
+ if (s0ix_counter[SYS_STATE_S0I2] == NULL)
+  goto err7;
+ s0ix_counter[SYS_STATE_S0I3] = ioremap_nocache(S0I3_COUNT_ADDR,
+       sizeof(u32));
+ if (s0ix_counter[SYS_STATE_S0I3] == NULL)
+  goto err8;
+ /* Keep PSH LSS's 00, 33, 34 in D0i0 if PM is disabled */
+ if (!enable_s0ix && !enable_s3) {
+  mid_pmu_cxt->os_sss[2] &=
+     ~SSMSK(D0I3_MASK, PMU_I2C8_LSS_33-32);
+  mid_pmu_cxt->os_sss[2] &=
+     ~SSMSK(D0I3_MASK, PMU_I2C9_LSS_34-32);
+ } else {
+  mid_pmu_cxt->os_sss[0] |= SSMSK(D0I3_MASK, PMU_PSH_LSS_00);
+ }
+
+ /* Disable the Interrupt Enable bit in PM ICS register */
+ pmu_clear_interrupt_enable();
+
+ return PMU_SUCCESS;
+
+err8:
+ iounmap(s0ix_counter[SYS_STATE_S0I3]);
+ s0ix_counter[SYS_STATE_S0I3] = NULL;
+err7:
+ iounmap(s0ix_counter[SYS_STATE_S0I2]);
+ s0ix_counter[SYS_STATE_S0I2] = NULL;
+err6:
+ iounmap(s0ix_counter[SYS_STATE_LPMP3]);
+ s0ix_counter[SYS_STATE_LPMP3] = NULL;
+err5:
+ iounmap(s0ix_counter[SYS_STATE_S0I1]);
+ s0ix_counter[SYS_STATE_S0I1] = NULL;
+err4:
+ iounmap(residency[SYS_STATE_S0I3]);
+ residency[SYS_STATE_S0I3] = NULL;
+err3:
+ iounmap(residency[SYS_STATE_S0I2]);
+ residency[SYS_STATE_S0I2] = NULL;
+err2:
+ iounmap(residency[SYS_STATE_LPMP3]);
+ residency[SYS_STATE_LPMP3] = NULL;
+err1:
+ iounmap(residency[SYS_STATE_S0I1]);
+ residency[SYS_STATE_S0I1] = NULL;
+
+ pr_err("Cannot map memory to read S0ix residency and count\n");
+ return PMU_FAILED;
+}
+
+/* This function checks north complex (NC) and
+ * south complex (SC) device status in MRFLD.
+ * returns TRUE if all NC and SC devices are in d0i3
+ * else FALSE.
+ * NC is only consulted when the SC check already passed.
+ */
+static bool mrfld_nc_sc_status_check(void)
+{
+ int i;
+ u32 val, nc_pwr_sts;
+ struct pmu_ss_states cur_pmsss;
+ bool nc_status, sc_status;
+
+ /* assuming nc and sc are good */
+ nc_status = true;
+ sc_status = true;
+
+ /* Check south complex device status */
+ pmu_read_sss(&cur_pmsss);
+
+ if (!(((cur_pmsss.pmu2_states[0] & S0IX_TARGET_SSS0_MASK) ==
+    S0IX_TARGET_SSS0) &&
+  ((cur_pmsss.pmu2_states[1] & S0IX_TARGET_SSS1_MASK) ==
+    S0IX_TARGET_SSS1) &&
+  ((cur_pmsss.pmu2_states[2] & S0IX_TARGET_SSS2_MASK) ==
+    S0IX_TARGET_SSS2) &&
+  ((cur_pmsss.pmu2_states[3] & S0IX_TARGET_SSS3_MASK) ==
+    (S0IX_TARGET_SSS3)))) {
+  sc_status = false;
+  pr_warn("SC device/devices not in d0i3!!\n");
+  for (i = 0; i < 4; i++)
+   pr_warn("pmu2_states[%d] = %08lX\n", i,
+     cur_pmsss.pmu2_states[i]);
+ }
+
+ if (sc_status) {
+  /* Check north complex status */
+  nc_pwr_sts =
+   intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS);
+  /* loop through the status to see if any of nc power island
+   * is not in D0i3 state (2-bit field per island, 3 == D0i3)
+   */
+  for (i = 0; i < mrfl_no_of_nc_devices; i++) {
+   val = nc_pwr_sts & 3;
+   if (val != 3) {
+    nc_status = false;
+    pr_warn("NC device (%s) is not in d0i3!!\n",
+      mrfl_nc_devices[i]);
+    pr_warn("nc_pm_sss = %08X\n", nc_pwr_sts);
+    break;
+   }
+   nc_pwr_sts >>= BITS_PER_LSS;
+  }
+ }
+
+ return nc_status & sc_status;
+}
+
+/* FIXME: Need to start the counter only if debug is
+ * needed. This will save SCU cycles if debug is
+ * disabled
+ */
+static int __init start_scu_s0ix_res_counters(void)
+{
+ int ret;
+
+ ret = intel_scu_ipc_simple_command(START_RES_COUNTER, 0);
+ if (ret) {
+ pr_err("IPC command to start res counter failed\n");
+ BUG();
+ return ret;
+ }
+ return 0;
+}
+late_initcall(start_scu_s0ix_res_counters);
+
+void platform_update_all_lss_states(struct pmu_ss_states *pmu_config,
+ int *PCIALLDEV_CFG)
+{
+ /* Overwrite the pmu_config values that we get */
+ pmu_config->pmu2_states[0] =
+ (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_03) |
+ SSMSK(D0I3_MASK, PMU_HSI_LSS_05) |
+ SSMSK(D0I3_MASK, PMU_SECURITY_LSS_06) |
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_07) |
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_11) |
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_12) |
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_13) |
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_14) |
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_15));
+
+ /* Put LSS8 as unused on Tangier */
+ pmu_config->pmu2_states[0] |= \
+ SSMSK(D0I3_MASK, PMU_USB_MPH_LSS_08);
+
+ pmu_config->pmu2_states[1] =
+ (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_16-16)|
+ SSMSK(D0I3_MASK, PMU_SSP3_LSS_17-16)|
+ SSMSK(D0I3_MASK, PMU_SSP6_LSS_19-16)|
+ SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_28-16) |
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_29-16)|
+ SSMSK(D0I3_MASK, PMU_RESERVED_LSS_30-16));
+
+ pmu_config->pmu2_states[0] &= ~IGNORE_SSS0;
+ pmu_config->pmu2_states[1] &= ~IGNORE_SSS1;
+ pmu_config->pmu2_states[2] = ~IGNORE_SSS2;
+ pmu_config->pmu2_states[3] = ~IGNORE_SSS3;
+
+	/* Except for LSS 35 keep all in D0i3 */
+ pmu_config->pmu2_states[2] &= ~SSMSK(D0I3_MASK, PMU_SSP4_LSS_35-32);
+
+ /* Keep PSH LSS's 00, 33, 34 in D0i0 if PM is disabled */
+ if (!enable_s0ix && !enable_s3) {
+ pmu_config->pmu2_states[2] &=
+ ~SSMSK(D0I3_MASK, PMU_I2C8_LSS_33-32);
+ pmu_config->pmu2_states[2] &=
+ ~SSMSK(D0I3_MASK, PMU_I2C9_LSS_34-32);
+ } else {
+ pmu_config->pmu2_states[0] |= SSMSK(D0I3_MASK, PMU_PSH_LSS_00);
+ }
+}
+
+/*
+ * In MDFLD and CLV this callback is used to issue
+ * PM_CMD which is not required in MRFLD
+ */
+static bool mrfld_pmu_enter(int s0ix_state)
+{
+ mid_pmu_cxt->s0ix_entered = s0ix_state;
+ if (s0ix_state == MID_S3_STATE) {
+ mid_pmu_cxt->pmu_current_state = SYS_STATE_S3;
+ pmu_set_interrupt_enable();
+ }
+
+ return true;
+}
+
+/**
+ * platform_set_pmu_ops - Set the global pmu method table.
+ * @ops: Pointer to ops structure.
+ */
+void platform_set_pmu_ops(void)
+{
+ pmu_ops = &mrfld_pmu_ops;
+}
+
+/*
+ * As of now since there is no sequential mapping between
+ * LSS and WKS bits the following two calls are dummy
+ */
+
+bool mid_pmu_is_wake_source(u32 lss_number)
+{
+ return false;
+}
+
+/* return the last wake source id, and make statistics about wake sources */
+int pmu_get_wake_source(void)
+{
+ return INVALID_WAKE_SRC;
+}
+
+
+int set_extended_cstate_mode(const char *val, struct kernel_param *kp)
+{
+ return 0;
+}
+
+int get_extended_cstate_mode(char *buffer, struct kernel_param *kp)
+{
+ const char *default_string = "not supported";
+ strcpy(buffer, default_string);
+ return strlen(default_string);
+}
+
+static int wait_for_nc_pmcmd_complete(int verify_mask,
+ int status_mask, int state_type , int reg)
+{
+ int pwr_sts;
+ int count = 0;
+
+ while (true) {
+ pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, reg);
+ pwr_sts = pwr_sts >> SSS_SHIFT;
+ if (state_type == OSPM_ISLAND_DOWN ||
+ state_type == OSPM_ISLAND_SR) {
+ if ((pwr_sts & status_mask) ==
+ (verify_mask & status_mask))
+ break;
+ else
+ udelay(10);
+ } else if (state_type == OSPM_ISLAND_UP) {
+ if ((~pwr_sts & status_mask) ==
+ (~verify_mask & status_mask))
+ break;
+ else
+ udelay(10);
+ }
+
+ count++;
+ if (WARN_ONCE(count > 500000, "Timed out waiting for P-Unit"))
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int mrfld_nc_set_power_state(int islands, int state_type,
+ int reg, int *change)
+{
+ u32 pwr_sts = 0;
+ u32 pwr_mask = 0;
+ int i, lss, mask;
+ int ret = 0;
+ int status_mask = 0;
+
+ *change = 0;
+ pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, reg);
+ pwr_mask = pwr_sts;
+
+ for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+ lss = islands & (0x1 << i);
+ if (lss) {
+ mask = D0I3_MASK << (BITS_PER_LSS * i);
+ status_mask = status_mask | mask;
+ if (state_type == OSPM_ISLAND_DOWN)
+ pwr_mask |= mask;
+ else if (state_type == OSPM_ISLAND_UP)
+ pwr_mask &= ~mask;
+ /* Soft reset case */
+ else if (state_type == OSPM_ISLAND_SR) {
+ pwr_mask &= ~mask;
+ mask = SR_MASK << (BITS_PER_LSS * i);
+ pwr_mask |= mask;
+ }
+ }
+ }
+
+ if (pwr_mask != pwr_sts) {
+ intel_mid_msgbus_write32(PUNIT_PORT, reg, pwr_mask);
+ ret = wait_for_nc_pmcmd_complete(pwr_mask,
+ status_mask, state_type, reg);
+ if (!ret)
+ *change = 1;
+ if (nc_report_power_state)
+ nc_report_power_state(pwr_mask, reg);
+ }
+
+ return ret;
+}
+
+void s0ix_complete(void)
+{
+ if (mid_pmu_cxt->s0ix_entered) {
+ log_wakeup_irq();
+
+ if (mid_pmu_cxt->s0ix_entered == SYS_STATE_S3)
+ pmu_clear_interrupt_enable();
+
+ mid_pmu_cxt->pmu_current_state =
+ mid_pmu_cxt->s0ix_entered = 0;
+ }
+}
+
+bool could_do_s0ix(void)
+{
+ bool ret = false;
+ if (unlikely(!pmu_initialized))
+ goto ret;
+
+ /* dont do s0ix if suspend in progress */
+ if (unlikely(mid_pmu_cxt->suspend_started))
+ goto ret;
+
+ /* dont do s0ix if shutdown in progress */
+ if (unlikely(mid_pmu_cxt->shutdown_started))
+ goto ret;
+
+ if (nc_device_state())
+ goto ret;
+
+ ret = true;
+ret:
+ return ret;
+}
+EXPORT_SYMBOL(could_do_s0ix);
+
+struct platform_pmu_ops mrfld_pmu_ops = {
+ .init = mrfld_pmu_init,
+ .enter = mrfld_pmu_enter,
+ .set_s0ix_complete = s0ix_complete,
+ .nc_set_power_state = mrfld_nc_set_power_state,
+ .check_nc_sc_status = mrfld_nc_sc_status_check,
+};
--- /dev/null
+/*
+ * intel_soc_mrfld.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+
+#define PM_SUPPORT 0x21
+
+#define ISP_POS 7
+#define ISP_SUB_CLASS 0x80
+
+#define PUNIT_PORT 0x04
+#define SSS_SHIFT 24
+
+/* Soft reset mask */
+#define SR_MASK 0x2
+
+#define PMU1_MAX_DEVS 8
+#define PMU2_MAX_DEVS 55
+
+#define MRFLD_S3_HINT 0x64
+
+#define PUNIT_PORT 0x04
+#define NC_PM_SSS 0x3F
+
+/* SRAM locations to get S0ix residency */
+#define S0I1_RES_ADDR 0xFFFFF560
+#define LPMP3_RES_ADDR 0xFFFFF578
+#define S0I2_RES_ADDR 0xFFFFF568
+#define S0I3_RES_ADDR 0xFFFFF570
+
+/* SRAM locations to get S0ix count */
+#define S0I1_COUNT_ADDR 0xFFFFF588
+#define LPMP3_COUNT_ADDR 0xFFFFF594
+#define S0I2_COUNT_ADDR 0xFFFFF58C
+#define S0I3_COUNT_ADDR 0xFFFFF590
+
+/* IPC commands to start, stop and
+ * dump S0ix residency counters */
+#define START_RES_COUNTER 0x00EB
+#define STOP_RES_COUNTER 0x10EB
+#define DUMP_RES_COUNTER 0x20EB
+
+/* IPC commands to start/reset and
+ * dump S0ix count */
+#define START_S0IX_COUNT 0x00E1
+#define DUMP_S0IX_COUNT 0x10E1
+
+#define GFX_LSS_INDEX 1
+
+#define PMU_PSH_LSS_00 0
+#define PMU_SDIO0_LSS_01 1
+#define PMU_EMMC0_LSS_02 2
+#define PMU_RESERVED_LSS_03 3
+#define PMU_SDIO1_LSS_04 4
+#define PMU_HSI_LSS_05 5
+#define PMU_SECURITY_LSS_06 6
+#define PMU_RESERVED_LSS_07 7
+#define PMU_USB_MPH_LSS_08 8
+#define PMU_USB3_LSS_09 9
+#define PMU_AUDIO_LSS_10 10
+#define PMU_RESERVED_LSS_11 11
+#define PMU_RESERVED_LSS_12 12
+#define PMU_RESERVED_LSS_13 13
+#define PMU_RESERVED_LSS_14 14
+#define PMU_RESERVED_LSS_15 15
+#define PMU_RESERVED_LSS_16 16
+#define PMU_SSP3_LSS_17 17
+#define PMU_SSP5_LSS_18 18
+#define PMU_SSP6_LSS_19 19
+#define PMU_I2C1_LSS_20 20
+#define PMU_I2C2_LSS_21 21
+#define PMU_I2C3_LSS_22 22
+#define PMU_I2C4_LSS_23 23
+#define PMU_I2C5_LSS_24 24
+#define PMU_GP_DMA_LSS_25 25
+#define PMU_I2C6_LSS_26 26
+#define PMU_I2C7_LSS_27 27
+#define PMU_USB_OTG_LSS_28 28
+#define PMU_RESERVED_LSS_29 29
+#define PMU_RESERVED_LSS_30 30
+#define PMU_UART0_LSS_31 31
+#define PMU_UART1_LSS_31 31
+#define PMU_UART2_LSS_31 31
+
+#define PMU_I2C8_LSS_33 33
+#define PMU_I2C9_LSS_34 34
+#define PMU_SSP4_LSS_35 35
+#define PMU_PMW_LSS_36 36
+
+#define EMMC0_LSS PMU_EMMC0_LSS_02
+
+#define IGNORE_SSS0 0
+#define IGNORE_SSS1 0
+#define IGNORE_SSS2 0
+#define IGNORE_SSS3 0
+
+#define PMU_WAKE_GPIO0 (1 << 0)
+#define PMU_WAKE_GPIO1 (1 << 1)
+#define PMU_WAKE_GPIO2 (1 << 2)
+#define PMU_WAKE_GPIO3 (1 << 3)
+#define PMU_WAKE_GPIO4 (1 << 4)
+#define PMU_WAKE_GPIO5 (1 << 5)
+#define PMU_WAKE_TIMERS (1 << 6)
+#define PMU_WAKE_SECURITY (1 << 7)
+#define PMU_WAKE_AONT32K (1 << 8)
+#define PMU_WAKE_AONT (1 << 9)
+#define PMU_WAKE_SVID_ALERT (1 << 10)
+#define PMU_WAKE_AUDIO (1 << 11)
+#define PMU_WAKE_USB2 (1 << 12)
+#define PMU_WAKE_USB3 (1 << 13)
+#define PMU_WAKE_ILB (1 << 14)
+#define PMU_WAKE_TAP (1 << 15)
+#define PMU_WAKE_WATCHDOG (1 << 16)
+#define PMU_WAKE_HSIC (1 << 17)
+#define PMU_WAKE_PSH (1 << 18)
+#define PMU_WAKE_PSH_GPIO (1 << 19)
+#define PMU_WAKE_PSH_AONT (1 << 20)
+#define PMU_WAKE_PSH_HALT (1 << 21)
+#define PMU_GLBL_WAKE_MASK (1 << 31)
+
+/* Ignore AONT WAKES and ALL from WKC1 */
+#define IGNORE_S3_WKC0 (PMU_WAKE_AONT32K | PMU_WAKE_AONT)
+#define IGNORE_S3_WKC1 (~0)
+
+#define S0IX_TARGET_SSS0_MASK (0xFFF3FFFF)
+#define S0IX_TARGET_SSS1_MASK (0xFFFFFFFF)
+#define S0IX_TARGET_SSS2_MASK (0xFFFFFFFF)
+#define S0IX_TARGET_SSS3_MASK (0xFFFFFFFF)
+
+#define S0IX_TARGET_SSS0 (0xFFF3FFFF)
+#define S0IX_TARGET_SSS1 (0xFFFFFFFF)
+#define S0IX_TARGET_SSS2 (0xFFFFFF3F)
+#define S0IX_TARGET_SSS3 (0xFFFFFFFF)
+
+#define LPMP3_TARGET_SSS0_MASK (0xFFF3FFFF)
+#define LPMP3_TARGET_SSS0 (0xFFC3FFFF)
+
+extern char *mrfl_nc_devices[];
+extern int mrfl_no_of_nc_devices;
+extern int intel_scu_ipc_simple_command(int, int);
+extern void log_wakeup_irq(void);
+extern void s0ix_complete(void);
+extern bool could_do_s0ix(void);
+
+#endif
--- /dev/null
+/*
+ * intel_soc_pm_debug.c - This driver provides debug utilities across
+ * multiple platforms
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/time.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/cpuidle.h>
+#include "intel_soc_pm_debug.h"
+#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <asm/tsc.h>
+
+#ifdef CONFIG_PM_DEBUG
+#define MAX_CSTATES_POSSIBLE 32
+
+
+
+static struct latency_stat *lat_stat;
+
+static void latency_measure_enable_disable(bool enable_measure)
+{
+ int err;
+ u32 sub;
+
+ if (enable_measure == lat_stat->latency_measure)
+ return;
+
+ if (enable_measure)
+ sub = IPC_SUB_MEASURE_START_CLVP;
+ else
+ sub = IPC_SUB_MEASURE_STOP_CLVP;
+
+ err = rpmsg_send_generic_command(IPC_CMD_S0IX_LATENCY_CLVP,
+ sub, NULL, 0, NULL, 0);
+ if (unlikely(err)) {
+ pr_err("IPC to %s S0IX Latency Measurement failed!\n",
+ enable_measure ? "start" : "stop");
+ return;
+ }
+
+ if (enable_measure) {
+ memset(lat_stat->scu_latency, 0, sizeof(lat_stat->scu_latency));
+ memset(lat_stat->os_latency, 0, sizeof(lat_stat->os_latency));
+ memset(lat_stat->s3_parts_lat, 0,
+ sizeof(lat_stat->s3_parts_lat));
+ memset(lat_stat->count, 0, sizeof(lat_stat->count));
+ }
+
+ lat_stat->latency_measure = enable_measure;
+}
+
+static void print_simple_stat(struct seq_file *s, int divisor, int rem_div,
+ int count, struct simple_stat stat)
+{
+ unsigned long long min, avg, max;
+ unsigned long min_rem = 0, avg_rem = 0, max_rem = 0;
+
+ min = stat.min;
+ max = stat.max;
+ avg = stat.total;
+
+ if (count)
+ do_div(avg, count);
+
+ if (divisor > 1) {
+ min_rem = do_div(min, divisor);
+ max_rem = do_div(max, divisor);
+ avg_rem = do_div(avg, divisor);
+ }
+
+ if (rem_div > 1) {
+ min_rem /= rem_div;
+ max_rem /= rem_div;
+ avg_rem /= rem_div;
+ }
+
+ seq_printf(s, " %5llu.%03lu/%5llu.%03lu/%5llu.%03lu",
+ min, min_rem, avg, avg_rem, max, max_rem);
+}
+
+static int show_pmu_s0ix_lat(struct seq_file *s, void *unused)
+{
+ int i = 0;
+
+ char *states[] = {
+ "S0I1",
+ "LPMP3",
+ "S0I3",
+ "S3"
+ };
+
+ char *s3_parts_names[] = {
+ "PROC_FRZ",
+ "DEV_SUS",
+ "NB_CPU_OFF",
+ "NB_CPU_ON",
+ "DEV_RES",
+ "PROC_UNFRZ"
+ };
+
+ seq_printf(s, "%29s %35s\n", "SCU Latency", "OS Latency");
+ seq_printf(s, "%33s %35s\n", "min/avg/max(msec)", "min/avg/max(msec)");
+
+ for (i = SYS_STATE_S0I1; i <= SYS_STATE_S3; i++) {
+ seq_printf(s, "\n%s(%llu)", states[i - SYS_STATE_S0I1],
+ lat_stat->count[i]);
+
+ seq_printf(s, "\n%5s", "entry");
+ print_simple_stat(s, USEC_PER_MSEC, 1, lat_stat->count[i],
+ lat_stat->scu_latency[i].entry);
+ seq_printf(s, " ");
+ print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+ lat_stat->count[i], lat_stat->os_latency[i].entry);
+
+ seq_printf(s, "\n%5s", "exit");
+ print_simple_stat(s, USEC_PER_MSEC, 1, lat_stat->count[i],
+ lat_stat->scu_latency[i].exit);
+ seq_printf(s, " ");
+ print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+ lat_stat->count[i], lat_stat->os_latency[i].exit);
+
+ }
+
+ seq_printf(s, "\n\n");
+
+ if (!lat_stat->count[SYS_STATE_S3])
+ return 0;
+
+ seq_printf(s, "S3 Latency dissection:\n");
+ seq_printf(s, "%38s\n", "min/avg/max(msec)");
+
+ for (i = 0; i < MAX_S3_PARTS; i++) {
+ seq_printf(s, "%10s\t", s3_parts_names[i]);
+ print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+ lat_stat->count[SYS_STATE_S3],
+ lat_stat->s3_parts_lat[i]);
+ seq_printf(s, "\n");
+ }
+
+ return 0;
+}
+
+static int pmu_s0ix_lat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_pmu_s0ix_lat, NULL);
+}
+
+static ssize_t pmu_s0ix_lat_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int buf_size = min(count, sizeof(buf)-1);
+
+ if (copy_from_user(buf, userbuf, buf_size))
+ return -EFAULT;
+
+
+ buf[buf_size] = 0;
+
+ if (((strlen("start") + 1) == buf_size) &&
+ !strncmp(buf, "start", strlen("start"))) {
+ latency_measure_enable_disable(true);
+ } else if (((strlen("stop") + 1) == buf_size) &&
+ !strncmp(buf, "stop", strlen("stop"))) {
+ latency_measure_enable_disable(false);
+ }
+
+ return buf_size;
+}
+
+static const struct file_operations s0ix_latency_ops = {
+ .open = pmu_s0ix_lat_open,
+ .read = seq_read,
+ .write = pmu_s0ix_lat_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void update_simple_stat(struct simple_stat *simple_stat, int count)
+{
+ u64 duration = simple_stat->curr;
+
+ if (!count) {
+ simple_stat->min =
+ simple_stat->max =
+ simple_stat->total = duration;
+ } else {
+ if (duration < simple_stat->min)
+ simple_stat->min = duration;
+ else if (duration > simple_stat->max)
+ simple_stat->max = duration;
+ simple_stat->total += duration;
+ }
+}
+
+void s0ix_scu_latency_stat(int type)
+{
+ if (!lat_stat || !lat_stat->latency_measure)
+ return;
+
+ if (type < SYS_STATE_S0I1 || type > SYS_STATE_S3)
+ return;
+
+ lat_stat->scu_latency[type].entry.curr =
+ readl(lat_stat->scu_s0ix_lat_addr);
+ lat_stat->scu_latency[type].exit.curr =
+ readl(lat_stat->scu_s0ix_lat_addr + 1);
+
+ update_simple_stat(&lat_stat->scu_latency[type].entry,
+ lat_stat->count[type]);
+ update_simple_stat(&lat_stat->scu_latency[type].exit,
+ lat_stat->count[type]);
+}
+
+void s0ix_lat_stat_init(void)
+{
+ if (!platform_is(INTEL_ATOM_CLV))
+ return;
+
+ lat_stat = kzalloc(sizeof(struct latency_stat), GFP_KERNEL);
+ if (unlikely(!lat_stat)) {
+ pr_err("Failed to allocate memory for s0ix latency!\n");
+ goto out_err0;
+ }
+
+ lat_stat->scu_s0ix_lat_addr =
+ ioremap_nocache(S0IX_LAT_SRAM_ADDR_CLVP,
+ S0IX_LAT_SRAM_SIZE_CLVP);
+ if (unlikely(!lat_stat->scu_s0ix_lat_addr)) {
+ pr_err("Failed to map SCU_S0IX_LAT_ADDR!\n");
+ goto out_err1;
+ }
+
+ lat_stat->dentry = debugfs_create_file("s0ix_latency",
+ S_IFREG | S_IRUGO, NULL, NULL, &s0ix_latency_ops);
+ if (unlikely(!lat_stat->dentry)) {
+ pr_err("Failed to create debugfs for s0ix latency!\n");
+ goto out_err2;
+ }
+
+ return;
+
+out_err2:
+ iounmap(lat_stat->scu_s0ix_lat_addr);
+out_err1:
+ kfree(lat_stat);
+ lat_stat = NULL;
+out_err0:
+ pr_err("%s: Initialization failed\n", __func__);
+}
+
+void s0ix_lat_stat_finish(void)
+{
+ if (!platform_is(INTEL_ATOM_CLV))
+ return;
+
+ if (unlikely(!lat_stat))
+ return;
+
+ if (likely(lat_stat->scu_s0ix_lat_addr))
+ iounmap(lat_stat->scu_s0ix_lat_addr);
+
+ if (likely(lat_stat->dentry))
+ debugfs_remove(lat_stat->dentry);
+
+ kfree(lat_stat);
+ lat_stat = NULL;
+}
+
+void time_stamp_in_suspend_flow(int mark, bool start)
+{
+ if (!lat_stat || !lat_stat->latency_measure)
+ return;
+
+ if (start) {
+ lat_stat->s3_parts_lat[mark].curr = cpu_clock(0);
+ return;
+ }
+
+ lat_stat->s3_parts_lat[mark].curr = cpu_clock(0) -
+ lat_stat->s3_parts_lat[mark].curr;
+}
+
+static void collect_sleep_state_latency_stat(int sleep_state)
+{
+ int i;
+ if (sleep_state == SYS_STATE_S3)
+ for (i = 0; i < MAX_S3_PARTS; i++)
+ update_simple_stat(&lat_stat->s3_parts_lat[i],
+ lat_stat->count[sleep_state]);
+
+ update_simple_stat(&lat_stat->os_latency[sleep_state].entry,
+ lat_stat->count[sleep_state]);
+ update_simple_stat(&lat_stat->os_latency[sleep_state].exit,
+ lat_stat->count[sleep_state]);
+ lat_stat->count[sleep_state]++;
+}
+
+void time_stamp_for_sleep_state_latency(int sleep_state, bool start, bool entry)
+{
+ if (!lat_stat || !lat_stat->latency_measure)
+ return;
+
+ if (start) {
+ if (entry)
+ lat_stat->os_latency[sleep_state].entry.curr =
+ cpu_clock(0);
+ else
+ lat_stat->os_latency[sleep_state].exit.curr =
+ cpu_clock(0);
+ return;
+ }
+
+ if (entry)
+ lat_stat->os_latency[sleep_state].entry.curr = cpu_clock(0) -
+ lat_stat->os_latency[sleep_state].entry.curr;
+ else {
+ lat_stat->os_latency[sleep_state].exit.curr = cpu_clock(0) -
+ lat_stat->os_latency[sleep_state].exit.curr;
+ collect_sleep_state_latency_stat(sleep_state);
+ }
+}
+#else /* CONFIG_PM_DEBUG */
+void s0ix_scu_latency_stat(int type) {}
+void s0ix_lat_stat_init(void) {}
+void s0ix_lat_stat_finish(void) {}
+void time_stamp_for_sleep_state_latency(int sleep_state, bool start,
+ bool entry) {}
+void time_stamp_in_suspend_flow(int mark, bool start) {}
+inline unsigned int pmu_get_new_cstate
+ (unsigned int cstate, int *index) { return cstate; };
+#endif /* CONFIG_PM_DEBUG */
+
+static char *dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+/* This can be used to report NC power transitions */
+void (*nc_report_power_state) (u32, int);
+
+#if defined(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) \
+ || defined(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER)
+
+#define PMU_DEBUG_PRINT_STATS (1U << 0)
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_PRINT(logging_type, s, debug_level_mask, args...) \
+ do { \
+ if (logging_type) \
+ seq_printf(s, args); \
+ else if (debug_mask & \
+ PMU_DEBUG_PRINT_##debug_level_mask) \
+ pr_info(args); \
+ } while (0)
+
+static struct island display_islands[] = {
+ {APM_REG_TYPE, APM_GRAPHICS_ISLAND, "GFX"},
+ {APM_REG_TYPE, APM_VIDEO_DEC_ISLAND, "Video Decoder"},
+ {APM_REG_TYPE, APM_VIDEO_ENC_ISLAND, "Video Encoder"},
+ {APM_REG_TYPE, APM_GL3_CACHE_ISLAND, "GL3 Cache"},
+ {OSPM_REG_TYPE, OSPM_DISPLAY_A_ISLAND, "Display A"},
+ {OSPM_REG_TYPE, OSPM_DISPLAY_B_ISLAND, "Display B"},
+ {OSPM_REG_TYPE, OSPM_DISPLAY_C_ISLAND, "Display C"},
+ {OSPM_REG_TYPE, OSPM_MIPI_ISLAND, "MIPI-DSI"}
+};
+
+static struct island camera_islands[] = {
+ {APM_REG_TYPE, APM_ISP_ISLAND, "ISP"},
+ {APM_REG_TYPE, APM_IPH_ISLAND, "Iunit PHY"}
+};
+
+static char *lss_device_status[4] = { "D0i0", "D0i1", "D0i2", "D0i3" };
+
+static int lsses_num =
+ sizeof(lsses)/sizeof(lsses[0]);
+
+#ifdef LOG_PMU_EVENTS
+static void pmu_log_timestamp(struct timespec *ts)
+{
+ if (timekeeping_suspended) {
+ ts->tv_sec = 0;
+ ts->tv_nsec = 0;
+ } else {
+ ktime_get_ts(ts);
+ }
+}
+
+void pmu_log_pmu_irq(int status)
+{
+ struct mid_pmu_pmu_irq_log *log =
+ &mid_pmu_cxt->pmu_irq_log[mid_pmu_cxt->pmu_irq_log_idx];
+
+ log->status = status;
+ pmu_log_timestamp(&log->ts);
+ mid_pmu_cxt->pmu_irq_log_idx =
+ (mid_pmu_cxt->pmu_irq_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_pmu_irq_log(void)
+{
+ struct mid_pmu_pmu_irq_log *log;
+ int i = mid_pmu_cxt->pmu_irq_log_idx, j;
+
+ printk(KERN_ERR"%d last pmu irqs:\n", LOG_SIZE);
+
+ for (j = 0; j < LOG_SIZE; j++) {
+ i ? i-- : (i = LOG_SIZE - 1);
+ log = &mid_pmu_cxt->pmu_irq_log[i];
+ printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+ log->ts.tv_sec, log->ts.tv_nsec);
+ printk(KERN_ERR"Status = 0x%02x", log->status);
+ printk(KERN_ERR"\n");
+ }
+}
+
+void pmu_log_ipc_irq(void)
+{
+ struct mid_pmu_ipc_irq_log *log =
+ &mid_pmu_cxt->ipc_irq_log[mid_pmu_cxt->ipc_irq_log_idx];
+
+ pmu_log_timestamp(&log->ts);
+ mid_pmu_cxt->ipc_irq_log_idx =
+ (mid_pmu_cxt->ipc_irq_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_ipc_irq_log(void)
+{
+ struct mid_pmu_ipc_irq_log *log;
+ int i = mid_pmu_cxt->ipc_irq_log_idx, j;
+
+ printk(KERN_ERR"%d last ipc irqs:\n", LOG_SIZE);
+
+ for (j = 0; j < LOG_SIZE; j++) {
+ i ? i-- : (i = LOG_SIZE - 1);
+ log = &mid_pmu_cxt->ipc_irq_log[i];
+ printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+ log->ts.tv_sec, log->ts.tv_nsec);
+ printk(KERN_ERR"\n");
+ }
+}
+
+void pmu_log_ipc(u32 command)
+{
+ struct mid_pmu_ipc_log *log =
+ &mid_pmu_cxt->ipc_log[mid_pmu_cxt->ipc_log_idx];
+
+ log->command = command;
+ pmu_log_timestamp(&log->ts);
+ mid_pmu_cxt->ipc_log_idx = (mid_pmu_cxt->ipc_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_ipc_log(void)
+{
+ struct mid_pmu_ipc_log *log;
+ int i = mid_pmu_cxt->ipc_log_idx, j;
+
+ printk(KERN_ERR"%d last ipc commands:\n", LOG_SIZE);
+
+ for (j = 0; j < LOG_SIZE; j++) {
+ i ? i-- : (i = LOG_SIZE - 1);
+ log = &mid_pmu_cxt->ipc_log[i];
+ printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+ log->ts.tv_sec, log->ts.tv_nsec);
+ printk(KERN_ERR"Command: 0x%08x", log->command);
+ printk(KERN_ERR"\n");
+ }
+}
+
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc)
+{
+ struct mid_pmu_cmd_log *log =
+ &mid_pmu_cxt->cmd_log[mid_pmu_cxt->cmd_log_idx];
+
+ if (pm_ssc != NULL)
+ memcpy(&log->pm_ssc, pm_ssc, sizeof(struct pmu_ss_states));
+ else
+ memset(&log->pm_ssc, 0, sizeof(struct pmu_ss_states));
+ log->command = command;
+ pmu_log_timestamp(&log->ts);
+ mid_pmu_cxt->cmd_log_idx = (mid_pmu_cxt->cmd_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_command_log(void)
+{
+ struct mid_pmu_cmd_log *log;
+ int i = mid_pmu_cxt->cmd_log_idx, j, k;
+ u32 cmd_state;
+ printk(KERN_ERR"%d last pmu commands:\n", LOG_SIZE);
+
+ for (j = 0; j < LOG_SIZE; j++) {
+ i ? i-- : (i = LOG_SIZE - 1);
+ log = &mid_pmu_cxt->cmd_log[i];
+ cmd_state = log->command;
+ printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+ log->ts.tv_sec, log->ts.tv_nsec);
+ switch (cmd_state) {
+ case INTERACTIVE_VALUE:
+ printk(KERN_ERR"PM_CMD = Interactive_CMD IOC bit not set.\n");
+ break;
+ case INTERACTIVE_IOC_VALUE:
+ printk(KERN_ERR"PM_CMD = Interactive_CMD IOC bit set.\n");
+ break;
+ case S0I1_VALUE:
+ printk(KERN_ERR"PM_CMD = S0i1_CMD\n");
+ break;
+ case S0I3_VALUE:
+ printk(KERN_ERR"PM_CMD = S0i3_CMD\n");
+ break;
+ case LPMP3_VALUE:
+ printk(KERN_ERR"PM_CMD = LPMP3_CMD\n");
+ break;
+ default:
+ printk(KERN_ERR "Invalid PM_CMD\n");
+ break;
+ }
+ for (k = 0; k < 4; k++)
+ printk(KERN_ERR"pmu2_states[%d]: 0x%08lx\n",
+ k, log->pm_ssc.pmu2_states[k]);
+ printk(KERN_ERR"\n");
+ }
+}
+
+void pmu_dump_logs(void)
+{
+ struct timespec ts;
+
+ pmu_log_timestamp(&ts);
+ printk(KERN_ERR"Dumping out pmu logs\n");
+ printk(KERN_ERR"Timestamp: %lu.%09lu\n\n", ts.tv_sec, ts.tv_nsec);
+ printk(KERN_ERR"---------------------------------------\n\n");
+ pmu_dump_command_log();
+ printk(KERN_ERR"---------------------------------------\n\n");
+ pmu_dump_pmu_irq_log();
+ printk(KERN_ERR"---------------------------------------\n\n");
+ pmu_dump_ipc_log();
+ printk(KERN_ERR"---------------------------------------\n\n");
+ pmu_dump_ipc_irq_log();
+}
+#else
+void pmu_log_pmu_irq(int status) {}
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc) {}
+void pmu_dump_logs(void) {}
+#endif /* LOG_PMU_EVENTS */
+
+void pmu_stat_start(enum sys_state type)
+{
+ mid_pmu_cxt->pmu_current_state = type;
+ mid_pmu_cxt->pmu_stats[type].last_try = cpu_clock(smp_processor_id());
+}
+
+void pmu_stat_end(void)
+{
+ enum sys_state type = mid_pmu_cxt->pmu_current_state;
+
+ if (type > SYS_STATE_S0I0 && type < SYS_STATE_MAX) {
+ mid_pmu_cxt->pmu_stats[type].last_entry =
+ mid_pmu_cxt->pmu_stats[type].last_try;
+
+ if (!mid_pmu_cxt->pmu_stats[type].count)
+ mid_pmu_cxt->pmu_stats[type].first_entry =
+ mid_pmu_cxt->pmu_stats[type].last_entry;
+
+ mid_pmu_cxt->pmu_stats[type].time +=
+ cpu_clock(smp_processor_id())
+ - mid_pmu_cxt->pmu_stats[type].last_entry;
+
+ mid_pmu_cxt->pmu_stats[type].count++;
+
+ s0ix_scu_latency_stat(type);
+ if (type >= SYS_STATE_S0I1 && type <= SYS_STATE_S0I3)
+ /* time stamp for end of s0ix exit */
+ time_stamp_for_sleep_state_latency(type, false, false);
+ }
+
+ mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+}
+
+void pmu_stat_error(u8 err_type)
+{
+ enum sys_state type = mid_pmu_cxt->pmu_current_state;
+ u8 err_index;
+
+ if (type > SYS_STATE_S0I0 && type < SYS_STATE_MAX) {
+ switch (err_type) {
+ case SUBSYS_POW_ERR_INT:
+ trace_printk("S0ix_POW_ERR_INT\n");
+ err_index = 0;
+ break;
+ case S0ix_MISS_INT:
+ trace_printk("S0ix_MISS_INT\n");
+ err_index = 1;
+ break;
+ case NO_ACKC6_INT:
+ trace_printk("S0ix_NO_ACKC6_INT\n");
+ err_index = 2;
+ break;
+ default:
+ err_index = 3;
+ break;
+ }
+
+ if (err_index < 3)
+ mid_pmu_cxt->pmu_stats[type].err_count[err_index]++;
+ }
+}
+
+static void pmu_stat_seq_printf(struct seq_file *s, int type, char *typestr)
+{
+ unsigned long long t;
+ unsigned long nanosec_rem, remainder;
+ unsigned long time, init_2_now_time;
+
+ seq_printf(s, "%s\t%5llu\t%10llu\t%9llu\t%9llu\t", typestr,
+ mid_pmu_cxt->pmu_stats[type].count,
+ mid_pmu_cxt->pmu_stats[type].err_count[0],
+ mid_pmu_cxt->pmu_stats[type].err_count[1],
+ mid_pmu_cxt->pmu_stats[type].err_count[2]);
+
+ t = mid_pmu_cxt->pmu_stats[type].time;
+ nanosec_rem = do_div(t, NANO_SEC);
+
+ /* convert time in secs */
+ time = (unsigned long)t;
+
+ seq_printf(s, "%5lu.%06lu\t",
+ (unsigned long) t, nanosec_rem / 1000);
+
+ t = mid_pmu_cxt->pmu_stats[type].last_entry;
+ nanosec_rem = do_div(t, NANO_SEC);
+ seq_printf(s, "%5lu.%06lu\t",
+ (unsigned long) t, nanosec_rem / 1000);
+
+ t = mid_pmu_cxt->pmu_stats[type].first_entry;
+ nanosec_rem = do_div(t, NANO_SEC);
+ seq_printf(s, "%5lu.%06lu\t",
+ (unsigned long) t, nanosec_rem / 1000);
+
+ t = cpu_clock(raw_smp_processor_id());
+ t -= mid_pmu_cxt->pmu_init_time;
+ nanosec_rem = do_div(t, NANO_SEC);
+
+ init_2_now_time = (unsigned long) t;
+
+ /* for calculating percentage residency */
+ t = (u64) time;
+ t *= 100;
+
+ /* take care of divide by zero */
+ if (init_2_now_time) {
+ remainder = do_div(t, init_2_now_time);
+ time = (unsigned long) t;
+
+ /* for getting 3 digit precision after
+ * decimal dot */
+ t = (u64) remainder;
+ t *= 1000;
+ remainder = do_div(t, init_2_now_time);
+ } else
+ time = t = 0;
+
+ seq_printf(s, "%5lu.%03lu\n", time, (unsigned long) t);
+}
+
+static unsigned long pmu_dev_res_print(int index, unsigned long *precision,
+ unsigned long *sampled_time, bool dev_state)
+{
+ unsigned long long t, delta_time = 0;
+ unsigned long nanosec_rem, remainder;
+ unsigned long time, init_to_now_time;
+
+ t = cpu_clock(raw_smp_processor_id());
+
+ if (dev_state) {
+ /* print for d0ix */
+ if ((mid_pmu_cxt->pmu_dev_res[index].state != PCI_D0))
+ delta_time = t -
+ mid_pmu_cxt->pmu_dev_res[index].d0i3_entry;
+
+ delta_time += mid_pmu_cxt->pmu_dev_res[index].d0i3_acc;
+ } else {
+ /* print for d0i0 */
+ if ((mid_pmu_cxt->pmu_dev_res[index].state == PCI_D0))
+ delta_time = t -
+ mid_pmu_cxt->pmu_dev_res[index].d0i0_entry;
+
+ delta_time += mid_pmu_cxt->pmu_dev_res[index].d0i0_acc;
+ }
+
+ t -= mid_pmu_cxt->pmu_dev_res[index].start;
+ nanosec_rem = do_div(t, NANO_SEC);
+
+ init_to_now_time = (unsigned long) t;
+
+ t = delta_time;
+ nanosec_rem = do_div(t, NANO_SEC);
+
+ /* convert time in secs */
+ time = (unsigned long)t;
+ *sampled_time = time;
+
+ /* for calculating percentage residency */
+ t = (u64) time;
+ t *= 100;
+
+ /* take care of divide by zero */
+ if (init_to_now_time) {
+ remainder = do_div(t, init_to_now_time);
+ time = (unsigned long) t;
+
+ /* for getting 3 digit precision after
+ * decimal dot */
+ t = (u64) remainder;
+ t *= 1000;
+ remainder = do_div(t, init_to_now_time);
+ } else
+ time = t = 0;
+
+ *precision = (unsigned long)t;
+
+ return time;
+}
+
+static void nc_device_state_show(struct seq_file *s, struct pci_dev *pdev)
+{
+ int off, i, islands_num, state;
+ struct island *islands;
+
+ if (PCI_SLOT(pdev->devfn) == DEV_GFX &&
+ PCI_FUNC(pdev->devfn) == FUNC_GFX) {
+ off = mid_pmu_cxt->display_off;
+ islands_num = ISLANDS_GFX;
+ islands = &display_islands[0];
+ } else if (PCI_SLOT(pdev->devfn) == DEV_ISP &&
+ PCI_FUNC(pdev->devfn) == FUNC_ISP) {
+ off = mid_pmu_cxt->camera_off;
+ islands_num = ISLANDS_ISP;
+ islands = &camera_islands[0];
+ } else {
+ return;
+ }
+
+ seq_printf(s, "pci %04x %04X %s %20s: %41s %s\n",
+ pdev->vendor, pdev->device, dev_name(&pdev->dev),
+ dev_driver_string(&pdev->dev),
+ "", off ? "" : "blocking s0ix");
+ for (i = 0; i < islands_num; i++) {
+ state = pmu_nc_get_power_state(islands[i].index,
+ islands[i].type);
+ seq_printf(s, "%52s %15s %17s %s\n",
+ "|------->", islands[i].name, "",
+ (state >= 0) ? dstates[state & 3] : "ERR");
+ }
+}
+
+/*
+ * pmu_devices_state_show() - debugfs dump of current S0ix readiness.
+ * Prints the S0ix target masks/conditions, a snapshot of the sub-system
+ * states (SSS) taken under scu_ready_sem, per-state residency statistics,
+ * and one line per PCI device showing its D0ix state and whether it is
+ * blocking S0ix entry.  North Complex devices are delegated to
+ * nc_device_state_show().
+ */
+static int pmu_devices_state_show(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	int index, i, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+	u32 target_mask, mask, val, needed;
+	struct pmu_ss_states cur_pmsss;
+
+	/* Acquire the scu_ready_sem so the SSS snapshot is consistent */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "TARGET_CFG: ");
+	seq_printf(s, "SSS0:%08X ", S0IX_TARGET_SSS0_MASK);
+	seq_printf(s, "SSS1:%08X ", S0IX_TARGET_SSS1_MASK);
+	seq_printf(s, "SSS2:%08X ", S0IX_TARGET_SSS2_MASK);
+	seq_printf(s, "SSS3:%08X ", S0IX_TARGET_SSS3_MASK);
+
+	seq_printf(s, "\n");
+	seq_printf(s, "CONDITION FOR S0I3: ");
+	seq_printf(s, "SSS0:%08X ", S0IX_TARGET_SSS0);
+	seq_printf(s, "SSS1:%08X ", S0IX_TARGET_SSS1);
+	seq_printf(s, "SSS2:%08X ", S0IX_TARGET_SSS2);
+	seq_printf(s, "SSS3:%08X ", S0IX_TARGET_SSS3);
+
+	seq_printf(s, "\n");
+	seq_printf(s, "SSS: ");
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "%08lX ", cur_pmsss.pmu2_states[i]);
+
+	if (!mid_pmu_cxt->display_off)
+		seq_printf(s, "display not suspended: blocking s0ix\n");
+	else if (!mid_pmu_cxt->camera_off)
+		seq_printf(s, "camera not suspended: blocking s0ix\n");
+	else if (mid_pmu_cxt->s0ix_possible & MID_S0IX_STATE)
+		seq_printf(s, "can enter s0i1 or s0i3\n");
+	else if (mid_pmu_cxt->s0ix_possible & MID_LPMP3_STATE)
+		seq_printf(s, "can enter lpmp3\n");
+	else
+		seq_printf(s, "blocking s0ix\n");
+
+	seq_printf(s, "cmd_error_int count: %d\n", mid_pmu_cxt->cmd_error_int);
+
+	/* fixed typo in column header: "sybsys_pow" -> "subsys_pow" */
+	seq_printf(s,
+	"\tcount\tsubsys_pow\ts0ix_miss\tno_ack_c6\ttime (secs)\tlast_entry");
+	seq_printf(s, "\tfirst_entry\tresidency(%%)\n");
+
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1, "s0i1");
+	pmu_stat_seq_printf(s, SYS_STATE_S0I2, "lpmp3");
+	pmu_stat_seq_printf(s, SYS_STATE_S0I3, "s0i3");
+	pmu_stat_seq_printf(s, SYS_STATE_S3, "s3");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+								&ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1) {
+			nc_device_state_show(s, pdev);
+			continue;
+		}
+
+		mask = (D0I3_MASK << (ss_pos * BITS_PER_LSS));
+		val = (cur_pmsss.pmu2_states[ss_idx] & mask) >>
+				(ss_pos * BITS_PER_LSS);
+		switch (ss_idx) {
+		case 0:
+			target_mask = S0IX_TARGET_SSS0_MASK;
+			break;
+		case 1:
+			target_mask = S0IX_TARGET_SSS1_MASK;
+			break;
+		case 2:
+			target_mask = S0IX_TARGET_SSS2_MASK;
+			break;
+		case 3:
+			target_mask = S0IX_TARGET_SSS3_MASK;
+			break;
+		default:
+			target_mask = 0;
+			break;
+		}
+		needed = ((target_mask & mask) != 0);
+
+		/* added missing space after "reg:%d" so the output does not
+		 * read "reg:0mask:..." (matches the MRFLD printer below) */
+		seq_printf(s, "pci %04x %04X %s %20s: lss:%02d reg:%d "
+			"mask:%08X wk:%02d:%02d:%02d:%03d %s %s\n",
+			pdev->vendor, pdev->device, dev_name(&pdev->dev),
+			dev_driver_string(&pdev->dev),
+			index - mid_pmu_cxt->pmu1_max_devs, ss_idx, mask,
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I1],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I2],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I3],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S3],
+			dstates[val & 3],
+			(needed && !val) ? "blocking s0ix" : "");
+
+	}
+
+	return 0;
+}
+
+static int devices_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_devices_state_show, NULL);
+}
+
+/*
+ * devices_state_write() - debugfs write handler for mid_pmu_states.
+ * Writing "clear" resets all PMU statistics, wake counts and the init
+ * timestamp under scu_ready_sem; any other input is ignored but still
+ * reported as consumed.
+ */
+static ssize_t devices_state_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+
+	buf[buf_size] = 0;
+
+	/* matches exactly "clear" plus one trailing byte (e.g. '\n') */
+	if (((strlen("clear")+1) == buf_size) &&
+		!strncmp(buf, "clear", strlen("clear"))) {
+		down(&mid_pmu_cxt->scu_ready_sem);
+		memset(mid_pmu_cxt->pmu_stats, 0,
+			sizeof(mid_pmu_cxt->pmu_stats));
+		memset(mid_pmu_cxt->num_wakes, 0,
+			sizeof(mid_pmu_cxt->num_wakes));
+		mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+		mid_pmu_cxt->pmu_init_time =
+			cpu_clock(raw_smp_processor_id());
+		clear_d0ix_stats();
+		up(&mid_pmu_cxt->scu_ready_sem);
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations devices_state_operations = {
+	.open = devices_state_open,
+	.read = seq_read,
+	.write = devices_state_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * show_pmu_lss_status() - debugfs dump of every LSS's current D0ix state.
+ * Reads the four pm_sss registers atomically (under scu_ready_sem), then
+ * decodes them two bits per LSS and prints each LSS's name, block,
+ * subsystem, current state and cumulative D0i0..D0i3 entry counts.
+ * Indices beyond lsses_num fall back to the last table entry.
+ */
+static int show_pmu_lss_status(struct seq_file *s, void *unused)
+{
+	int sss_reg_index;
+	int offset;
+	int lss;
+	unsigned long status;
+	unsigned long sub_status;
+	unsigned long lss_status[4];
+	struct lss_definition *entry;
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	lss_status[0] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[0]);
+	lss_status[1] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[1]);
+	lss_status[2] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[2]);
+	lss_status[3] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[3]);
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	lss = 0;
+	seq_printf(s, "%5s\t%12s %35s %5s %4s %4s %4s %4s\n",
+		"lss", "block", "subsystem", "state", "D0i0", "D0i1",
+		"D0i2", "D0i3");
+	seq_printf(s, "====================================================="
+		"=====================\n");
+	for (sss_reg_index = 0; sss_reg_index < 4; sss_reg_index++) {
+		status = lss_status[sss_reg_index];
+		/* 2 bits of state per LSS, 16 LSSes per 32-bit register */
+		for (offset = 0; offset < sizeof(unsigned long) * 8 / 2;
+				offset++) {
+			sub_status = status & 3;
+			if (lss >= lsses_num)
+				entry = &lsses[lsses_num - 1];
+			else
+				entry = &lsses[lss];
+			seq_printf(s, "%5s\t%12s %35s %4s %4d %4d %4d %4d\n",
+				entry->lss_name, entry->block,
+				entry->subsystem,
+				lss_device_status[sub_status],
+				get_d0ix_stat(lss, SS_STATE_D0I0),
+				get_d0ix_stat(lss, SS_STATE_D0I1),
+				get_d0ix_stat(lss, SS_STATE_D0I2),
+				get_d0ix_stat(lss, SS_STATE_D0I3));
+
+			status >>= 2;
+			lss++;
+		}
+	}
+
+	return 0;
+}
+
+static int pmu_sss_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_lss_status, NULL);
+}
+
+static const struct file_operations pmu_sss_state_operations = {
+	.open = pmu_sss_state_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * show_pmu_dev_stats() - per-device D0/D0ix residency table.
+ * North Complex devices are labelled "NC"; South Complex devices are
+ * labelled by LSS number.  Residency percentages and sample times come
+ * from pmu_dev_res_print() (false = D0 residency, true = D0ix).
+ */
+static int show_pmu_dev_stats(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned long sampled_time, precision;
+	int index, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+
+	seq_printf(s, "%5s\t%20s\t%10s\t%10s\t%s\n",
+		"lss", "Name", "D0_res", "D0ix_res", "Sampled_Time");
+	seq_printf(s,
+	"==================================================================\n");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+								&ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1) {
+			seq_printf(s,
+			"%5s%20s\t%5lu.%03lu%%\t%5lu.%03lu%%\t%lu\n",
+			"NC", dev_driver_string(&pdev->dev),
+			pmu_dev_res_print(index, &precision,
+				&sampled_time, false),
+			precision,
+			pmu_dev_res_print(index, &precision,
+				&sampled_time, true),
+			precision, sampled_time);
+			continue;
+		}
+
+		/* Print for South Complex devices */
+		seq_printf(s, "%5d\t%20s\t%5lu.%03lu%%\t%5lu.%03lu%%\t%lu\n",
+		index - mid_pmu_cxt->pmu1_max_devs,
+		dev_driver_string(&pdev->dev),
+		pmu_dev_res_print(index, &precision, &sampled_time, false),
+		precision,
+		pmu_dev_res_print(index, &precision, &sampled_time, true),
+		precision, sampled_time);
+	}
+	return 0;
+}
+
+static int pmu_dev_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_dev_stats, NULL);
+}
+
+static const struct file_operations pmu_dev_stat_operations = {
+	.open = pmu_dev_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#ifdef CONFIG_PM_DEBUG
+/* Interval in seconds between periodic stats dumps; runtime-tunable. */
+static int pmu_stats_interval = PMU_LOG_INTERVAL_SECS;
+module_param_named(pmu_stats_interval, pmu_stats_interval,
+				int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/*
+ * pmu_s0ix_demotion_stat() - account for an S0ix request that was demoted.
+ * Records which shallower state was granted instead of @req_state, and —
+ * when S0ix was not possible — which LSSes (plus display/camera) were
+ * blocking, by XORing the current SSS snapshot against the target masks.
+ * Uses down_trylock() and silently skips the blocker accounting if the
+ * SCU semaphore is contended, so this is safe on the idle path.
+ */
+void pmu_s0ix_demotion_stat(int req_state, int grant_state)
+{
+	struct pmu_ss_states cur_pmsss;
+	int i, req_sys_state, offset;
+	unsigned long status, sub_status;
+	unsigned long s0ix_target_sss_mask[4] = {
+		S0IX_TARGET_SSS0_MASK,
+		S0IX_TARGET_SSS1_MASK,
+		S0IX_TARGET_SSS2_MASK,
+		S0IX_TARGET_SSS3_MASK};
+
+	unsigned long s0ix_target_sss[4] = {
+		S0IX_TARGET_SSS0,
+		S0IX_TARGET_SSS1,
+		S0IX_TARGET_SSS2,
+		S0IX_TARGET_SSS3};
+
+	unsigned long lpmp3_target_sss_mask[4] = {
+		LPMP3_TARGET_SSS0_MASK,
+		LPMP3_TARGET_SSS1_MASK,
+		LPMP3_TARGET_SSS2_MASK,
+		LPMP3_TARGET_SSS3_MASK};
+
+	unsigned long lpmp3_target_sss[4] = {
+		LPMP3_TARGET_SSS0,
+		LPMP3_TARGET_SSS1,
+		LPMP3_TARGET_SSS2,
+		LPMP3_TARGET_SSS3};
+
+	req_sys_state = mid_state_to_sys_state(req_state);
+	if ((grant_state >= C4_STATE_IDX) && (grant_state <= S0I3_STATE_IDX))
+		mid_pmu_cxt->pmu_stats
+			[req_sys_state].demote_count
+			[grant_state-C4_STATE_IDX]++;
+
+	if (down_trylock(&mid_pmu_cxt->scu_ready_sem))
+		return;
+
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	if (!mid_pmu_cxt->camera_off)
+		mid_pmu_cxt->pmu_stats[req_sys_state].camera_blocker_count++;
+
+	if (!mid_pmu_cxt->display_off)
+		mid_pmu_cxt->pmu_stats[req_sys_state].display_blocker_count++;
+
+	if (!mid_pmu_cxt->s0ix_possible) {
+		for (i = 0; i < 4; i++) {
+			unsigned int lss_per_register;
+			/* XOR leaves set bits only where the LSS differs
+			 * from its required target state */
+			if (req_state == MID_LPMP3_STATE)
+				status = lpmp3_target_sss[i] ^
+					(cur_pmsss.pmu2_states[i] &
+					lpmp3_target_sss_mask[i]);
+			else
+				status = s0ix_target_sss[i] ^
+					(cur_pmsss.pmu2_states[i] &
+					s0ix_target_sss_mask[i]);
+			if (!status)
+				continue;
+
+			lss_per_register =
+				(sizeof(unsigned long)*8)/BITS_PER_LSS;
+
+			for (offset = 0; offset < lss_per_register; offset++) {
+				sub_status = status & SS_IDX_MASK;
+				if (sub_status) {
+					mid_pmu_cxt->pmu_stats[req_sys_state].
+						blocker_count
+						[offset + lss_per_register*i]++;
+				}
+
+				status >>= BITS_PER_LSS;
+			}
+		}
+	}
+}
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+
+/*
+ * pmu_log_s0ix_status() - print count/miss/residency line for one state.
+ * Residency percentage is (state time / uptime since pmu_init_time) with
+ * three decimal digits of precision computed via the remainder of do_div.
+ */
+static void pmu_log_s0ix_status(int type, char *typestr,
+				struct seq_file *s, bool logging_type)
+{
+	unsigned long long t;
+	unsigned long time, remainder, init_2_now_time;
+
+	t = mid_pmu_cxt->pmu_stats[type].time;
+	remainder = do_div(t, NANO_SEC);
+
+	/* convert time in secs */
+	time = (unsigned long)t;
+
+	t = cpu_clock(0);
+	t -= mid_pmu_cxt->pmu_init_time;
+	remainder = do_div(t, NANO_SEC);
+
+	init_2_now_time = (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t = (u64) time;
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3 digit precision after
+		 * decimal dot */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+	DEBUG_PRINT(logging_type, s, STATS,
+			"%s\t%5llu\t%9llu\t%9llu\t%5lu.%03lu\n"
+			, typestr, mid_pmu_cxt->pmu_stats[type].count,
+			mid_pmu_cxt->pmu_stats[type].err_count[1],
+			mid_pmu_cxt->pmu_stats[type].err_count[2],
+			time, (unsigned long) t);
+}
+
+/* Print the demotion counters (how often @type fell back to each
+ * shallower state) for one source state. */
+static void pmu_log_s0ix_demotion(int type, char *typestr,
+				struct seq_file *s, bool logging_type)
+{
+	DEBUG_PRINT(logging_type, s, STATS, "%s:\t%6d\t%6d\t%6d\t%6d\t%6d\n",
+			typestr,
+			mid_pmu_cxt->pmu_stats[type].demote_count[0],
+			mid_pmu_cxt->pmu_stats[type].demote_count[1],
+			mid_pmu_cxt->pmu_stats[type].demote_count[2],
+			mid_pmu_cxt->pmu_stats[type].demote_count[3],
+			mid_pmu_cxt->pmu_stats[type].demote_count[4]);
+}
+
+/* Print per-LSS (plus display/camera) blocker counts for one state;
+ * LSSes that never blocked are omitted. */
+static void pmu_log_s0ix_lss_blocked(int type, char *typestr,
+				struct seq_file *s, bool logging_type)
+{
+	int i, block_count;
+
+	DEBUG_PRINT(logging_type, s, STATS, "%s: Block Count\n", typestr);
+
+	block_count = mid_pmu_cxt->pmu_stats[type].display_blocker_count;
+
+	if (block_count)
+		DEBUG_PRINT(logging_type, s, STATS,
+			"\tDisplay blocked: %d times\n", block_count);
+
+	block_count = mid_pmu_cxt->pmu_stats[type].camera_blocker_count;
+
+	if (block_count)
+		DEBUG_PRINT(logging_type, s, STATS,
+			"\tCamera blocked: %d times\n", block_count);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\tLSS\t #blocked\n");
+
+	for (i = 0; i < MAX_LSS_POSSIBLE; i++) {
+		block_count = mid_pmu_cxt->pmu_stats[type].blocker_count[i];
+		if (block_count)
+			DEBUG_PRINT(logging_type, s, STATS, "\t%02d\t %6d\n", i,
+					block_count);
+	}
+	DEBUG_PRINT(logging_type, s, STATS, "\n");
+}
+
+/*
+ * pmu_stats_logger() - emit the full stats report.
+ * logging_type selects the sink inside DEBUG_PRINT: false = kernel log
+ * (periodic work), true = the given seq_file (debugfs read).
+ */
+static void pmu_stats_logger(bool logging_type, struct seq_file *s)
+{
+
+	if (!logging_type)
+		DEBUG_PRINT(logging_type, s, STATS,
+				"\n----MID_PMU_STATS_LOG_BEGIN----\n");
+
+	DEBUG_PRINT(logging_type, s, STATS,
+			"\tcount\ts0ix_miss\tno_ack_c6\tresidency(%%)\n");
+	pmu_log_s0ix_status(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S0I3, "s0i3", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S3, "s3", s, logging_type);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\nFrom:\tTo\n");
+	DEBUG_PRINT(logging_type, s, STATS,
+			"\t C4\t C6\t S0i1\t Lpmp3\t S0i3\n");
+
+	/* storing C6 demotion info in S0I0 */
+	pmu_log_s0ix_demotion(SYS_STATE_S0I0, "  C6", s, logging_type);
+
+	pmu_log_s0ix_demotion(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_demotion(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_demotion(SYS_STATE_S0I3, "s0i3", s, logging_type);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\n");
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I3, "s0i3", s, logging_type);
+
+	if (!logging_type)
+		DEBUG_PRINT(logging_type, s, STATS,
+				"\n----MID_PMU_STATS_LOG_END----\n");
+}
+
+/* Deferred-work callback: dump stats to the log, then re-arm itself. */
+static void pmu_log_stat(struct work_struct *work)
+{
+
+	pmu_stats_logger(false, NULL);
+
+	schedule_delayed_work(&mid_pmu_cxt->log_work,
+			msecs_to_jiffies(pmu_stats_interval*1000));
+}
+
+static int show_pmu_stats_log(struct seq_file *s, void *unused)
+{
+	pmu_stats_logger(true, s);
+	return 0;
+}
+
+static int pmu_stats_log_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_stats_log, NULL);
+}
+
+static const struct file_operations pmu_stats_log_operations = {
+	.open = pmu_stats_log_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#else
+/* No-op stub keeps the exported symbol when CONFIG_PM_DEBUG is off. */
+void pmu_s0ix_demotion_stat(int req_state, int grant_state) {}
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+#endif
+
+/*
+ * pmu_stats_init() - create the debugfs entries and start periodic logging.
+ * NOTE(review): debugfs_create_file() returns ERR_PTR (not NULL) on
+ * failure in later kernels, so the NULL check below may never fire —
+ * confirm against the kernel version this targets.
+ */
+void pmu_stats_init(void)
+{
+	struct dentry *fentry;
+
+	/* /sys/kernel/debug/mid_pmu_states */
+	(void) debugfs_create_file("mid_pmu_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &devices_state_operations);
+
+	/* /sys/kernel/debug/pmu_sss_states */
+	(void) debugfs_create_file("pmu_sss_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_sss_state_operations);
+
+	/* /sys/kernel/debug/pmu_dev_stats */
+	(void) debugfs_create_file("pmu_dev_stats", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_dev_stat_operations);
+
+	s0ix_lat_stat_init();
+
+#ifdef CONFIG_PM_DEBUG
+	/* dynamic debug tracing in every 5 mins */
+	INIT_DEFERRABLE_WORK(&mid_pmu_cxt->log_work, pmu_log_stat);
+	schedule_delayed_work(&mid_pmu_cxt->log_work,
+			msecs_to_jiffies(pmu_stats_interval*1000));
+
+	debug_mask = PMU_DEBUG_PRINT_STATS;
+
+	/* /sys/kernel/debug/pmu_stats_log */
+	fentry = debugfs_create_file("pmu_stats_log", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_stats_log_operations);
+	if (fentry == NULL)
+		printk(KERN_ERR "Failed to create pmu_stats_log debugfs\n");
+#endif
+}
+
+/* Intentionally empty on this platform; S3 accounting handled elsewhere. */
+void pmu_s3_stats_update(int enter)
+{
+
+}
+
+/* Tear down: stop the periodic log work and latency statistics. */
+void pmu_stats_finish(void)
+{
+#ifdef CONFIG_PM_DEBUG
+	cancel_delayed_work_sync(&mid_pmu_cxt->log_work);
+#endif
+	s0ix_lat_stat_finish();
+}
+
+#endif /*if CONFIG_X86_MDFLD_POWER || CONFIG_X86_CLV_POWER*/
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+
+/* Snapshot of SCU counters at the last "clear"; current deltas are
+ * computed against these. S3 is tracked by software (S3_count/S3_res). */
+static u32 prev_s0ix_cnt[SYS_STATE_MAX];
+static unsigned long long prev_s0ix_res[SYS_STATE_MAX];
+static unsigned long long cur_s0ix_res[SYS_STATE_MAX];
+static unsigned long long cur_s0ix_cnt[SYS_STATE_MAX];
+static u32 S3_count;
+static unsigned long long S3_res;
+
+/*
+ * pmu_stat_seq_printf() - print residency/percentage/count/average for one
+ * power state (MRFLD).  SCU residency counters are in TSC-cycle domain and
+ * are converted to milliseconds via tsc_khz (defaulting to 1330000 kHz if
+ * calibration failed).  S0I0 is derived as uptime minus the sum of all
+ * deeper-state residencies; S3 values come from software bookkeeping.
+ * NOTE(review): the wrap-around correction adds (~0 - prev) which looks
+ * off by one (missing +1) — confirm whether that matters here.
+ */
+static void pmu_stat_seq_printf(struct seq_file *s, int type, char *typestr,
+				long long uptime)
+{
+	unsigned long long t;
+	u32 scu_val = 0, time = 0;
+	u32 remainder;
+	unsigned long init_2_now_time;
+	unsigned long long tsc_freq = 1330000;
+
+	/* If tsc calibration fails use the default as 1330Mhz */
+	if (tsc_khz)
+		tsc_freq = tsc_khz;
+
+	/* Print S0ix residency counter */
+	if (type == SYS_STATE_S0I0) {
+		for (t = SYS_STATE_S0I1; t <= SYS_STATE_S3; t++)
+			time += cur_s0ix_res[t];
+	} else if (type < SYS_STATE_S3) {
+		t = readq(residency[type]);
+		if (t < prev_s0ix_res[type])
+			t += (((unsigned long long)~0) - prev_s0ix_res[type]);
+		else
+			t -= prev_s0ix_res[type];
+
+		/* S0i3 hardware counter also accumulates S3 time */
+		if (type == SYS_STATE_S0I3)
+			t -= prev_s0ix_res[SYS_STATE_S3];
+	} else
+		t = prev_s0ix_res[SYS_STATE_S3];
+
+	if (type == SYS_STATE_S0I0) {
+		/* uptime(nanoS) - sum_res(miliSec) */
+		t = uptime;
+		do_div(t, MICRO_SEC);
+		time = t - time;
+	} else {
+		/* s0ix residency counters are in TSC cycle count domain
+		 * convert this to milli second time domain
+		 */
+		remainder = do_div(t, tsc_freq);
+
+		/* store time in millisecs */
+		time = (unsigned int)t;
+	}
+	cur_s0ix_res[type] = (unsigned int)time;
+
+	seq_printf(s, "%s\t%5lu.%03lu\t", typestr,
+		(unsigned long)(time/1000), (unsigned long)(time%1000));
+
+	t = uptime;
+	do_div(t, MICRO_SEC); /* time in milli secs */
+
+	/* Note: with millisecs accuracy we get more
+	 * precise residency percentages, but we have
+	 * to trade off with the max number of days
+	 * that we can run without clearing counters,
+	 * with 32bit counter this value is ~50days.
+	 */
+	init_2_now_time = (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t = (u64)(time);
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3 digit precision after
+		 * decimal dot */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+
+	seq_printf(s, "%5lu.%03lu\t", (unsigned long) time, (unsigned long) t);
+
+	/* Print S0ix counters */
+	if (type == SYS_STATE_S0I0) {
+		for (t = SYS_STATE_S0I1; t <= SYS_STATE_S3; t++)
+			scu_val += cur_s0ix_cnt[t];
+		if (scu_val == 0) /* S0I0 residency 100% */
+			scu_val = 1;
+	} else if (type < SYS_STATE_S3) {
+		scu_val = readl(s0ix_counter[type]);
+		if (scu_val < prev_s0ix_cnt[type])
+			scu_val += (((u32)~0) - prev_s0ix_cnt[type]);
+		else
+			scu_val -= prev_s0ix_cnt[type];
+
+		if (type == SYS_STATE_S0I3)
+			scu_val -= prev_s0ix_cnt[SYS_STATE_S3];
+	} else
+		scu_val = prev_s0ix_cnt[SYS_STATE_S3];
+
+	if (type != SYS_STATE_S0I0)
+		cur_s0ix_cnt[type] = scu_val;
+
+	seq_printf(s, "%5lu\t", (unsigned long) scu_val);
+
+	/* average residency per entry = total ms / entry count */
+	remainder = 0;
+	t = cur_s0ix_res[type];
+	if (scu_val) { /* s0ix_time in millisecs */
+		do_div(t, scu_val);
+		remainder = do_div(t, 1000);
+	}
+	seq_printf(s, "%5lu.%03lu\n", (unsigned long) t,
+		(unsigned long) remainder);
+}
+
+/*
+ * pmu_devices_state_show() - MRFLD debugfs dump of S0ix/device states.
+ * Asks the SCU (via IPC) to refresh the residency and count buffers, then
+ * prints per-state residency, North Complex island states from the PUNIT
+ * message bus, and each South Complex PCI device's D0ix state.
+ */
+static int pmu_devices_state_show(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	int index, i, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+	u32 mask, val, nc_pwr_sts;
+	struct pmu_ss_states cur_pmsss;
+	long long uptime;
+	int ret;
+
+	if (!pmu_initialized)
+		return 0;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "SSS: ");
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "%08lX ", cur_pmsss.pmu2_states[i]);
+
+	seq_printf(s, "cmd_error_int count: %d\n", mid_pmu_cxt->cmd_error_int);
+
+	seq_printf(s, "\ttime(secs)\tresidency(%%)\tcount\tAvg.Res(Sec)\n");
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Dump S0ix residency counters */
+	ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+	if (ret)
+		seq_printf(s, "IPC command to DUMP S0ix residency failed\n");
+
+	/* Dump number of interations of S0ix */
+	ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+	if (ret)
+		seq_printf(s, "IPC command to DUMP S0ix count failed\n");
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	uptime = cpu_clock(0);
+	uptime -= mid_pmu_cxt->pmu_init_time;
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1, "s0i1", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_LPMP3, "lpmp3", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I2, "s0i2", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I3, "s0i3", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S3, "s3", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I0, "s0", uptime);
+
+	/* val holds the sub-second remainder (nanoseconds) */
+	val = do_div(uptime, NANO_SEC);
+	seq_printf(s, "\n\nTotal time: %5lu.%03lu Sec\n", (unsigned long)uptime,
+			(unsigned long) val/1000000);
+
+	seq_printf(s, "\nNORTH COMPLEX DEVICES :\n\n");
+
+	nc_pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS);
+	for (i = 0; i < mrfl_no_of_nc_devices; i++) {
+		val = nc_pwr_sts & 3;
+		nc_pwr_sts >>= BITS_PER_LSS;
+		seq_printf(s, "%9s : %s\n", mrfl_nc_devices[i], dstates[val]);
+	}
+
+	seq_printf(s, "\nSOUTH COMPLEX DEVICES :\n\n");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+								&ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1)
+			continue;
+
+		mask = (D0I3_MASK << (ss_pos * BITS_PER_LSS));
+		val = (cur_pmsss.pmu2_states[ss_idx] & mask) >>
+				(ss_pos * BITS_PER_LSS);
+
+		seq_printf(s, "pci %04x %04X %s %20.20s: lss:%02d reg:%d ",
+			pdev->vendor, pdev->device, dev_name(&pdev->dev),
+			dev_driver_string(&pdev->dev),
+			index - mid_pmu_cxt->pmu1_max_devs, ss_idx);
+		seq_printf(s, "mask:%08X %s\n", mask, dstates[val & 3]);
+	}
+
+	return 0;
+}
+
+static int devices_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_devices_state_show, NULL);
+}
+
+/*
+ * devices_state_write() - MRFLD variant; "clear" re-baselines statistics.
+ * Refreshes the SCU buffers via IPC, then records the current hardware
+ * counter values as the new "previous" snapshots so future deltas start
+ * from zero.  S3 snapshots are software-tracked and simply reset.
+ */
+static ssize_t devices_state_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int ret;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	if (((strlen("clear")+1) == buf_size) &&
+		!strncmp(buf, "clear", strlen("clear"))) {
+		down(&mid_pmu_cxt->scu_ready_sem);
+
+		/* Dump S0ix residency counters */
+		ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+		if (ret)
+			printk(KERN_ERR "IPC command to DUMP S0ix residency failed\n");
+
+		/* Dump number of interations of S0ix */
+		ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+		if (ret)
+			printk(KERN_ERR "IPC command to DUMP S0ix count failed\n");
+		up(&mid_pmu_cxt->scu_ready_sem);
+
+		mid_pmu_cxt->pmu_init_time = cpu_clock(0);
+		prev_s0ix_cnt[SYS_STATE_S0I1] = readl(s0ix_counter[SYS_STATE_S0I1]);
+		prev_s0ix_cnt[SYS_STATE_LPMP3] = readl(s0ix_counter[SYS_STATE_LPMP3]);
+		prev_s0ix_cnt[SYS_STATE_S0I2] = readl(s0ix_counter[SYS_STATE_S0I2]);
+		prev_s0ix_cnt[SYS_STATE_S0I3] = readl(s0ix_counter[SYS_STATE_S0I3]);
+		prev_s0ix_cnt[SYS_STATE_S3] = 0;
+		prev_s0ix_res[SYS_STATE_S0I1] = readq(residency[SYS_STATE_S0I1]);
+		prev_s0ix_res[SYS_STATE_LPMP3] = readq(residency[SYS_STATE_LPMP3]);
+		prev_s0ix_res[SYS_STATE_S0I2] = readq(residency[SYS_STATE_S0I2]);
+		prev_s0ix_res[SYS_STATE_S0I3] = readq(residency[SYS_STATE_S0I3]);
+		prev_s0ix_res[SYS_STATE_S3] = 0 ;
+	}
+	return buf_size;
+}
+
+
+static const struct file_operations devices_state_operations = {
+	.open = devices_state_open,
+	.read = seq_read,
+	.write = devices_state_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#ifdef CONFIG_PM_DEBUG
+/* Show the four ignore_lss masks (LSSes excluded from S0ix decisions). */
+static int ignore_lss_show(struct seq_file *s, void *unused)
+{
+	u32 local_ignore_lss[4];
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "IGNORE_LSS[0]: %08X\n", local_ignore_lss[0]);
+	seq_printf(s, "IGNORE_LSS[1]: %08X\n", local_ignore_lss[1]);
+	seq_printf(s, "IGNORE_LSS[2]: %08X\n", local_ignore_lss[2]);
+	seq_printf(s, "IGNORE_LSS[3]: %08X\n", local_ignore_lss[3]);
+
+	return 0;
+}
+
+static int ignore_add_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ignore_lss_show, NULL);
+}
+
+/*
+ * ignore_add_write() - add an LSS (decimal number) to the ignore masks.
+ * Writing MAX_LSS_POSSIBLE is a sentinel meaning "ignore all".  The masks
+ * are copied out, modified, and copied back each under scu_ready_sem.
+ */
+static ssize_t ignore_add_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	int sub_sys_pos, sub_sys_index;
+	u32 lss, local_ignore_lss[4];
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	/* If set to MAX_LSS_POSSIBLE it means
+	 * ignore all.
+	 */
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_ignore_lss[0] = 0xFFFFFFFF;
+		local_ignore_lss[1] = 0xFFFFFFFF;
+		local_ignore_lss[2] = 0xFFFFFFFF;
+		local_ignore_lss[3] = 0xFFFFFFFF;
+	} else {
+		sub_sys_index = lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos = lss % mid_pmu_cxt->ss_per_reg;
+
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+		local_ignore_lss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(mid_pmu_cxt->ignore_lss, local_ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations ignore_add_ops = {
+	.open = ignore_add_open,
+	.read = seq_read,
+	.write = ignore_add_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int ignore_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ignore_lss_show, NULL);
+}
+
+/*
+ * ignore_remove_write() - mirror of ignore_add_write(): clears the bits
+ * for one LSS, or all bits when MAX_LSS_POSSIBLE is written.
+ */
+static ssize_t ignore_remove_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	int sub_sys_pos, sub_sys_index;
+	u32 lss, local_ignore_lss[4];
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	/* If set to MAX_LSS_POSSIBLE it means
+	 * remove all from ignore list.
+	 */
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_ignore_lss[0] = 0;
+		local_ignore_lss[1] = 0;
+		local_ignore_lss[2] = 0;
+		local_ignore_lss[3] = 0;
+	} else {
+		sub_sys_index = lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos = lss % mid_pmu_cxt->ss_per_reg;
+
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+		local_ignore_lss[sub_sys_index] &= ~pm_cmd_val;
+	}
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(mid_pmu_cxt->ignore_lss, local_ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations ignore_remove_ops = {
+	.open = ignore_remove_open,
+	.read = seq_read,
+	.write = ignore_remove_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Show the OS-requested SSS next to the SCU's actual SSS for comparison. */
+static int pmu_sync_d0ix_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 local_os_sss[4];
+	struct pmu_ss_states cur_pmsss;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	/* Read SCU SSS */
+	pmu_read_sss(&cur_pmsss);
+	/* Read OS SSS */
+	memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "OS_SSS[%d]: %08X\tSSS[%d]: %08lX\n", i,
+			local_os_sss[i], i, cur_pmsss.pmu2_states[i]);
+
+	return 0;
+}
+
+static int pmu_sync_d0ix_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_sync_d0ix_show, NULL);
+}
+
+/*
+ * pmu_sync_d0ix_write() - push the OS-desired D0ix state(s) to the SCU.
+ * Writing an LSS number syncs just that LSS; MAX_LSS_POSSIBLE syncs all
+ * (ignore-listed LSSes are masked out first).  An interactive PM command
+ * is issued only when the SCU state actually differs, and the function
+ * waits for completion so the device is powered before callers touch it.
+ * The whole operation runs under scu_ready_sem.
+ */
+static ssize_t pmu_sync_d0ix_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res, i;
+	bool send_cmd;
+	int buf_size = min(count, sizeof(buf)-1);
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+	u32 temp_sss;
+
+	struct pmu_ss_states cur_pmsss;
+
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	/* Read SCU SSS */
+	pmu_read_sss(&cur_pmsss);
+
+	for (i = 0; i < 4; i++)
+		local_os_sss[i] = mid_pmu_cxt->os_sss[i] &
+					~mid_pmu_cxt->ignore_lss[i];
+
+	send_cmd = false;
+	for (i = 0; i < 4; i++) {
+		if (local_os_sss[i] != cur_pmsss.pmu2_states[i]) {
+			send_cmd = true;
+			break;
+		}
+	}
+
+	if (send_cmd) {
+		int status;
+
+		if (lss == MAX_LSS_POSSIBLE) {
+			memcpy(cur_pmsss.pmu2_states, local_os_sss,
+							(sizeof(u32)*4));
+		} else {
+			bool same;
+			sub_sys_index = lss / mid_pmu_cxt->ss_per_reg;
+			sub_sys_pos = lss % mid_pmu_cxt->ss_per_reg;
+			pm_cmd_val =
+				(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+			/* dont send d0ix request if its same */
+			same =
+			((cur_pmsss.pmu2_states[sub_sys_index] & pm_cmd_val)
+			== (mid_pmu_cxt->os_sss[sub_sys_index] & pm_cmd_val));
+
+			if (same)
+				goto unlock;
+
+			cur_pmsss.pmu2_states[sub_sys_index] &= ~pm_cmd_val;
+			temp_sss =
+				mid_pmu_cxt->os_sss[sub_sys_index] & pm_cmd_val;
+			cur_pmsss.pmu2_states[sub_sys_index] |= temp_sss;
+		}
+
+		/* Issue the pmu command to PMU 2
+		 * flag is needed to distinguish between
+		 * S0ix vs interactive command in pmu_sc_irq()
+		 */
+		status = pmu_issue_interactive_command(&cur_pmsss, false,
+							false);
+
+		if (unlikely(status != PMU_SUCCESS)) {
+			dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				"Failed to Issue a PM command to PMU2\n");
+			goto unlock;
+		}
+
+		/*
+		 * Wait for interactive command to complete.
+		 * If we dont wait, there is a possibility that
+		 * the driver may access the device before its
+		 * powered on in SCU.
+		 *
+		 */
+		status = _pmu2_wait_not_busy();
+		if (unlikely(status)) {
+			printk(KERN_CRIT "%s: D0ix transition failure\n",
+				__func__);
+		}
+	}
+
+unlock:
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations pmu_sync_d0ix_ops = {
+	.open = pmu_sync_d0ix_open,
+	.read = seq_read,
+	.write = pmu_sync_d0ix_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Show only the OS-requested SSS masks (shared by force_d0i3/force_d0i0). */
+static int pmu_force_d0ix_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 local_os_sss[4];
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Read OS SSS */
+	memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "OS_SSS[%d]: %08X\n", i, local_os_sss[i]);
+
+	return 0;
+}
+
+static int pmu_force_d0ix_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_force_d0ix_show, NULL);
+}
+
+/*
+ * pmu_force_d0i3_write() - mark an LSS (or all, via MAX_LSS_POSSIBLE) as
+ * wanting D0i3 in the OS SSS.  Only updates the request masks; the actual
+ * SCU command is issued later via pmu_sync_d0ix_write().
+ */
+static ssize_t pmu_force_d0i3_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_os_sss[0] =
+		local_os_sss[1] =
+		local_os_sss[2] =
+		local_os_sss[3] = 0xFFFFFFFF;
+	} else {
+		memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+		sub_sys_index = lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos = lss % mid_pmu_cxt->ss_per_reg;
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+		local_os_sss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	memcpy(mid_pmu_cxt->os_sss, local_os_sss, (sizeof(u32)*4));
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations pmu_force_d0i3_ops = {
+	.open = pmu_force_d0ix_open,
+	.read = seq_read,
+	.write = pmu_force_d0i3_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * pmu_force_d0i0_write() - mirror of pmu_force_d0i3_write(): clears the
+ * D0ix request bits for one LSS, or all when MAX_LSS_POSSIBLE is written.
+ */
+static ssize_t pmu_force_d0i0_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_os_sss[0] =
+		local_os_sss[1] =
+		local_os_sss[2] =
+		local_os_sss[3] = 0;
+	} else {
+		memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+		sub_sys_index = lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos = lss % mid_pmu_cxt->ss_per_reg;
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+		local_os_sss[sub_sys_index] &= ~pm_cmd_val;
+	}
+
+	memcpy(mid_pmu_cxt->os_sss, local_os_sss, (sizeof(u32)*4));
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations pmu_force_d0i0_ops = {
+	.open = pmu_force_d0ix_open,
+	.read = seq_read,
+	.write = pmu_force_d0i0_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* List the C-states currently masked out of cpuidle (bit i => state i+1). */
+static int cstate_ignore_add_show(struct seq_file *s, void *unused)
+{
+	int i;
+	seq_printf(s, "CSTATES IGNORED: ");
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++)
+		if ((mid_pmu_cxt->cstate_ignore & (1 << i)))
+			seq_printf(s, "%d, ", i+1);
+
+	seq_printf(s, "\n");
+	return 0;
+}
+
+static int cstate_ignore_add_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cstate_ignore_add_show, NULL);
+}
+
+static ssize_t cstate_ignore_add_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	u32 cstate;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &cstate);
+
+	if (res)
+		return -EINVAL;
+
+	if (cstate > MAX_CSTATES_POSSIBLE)
+		return -EINVAL;
+
+	/* cannot add/remove C0, C1 */
+	if (((cstate == 0) || (cstate == 1))) {
+		printk(KERN_CRIT "C0 C1 state cannot be used.\n");
+		return -EINVAL;
+	}
+
+	if (!mid_pmu_cxt->cstate_qos)
+		return -EINVAL;
+
+	if (cstate == MAX_CSTATES_POSSIBLE) {
+		mid_pmu_cxt->cstate_ignore = ((1 << CPUIDLE_STATE_MAX) - 1);
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+			CSTATE_EXIT_LATENCY_C1 - 1);
+	} else {
+		u32 cstate_exit_latency[CPUIDLE_STATE_MAX+1];
+		u32 local_cstate_allowed;
+		int max_cstate_allowed;
+
+		/* 0 is C1 state */
+		cstate--;
+		mid_pmu_cxt->cstate_ignore |= (1 << cstate);
+
+		/* by default remove C1 from ignore list */
+		mid_pmu_cxt->cstate_ignore &= ~(1 << 0);
+
+		/* populate cstate latency table */
+		cstate_exit_latency[0] = CSTATE_EXIT_LATENCY_C1;
+		cstate_exit_latency[1] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[2] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[3] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[4] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[5] = CSTATE_EXIT_LATENCY_C6;
+		cstate_exit_latency[6] = CSTATE_EXIT_LATENCY_S0i1;
+		cstate_exit_latency[7] = CSTATE_EXIT_LATENCY_S0i2;
+		cstate_exit_latency[8] = CSTATE_EXIT_LATENCY_S0i3;
+		cstate_exit_latency[9] = PM_QOS_DEFAULT_VALUE;
+		cstate_exit_latency[10] = PM_QOS_DEFAULT_VALUE;
+
+		local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+
+		/* restrict to max c-states */
+		local_cstate_allowed &= ((1<<CPUIDLE_STATE_MAX)-1);
+
+		/* If no states allowed will return 0 */
+		max_cstate_allowed = fls(local_cstate_allowed);
+
+		printk(KERN_CRIT "max_cstate: %d local_cstate_allowed = %x\n",
+			max_cstate_allowed, local_cstate_allowed);
+		printk(KERN_CRIT "exit latency = %d\n",
+			(cstate_exit_latency[max_cstate_allowed]-1));
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+			(cstate_exit_latency[max_cstate_allowed]-1));
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations cstate_ignore_add_ops = {
+ .open = cstate_ignore_add_open,
+ .read = seq_read,
+ .write = cstate_ignore_add_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int cstate_ignore_remove_show(struct seq_file *s, void *unused)
+{
+ int i;
+ seq_printf(s, "CSTATES ALLOWED: ");
+ for (i = 0; i < CPUIDLE_STATE_MAX; i++)
+ if (!(mid_pmu_cxt->cstate_ignore & (1 << i)))
+ seq_printf(s, "%d, ", i+1);
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int cstate_ignore_remove_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cstate_ignore_remove_show, NULL);
+}
+
+static ssize_t cstate_ignore_remove_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	u32 cstate;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &cstate);
+
+	if (res)
+		return -EINVAL;
+
+	if (cstate > MAX_CSTATES_POSSIBLE)
+		return -EINVAL;
+
+	/* cannot add/remove C0, C1 */
+	if (((cstate == 0) || (cstate == 1))) {
+		printk(KERN_CRIT "C0 C1 state cannot be used.\n");
+		return -EINVAL;
+	}
+
+	if (!mid_pmu_cxt->cstate_qos)
+		return -EINVAL;
+
+	if (cstate == MAX_CSTATES_POSSIBLE) {
+		mid_pmu_cxt->cstate_ignore =
+			~((1 << CPUIDLE_STATE_MAX) - 1);
+		/* Ignore C2, C3, C5, C8 and C10 states */
+		mid_pmu_cxt->cstate_ignore |= (1 << 1);
+		mid_pmu_cxt->cstate_ignore |= (1 << 2);
+		mid_pmu_cxt->cstate_ignore |= (1 << 4);
+		mid_pmu_cxt->cstate_ignore |= (1 << 7);
+		mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+			PM_QOS_DEFAULT_VALUE);
+	} else {
+		u32 cstate_exit_latency[CPUIDLE_STATE_MAX+1];
+		u32 local_cstate_allowed;
+		int max_cstate_allowed;
+
+		/* populate cstate latency table */
+		cstate_exit_latency[0] = CSTATE_EXIT_LATENCY_C1;
+		cstate_exit_latency[1] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[2] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[3] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[4] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[5] = CSTATE_EXIT_LATENCY_C6;
+		cstate_exit_latency[6] = CSTATE_EXIT_LATENCY_S0i1;
+		cstate_exit_latency[7] = CSTATE_EXIT_LATENCY_S0i2;
+		cstate_exit_latency[8] = CSTATE_EXIT_LATENCY_S0i3;
+		cstate_exit_latency[9] = PM_QOS_DEFAULT_VALUE;
+		cstate_exit_latency[10] = PM_QOS_DEFAULT_VALUE;
+
+		/* 0 is C1 state */
+		cstate--;
+		mid_pmu_cxt->cstate_ignore &= ~(1 << cstate);
+
+		/* by default remove C1 from ignore list */
+		mid_pmu_cxt->cstate_ignore &= ~(1 << 0);
+
+		/* Ignore C2, C3, C5, C8 and C10 states */
+		mid_pmu_cxt->cstate_ignore |= (1 << 1);
+		mid_pmu_cxt->cstate_ignore |= (1 << 2);
+		mid_pmu_cxt->cstate_ignore |= (1 << 4);
+		mid_pmu_cxt->cstate_ignore |= (1 << 7);
+		mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+		local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+		/* restrict to max c-states */
+		local_cstate_allowed &= ((1<<CPUIDLE_STATE_MAX)-1);
+
+		/* If no states allowed will return 0 */
+		max_cstate_allowed = fls(local_cstate_allowed);
+		printk(KERN_CRIT "max_cstate: %d local_cstate_allowed = %x\n",
+			max_cstate_allowed, local_cstate_allowed);
+		printk(KERN_CRIT "exit latency = %d\n",
+			(cstate_exit_latency[max_cstate_allowed]-1));
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+			(cstate_exit_latency[max_cstate_allowed]-1));
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations cstate_ignore_remove_ops = {
+ .open = cstate_ignore_remove_open,
+ .read = seq_read,
+ .write = cstate_ignore_remove_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int s3_ctrl_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "%d\n", enable_s3);
+ return 0;
+}
+
+static int s3_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, s3_ctrl_show, NULL);
+}
+
+static ssize_t s3_ctrl_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	u32 local_s3_ctrl;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &local_s3_ctrl);
+
+	if (res)
+		return -EINVAL;
+
+	enable_s3 = local_s3_ctrl ? 1 : 0;
+
+	if (enable_s3)
+		__pm_relax(mid_pmu_cxt->pmu_wake_lock);
+	else
+		__pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+
+	return buf_size;
+}
+
+static const struct file_operations s3_ctrl_ops = {
+ .open = s3_ctrl_open,
+ .read = seq_read,
+ .write = s3_ctrl_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+unsigned int pmu_get_new_cstate(unsigned int cstate, int *index)
+{
+ static int cstate_index_table[CPUIDLE_STATE_MAX] = {
+ 1, 1, 1, 1, 1, 2, 3, 3, 4, 4};
+ unsigned int new_cstate = cstate;
+ u32 local_cstate = (u32)(cstate);
+ u32 local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+ u32 cstate_mask, cstate_no_s0ix_mask = (u32)((1 << 6) - 1);
+
+ if (platform_is(INTEL_ATOM_MRFLD)) {
+ /* cstate is also 7 for C9 so correct */
+ if ((local_cstate == 7) && (*index == 4))
+ local_cstate = 9;
+
+ /* get next low cstate allowed */
+ cstate_mask = (u32)((1 << local_cstate)-1);
+ /* in case if cstate == 0 which should not be the case*/
+ cstate_mask |= 1;
+ local_cstate_allowed &= ((1<<CPUIDLE_STATE_MAX)-1);
+ local_cstate_allowed &= cstate_mask;
+ if (!could_do_s0ix())
+ local_cstate_allowed &= cstate_no_s0ix_mask;
+ new_cstate = fls(local_cstate_allowed);
+
+ *index = cstate_index_table[new_cstate-1];
+ }
+
+ return new_cstate;
+}
+#endif
+
+DEFINE_PER_CPU(u64[NUM_CSTATES_RES_MEASURE], c_states_res);
+
+static int read_c_states_res(void)
+{
+ int cpu, i;
+ u32 lo, hi;
+
+ u32 c_states_res_msr[NUM_CSTATES_RES_MEASURE] = {
+ PUNIT_CR_CORE_C1_RES_MSR,
+ PUNIT_CR_CORE_C4_RES_MSR,
+ PUNIT_CR_CORE_C6_RES_MSR
+ };
+
+ for_each_online_cpu(cpu)
+ for (i = 0; i < NUM_CSTATES_RES_MEASURE; i++) {
+ u64 temp;
+ rdmsr_on_cpu(cpu, c_states_res_msr[i], &lo, &hi);
+ temp = hi;
+ temp <<= 32;
+ temp |= lo;
+ per_cpu(c_states_res, cpu)[i] = temp;
+ }
+
+ return 0;
+}
+
+static int c_states_stat_show(struct seq_file *s, void *unused)
+{
+	char *c_states_name[] = {
+		"C1",
+		"C4",
+		"C6"
+	};
+
+	int i, cpu;
+
+	seq_printf(s, "C STATES: %20s\n", "Residency");
+	for_each_online_cpu(cpu)
+		seq_printf(s, "%18s %d", "Core", cpu);
+	seq_printf(s, "\n");
+
+	read_c_states_res();
+	for (i = 0; i < NUM_CSTATES_RES_MEASURE; i++) {
+		seq_printf(s, "%s", c_states_name[i]);
+		for_each_online_cpu(cpu)
+			seq_printf(s, "%18llu", per_cpu(c_states_res, cpu)[i]);
+		seq_printf(s, "\n");
+	}
+	return 0;
+}
+
+static int c_states_stat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, c_states_stat_show, NULL);
+}
+
+static const struct file_operations c_states_stat_ops = {
+ .open = c_states_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*These are place holders and will be enabled in next patch*/
+
+void pmu_log_pmu_irq(int status) { return; };
+void pmu_log_ipc_irq(void) { return; };
+void pmu_log_ipc(u32 command) { return; };
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc) { return; };
+void pmu_dump_logs(void) { return; };
+void pmu_stat_start(enum sys_state type) { return; };
+void pmu_stat_end(void) { return; };
+void pmu_stat_error(u8 err_type) { return; };
+void pmu_s0ix_demotion_stat(int req_state, int grant_state) { return; };
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+
+void pmu_stats_finish(void)
+{
+#ifdef CONFIG_PM_DEBUG
+ if (mid_pmu_cxt->cstate_qos) {
+ pm_qos_remove_request(mid_pmu_cxt->cstate_qos);
+ kfree(mid_pmu_cxt->cstate_qos);
+ mid_pmu_cxt->cstate_qos = NULL;
+ }
+#endif
+
+ return;
+}
+
+void pmu_s3_stats_update(int enter)
+{
+#ifdef CONFIG_PM_DEBUG
+ int ret;
+
+ down(&mid_pmu_cxt->scu_ready_sem);
+ /* Dump S0ix residency counters */
+ ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+ if (ret)
+ printk(KERN_ERR "IPC command to DUMP S0ix residency failed\n");
+
+	/* Dump number of iterations of S0ix */
+ ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+ if (ret)
+ printk(KERN_ERR "IPC command to DUMP S0ix count failed\n");
+
+ up(&mid_pmu_cxt->scu_ready_sem);
+
+ if (enter == 1) {
+ S3_count = readl(s0ix_counter[SYS_STATE_S0I3]);
+ S3_res = readq(residency[SYS_STATE_S0I3]);
+ } else {
+ prev_s0ix_cnt[SYS_STATE_S3] +=
+ (readl(s0ix_counter[SYS_STATE_S0I3])) - S3_count;
+ prev_s0ix_res[SYS_STATE_S3] += (readq(residency[SYS_STATE_S0I3])) - S3_res;
+ }
+
+#endif
+ return;
+}
+
+
+void pmu_stats_init(void)
+{
+	/* /sys/kernel/debug/mid_pmu_states */
+	(void) debugfs_create_file("mid_pmu_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &devices_state_operations);
+
+	/* /sys/kernel/debug/c_p_states_stat */
+	(void) debugfs_create_file("c_states_stat", S_IFREG | S_IRUGO,
+				NULL, NULL, &c_states_stat_ops);
+#ifdef CONFIG_PM_DEBUG
+	if (platform_is(INTEL_ATOM_MRFLD)) {
+		/* If s0ix is disabled then restrict to C6 */
+		if (!enable_s0ix) {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << CPUIDLE_STATE_MAX) - 1);
+
+			/* Ignore C2, C3, C5 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+
+			/* For now ignore C7, C8, C9, C10 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 6);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 8);
+			mid_pmu_cxt->cstate_ignore |= (1 << 9);
+		} else {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << CPUIDLE_STATE_MAX) - 1);
+
+			/* Ignore C2, C3, C5, C8 and C10 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 9);
+		}
+
+		mid_pmu_cxt->cstate_qos =
+			kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+		if (mid_pmu_cxt->cstate_qos) {
+			pm_qos_add_request(mid_pmu_cxt->cstate_qos,
+				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+		}
+
+		/* If s0ix is disabled then restrict to C6 */
+		if (!enable_s0ix && mid_pmu_cxt->cstate_qos) {
+			/* Restrict platform Cx state to C6 */
+			pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+				(CSTATE_EXIT_LATENCY_S0i1-1));
+		}
+
+		/* /sys/kernel/debug/ignore_add */
+		(void) debugfs_create_file("ignore_add", S_IFREG | S_IRUGO,
+				NULL, NULL, &ignore_add_ops);
+		/* /sys/kernel/debug/ignore_remove */
+		(void) debugfs_create_file("ignore_remove", S_IFREG | S_IRUGO,
+				NULL, NULL, &ignore_remove_ops);
+		/* /sys/kernel/debug/pmu_sync_d0ix */
+		(void) debugfs_create_file("pmu_sync_d0ix", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_sync_d0ix_ops);
+		/* /sys/kernel/debug/pmu_force_d0i0 */
+		(void) debugfs_create_file("pmu_force_d0i0", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_force_d0i0_ops);
+		/* /sys/kernel/debug/pmu_force_d0i3 */
+		(void) debugfs_create_file("pmu_force_d0i3", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_force_d0i3_ops);
+		/* /sys/kernel/debug/cstate_ignore_add */
+		(void) debugfs_create_file("cstate_ignore_add",
+			S_IFREG | S_IRUGO, NULL, NULL, &cstate_ignore_add_ops);
+		/* /sys/kernel/debug/cstate_ignore_remove */
+		(void) debugfs_create_file("cstate_ignore_remove",
+			S_IFREG | S_IRUGO, NULL, NULL, &cstate_ignore_remove_ops);
+		/* /sys/kernel/debug/s3_ctrl */
+		(void) debugfs_create_file("s3_ctrl",
+			S_IFREG | S_IRUGO, NULL, NULL, &s3_ctrl_ops);
+	}
+#endif
+}
+
+#endif /*if CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER*/
--- /dev/null
+/*
+ * intel_soc_pm_debug.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _INTEL_SOC_PM_DEBUG_H
+#define _INTEL_SOC_PM_DEBUG_H
+#include <linux/intel_mid_pm.h>
+
+#include "intel_soc_pmu.h"
+
+
+#define NANO_SEC 1000000000UL /* 10^9 in sec */
+#define MICRO_SEC 1000000UL /* 10^6 in sec */
+#define PMU_LOG_INTERVAL_SECS (60*5) /* 5 mins in secs */
+
+#define S0IX_LAT_SRAM_ADDR_CLVP 0xFFFF7FD0
+#define S0IX_LAT_SRAM_SIZE_CLVP 8
+
+#define IPC_CMD_S0IX_LATENCY_CLVP 0xCE
+#define IPC_SUB_MEASURE_START_CLVP 0x00
+#define IPC_SUB_MEASURE_STOP_CLVP 0x01
+
+struct simple_stat {
+ u64 min;
+ u64 max;
+ u64 total;
+ u64 curr;
+};
+
+struct entry_exit_stat {
+ struct simple_stat entry;
+ struct simple_stat exit;
+};
+
+struct latency_stat {
+ struct entry_exit_stat scu_latency[SYS_STATE_MAX];
+ struct entry_exit_stat os_latency[SYS_STATE_MAX];
+ struct simple_stat s3_parts_lat[MAX_S3_PARTS];
+ u64 count[SYS_STATE_MAX];
+ u32 __iomem *scu_s0ix_lat_addr;
+ struct dentry *dentry;
+ bool latency_measure;
+};
+
+struct island {
+ int type;
+ int index;
+ char *name;
+};
+
+struct lss_definition {
+ char *lss_name;
+ char *block;
+ char *subsystem;
+};
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+#define PUNIT_CR_CORE_C1_RES_MSR 0x660
+#define PUNIT_CR_CORE_C4_RES_MSR 0x3fc
+#define PUNIT_CR_CORE_C6_RES_MSR 0x3fd
+
+#define NUM_CSTATES_RES_MEASURE 3
+
+extern unsigned int enable_s3;
+extern unsigned int enable_s0ix;
+
+extern u32 __iomem *residency[];
+extern u32 __iomem *s0ix_counter[];
+
+#endif
+
+/* platform dependency starts */
+#ifdef CONFIG_INTEL_REMOVEME_ATOM_MDFLD_POWER
+
+#define DEV_GFX 2
+#define FUNC_GFX 0
+#define ISLANDS_GFX 8
+#define DEV_ISP 3
+#define FUNC_ISP 0
+#define ISLANDS_ISP 2
+#define NC_DEVS 2
+
+static struct lss_definition lsses[] = {
+ {"Lss00", "Storage", "SDIO0 (HC2)"},
+ {"Lss01", "Storage", "eMMC0 (HC0a)"},
+ {"NA", "Storage", "ND_CTL (Note 5)"},
+ {"Lss03", "H S I", "H S I DMA"},
+ {"Lss04", "Security", "RNG"},
+ {"Lss05", "Storage", "eMMC1 (HC0b)"},
+ {"Lss06", "USB", "USB OTG (ULPI)"},
+ {"Lss07", "USB", "USB_SPH"},
+ {"Lss08", "Audio", ""},
+ {"Lss09", "Audio", ""},
+ {"Lss10", "SRAM", " SRAM CTL+SRAM_16KB"},
+ {"Lss11", "SRAM", " SRAM CTL+SRAM_16KB"},
+ {"Lss12", "SRAM", "SRAM BANK (16KB+3x32KBKB)"},
+ {"Lss13", "SRAM", "SRAM BANK(4x32KB)"},
+ {"Lss14", "SDIO COMMS", "SDIO2 (HC1b)"},
+ {"Lss15", "PTI, DAFCA", " DFX Blocks"},
+ {"Lss16", "SC", " DMA"},
+ {"NA", "SC", "SPI0/MSIC"},
+ {"Lss18", "GP", "SPI1"},
+ {"Lss19", "GP", " SPI2"},
+ {"Lss20", "GP", " I2C0"},
+ {"Lss21", "GP", " I2C1"},
+ {"NA", "Fabrics", " Main Fabric"},
+ {"NA", "Fabrics", " Secondary Fabric"},
+ {"NA", "SC", "SC Fabric"},
+ {"Lss25", "Audio", " I-RAM BANK1 (32 + 256KB)"},
+ {"NA", "SCU", " ROM BANK1 (18KB+18KB+18KB)"},
+ {"Lss27", "GP", "I2C2"},
+ {"NA", "SSC", "SSC (serial bus controller to FLIS)"},
+ {"Lss29", "Security", "Chaabi AON Registers"},
+ {"Lss30", "SDIO COMMS", "SDIO1 (HC1a)"},
+ {"NA", "SCU", "I-RAM BANK0 (32KB)"},
+ {"NA", "SCU", "I-RAM BANK1 (32KB)"},
+ {"Lss33", "GP", "I2C3 (HDMI)"},
+ {"Lss34", "GP", "I2C4"},
+ {"Lss35", "GP", "I2C5"},
+ {"Lss36", "GP", "SSP (SPI3)"},
+ {"Lss37", "GP", "GPIO1"},
+ {"NA", "GP", "GP Fabric"},
+ {"Lss39", "SC", "GPIO0"},
+ {"Lss40", "SC", "KBD"},
+ {"Lss41", "SC", "UART2:0"},
+ {"NA", "NA", "NA"},
+ {"NA", "NA", "NA"},
+ {"Lss44", "Security", " Security TAPC"},
+ {"NA", "MISC", "AON Timers"},
+ {"NA", "PLL", "LFHPLL and Spread Spectrum"},
+ {"NA", "PLL", "USB PLL"},
+ {"NA", "NA", "NA"},
+ {"NA", "Audio", "SLIMBUS CTL 1 (note 5)"},
+ {"NA", "Audio", "SLIMBUS CTL 2 (note 5)"},
+ {"Lss51", "Audio", "SSP0"},
+ {"Lss52", "Audio", "SSP1"},
+ {"NA", "Bridge", "IOSF to OCP Bridge"},
+ {"Lss54", "GP", "DMA"},
+ {"NA", "SC", "SVID (Serial Voltage ID)"},
+ {"NA", "SOC Fuse", "SoC Fuse Block (note 3)"},
+ {"NA", "NA", "NA"},
+};
+#endif
+
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER
+
+#define DEV_GFX 2
+#define FUNC_GFX 0
+#define ISLANDS_GFX 8
+#define DEV_ISP 3
+#define FUNC_ISP 0
+#define ISLANDS_ISP 2
+#define NC_DEVS 2
+
+static struct lss_definition lsses[] = {
+ {"Lss00", "Storage", "SDIO0 (HC2)"},
+ {"Lss01", "Storage", "eMMC0 (HC0a)"},
+ {"NA", "Timer", "AONT"},
+ {"Lss03", "H S I", "H S I DMA"},
+ {"Lss04", "Security", "RNG"},
+ {"Lss05", "Storage", "eMMC1 (HC0b)"},
+ {"Lss06", "USB", "USB OTG (ULPI)"},
+ {"Lss07", "USB", "USB_SPH"},
+ {"Lss08", "Audio", "Audio ENGINE"},
+ {"Lss09", "Audio", "Audio DMA"},
+ {"Lss10", "SRAM", " SRAM CTL+SRAM_16KB"},
+ {"Lss11", "SRAM", " SRAM CTL+SRAM_16KB"},
+ {"Lss12", "SRAM", "SRAM BANK (16KB+3x32KBKB)"},
+ {"Lss13", "SRAM", "SRAM BANK(4x32KB)"},
+ {"Lss14", "SDIO COMMS", "SDIO2 (HC1b)"},
+ {"Lss15", "PTI, DAFCA", " DFX Blocks"},
+ {"Lss16", "SC", " DMA"},
+ {"NA", "SC", "SPI0/MSIC"},
+ {"Lss18", "GP", "SPI1"},
+ {"Lss19", "GP", " SPI2"},
+ {"Lss20", "GP", " I2C0"},
+ {"Lss21", "GP", " I2C1"},
+ {"NA", "Timer", "HPET"},
+ {"NA", "Timer", "External Timer"},
+ {"NA", "SC", "SC Fabric"},
+ {"Lss25", "Audio", " I-RAM BANK1 (32 + 256KB)"},
+ {"NA", "SCU", " ROM BANK1 (18KB+18KB+18KB)"},
+ {"Lss27", "GP", "I2C2"},
+ {"NA", "SSC", "SSC (serial bus controller to FLIS)"},
+ {"Lss29", "Security", "Chaabi AON Registers"},
+ {"Lss30", "SDIO COMMS", "SDIO1 (HC1a)"},
+ {"NA", "Timer", "vRTC"},
+ {"NA", "Security", "Security Timer"},
+ {"Lss33", "GP", "I2C3 (HDMI)"},
+ {"Lss34", "GP", "I2C4"},
+ {"Lss35", "GP", "I2C5"},
+ {"Lss36", "GP", "SSP (SPI3)"},
+ {"Lss37", "GP", "GPIO1"},
+ {"NA", "MSIC", "Power Button"},
+ {"Lss39", "SC", "GPIO0"},
+ {"Lss40", "SC", "KBD"},
+ {"Lss41", "SC", "UART2:0"},
+ {"NA", "MSIC", "ADC"},
+ {"NA", "MSIC", "Charger"},
+ {"Lss44", "Security", " Security TAPC"},
+ {"NA", "MSIC", "AON Timers"},
+ {"NA", "MSIC", "GPI"},
+ {"NA", "MSIC", "BCU"},
+ {"NA", "NA", "SSP2"},
+ {"NA", "Audio", "SLIMBUS CTL 1 (note 5)"},
+ {"NA", "Audio", "SLIMBUS CTL 2 (note 5)"},
+ {"Lss51", "Audio", "SSP0"},
+ {"Lss52", "Audio", "SSP1"},
+ {"NA", "Bridge", "IOSF to OCP Bridge"},
+ {"Lss54", "GP", "DMA"},
+ {"NA", "MSIC", "RESET"},
+ {"NA", "SOC Fuse", "SoC Fuse Block (note 3)"},
+ {"NA", "NA", "NA"},
+ {"Lss58", "NA", "SSP4"},
+};
+#endif
+/* platform dependency ends */
+
+#endif
--- /dev/null
+/*
+ * intel_soc_pmu.c - This driver provides interface to configure the 2 pmu's
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+#include <linux/cpuidle.h>
+#include <linux/proc_fs.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#ifdef CONFIG_DRM_INTEL_MID
+#define GFX_ENABLE
+#endif
+
+bool pmu_initialized;
+
+DEFINE_MUTEX(pci_root_lock);
+
+/* mid_pmu context structure */
+struct mid_pmu_dev *mid_pmu_cxt;
+
+struct platform_pmu_ops *pmu_ops;
+/*
+ * Locking strategy::
+ *
+ * one semaphore (scu_ready sem) is used for accessing busy bit,
+ * issuing interactive cmd in the code.
+ * The entry points in pmu driver are pmu_pci_set_power_state()
+ * and PMU interrupt handler contexts, so here is the flow of how
+ * the semaphore is used.
+ *
+ * In D0ix command case::
+ * set_power_state process context:
+ * set_power_state()->acquire_scu_ready_sem()->issue_interactive_cmd->
+ * wait_for_interactive_complete->release scu_ready sem
+ *
+ * PMU Interrupt context:
+ * pmu_interrupt_handler()->release interactive_complete->return
+ *
+ * In Idle handler case::
+ * Idle context:
+ * idle_handler()->try_acquire_scu_ready_sem->if acquired->
+ * issue s0ix command->return
+ *
+ * PMU Interrupt context:
+ * pmu_Interrupt_handler()->release scu_ready_sem->return
+ *
+ */
+
+/* Maps pci power states to SCU D0ix mask */
+static int pci_to_platform_state(pci_power_t pci_state)
+{
+
+ static int mask[] = {D0I0_MASK, D0I1_MASK,
+ D0I2_MASK, D0I3_MASK, D0I3_MASK};
+
+ int state = D0I0_MASK;
+
+ if (pci_state > 4)
+ WARN(1, "%s: wrong pci_state received.\n", __func__);
+
+ else
+ state = mask[pci_state];
+
+ return state;
+}
+
+/* Maps power states to pmu driver's internal indexes */
+int mid_state_to_sys_state(int mid_state)
+{
+ int sys_state = 0;
+ switch (mid_state) {
+ case MID_S0I1_STATE:
+ sys_state = SYS_STATE_S0I1;
+ break;
+ case MID_LPMP3_STATE:
+ sys_state = SYS_STATE_S0I2;
+ break;
+ case MID_S0I3_STATE:
+ sys_state = SYS_STATE_S0I3;
+ break;
+ case MID_S3_STATE:
+ sys_state = SYS_STATE_S3;
+ break;
+
+ case C6_HINT:
+ sys_state = SYS_STATE_S0I0;
+ }
+
+ return sys_state;
+}
+
+/* PCI Device Id structure */
+static DEFINE_PCI_DEVICE_TABLE(mid_pm_ids) = {
+ {PCI_VDEVICE(INTEL, MID_PMU_MFLD_DRV_DEV_ID), 0},
+ {PCI_VDEVICE(INTEL, MID_PMU_CLV_DRV_DEV_ID), 0},
+ {PCI_VDEVICE(INTEL, MID_PMU_MRFL_DRV_DEV_ID), 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, mid_pm_ids);
+
+char s0ix[5] = "s0ix";
+
+module_param_call(s0ix, set_extended_cstate_mode,
+ get_extended_cstate_mode, NULL, 0644);
+
+MODULE_PARM_DESC(s0ix,
+ "setup extended c state s0ix mode [s0i3|s0i1|lmp3|"
+ "i1i3|lpi1|lpi3|s0ix|none]");
+
+/**
+ * This function sets all devices in d0i0 and deactivates the pmu driver.
+ * It is used before an IFWI update, which needs all devices to be
+ * in d0i0 during the update. A reboot is needed for the pmu
+ * driver to work properly again. After calling this function and the
+ * IFWI update, the system is always rebooted, as the IFWI update function,
+ * intel_scu_ipc_medfw_upgrade(), is called from mrst_emergency_reboot().
+ */
+int pmu_set_devices_in_d0i0(void)
+{
+ int status;
+ struct pmu_ss_states cur_pmssc;
+
+ /* Ignore request until we have initialized */
+ if (unlikely((!pmu_initialized)))
+ return 0;
+
+ cur_pmssc.pmu2_states[0] = D0I0_MASK;
+ cur_pmssc.pmu2_states[1] = D0I0_MASK;
+ cur_pmssc.pmu2_states[2] = D0I0_MASK;
+ cur_pmssc.pmu2_states[3] = D0I0_MASK;
+
+ /* Restrict platform Cx state to C6 */
+ pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+ (CSTATE_EXIT_LATENCY_S0i1-1));
+
+ down(&mid_pmu_cxt->scu_ready_sem);
+
+ mid_pmu_cxt->shutdown_started = true;
+
+ /* Issue the pmu command to PMU 2
+ * flag is needed to distinguish between
+ * S0ix vs interactive command in pmu_sc_irq()
+ */
+ status = pmu_issue_interactive_command(&cur_pmssc, false, false);
+
+ if (unlikely(status != PMU_SUCCESS)) { /* pmu command failed */
+ printk(KERN_CRIT "%s: Failed to Issue a PM command to PMU2\n",
+ __func__);
+ mid_pmu_cxt->shutdown_started = false;
+
+ /* allow s0ix now */
+ pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+ PM_QOS_DEFAULT_VALUE);
+ goto unlock;
+ }
+
+ if (_pmu2_wait_not_busy()) {
+ pmu_dump_logs();
+ BUG();
+ }
+
+unlock:
+ up(&mid_pmu_cxt->scu_ready_sem);
+ return status;
+}
+EXPORT_SYMBOL(pmu_set_devices_in_d0i0);
+
+static int _pmu_read_status(int type)
+{
+ u32 temp;
+ union pmu_pm_status result;
+
+ temp = readl(&mid_pmu_cxt->pmu_reg->pm_sts);
+
+ /* extract the busy bit */
+ result.pmu_status_value = temp;
+
+ if (type == PMU_BUSY_STATUS)
+ return result.pmu_status_parts.pmu_busy;
+ else if (type == PMU_MODE_ID)
+ return result.pmu_status_parts.mode_id;
+
+ return 0;
+}
+
+int _pmu2_wait_not_busy(void)
+{
+ int pmu_busy_retry = PMU2_BUSY_TIMEOUT;
+
+ /* wait 500ms that the latest pmu command finished */
+ do {
+ if (_pmu_read_status(PMU_BUSY_STATUS) == 0)
+ return 0;
+
+ udelay(1);
+ } while (--pmu_busy_retry);
+
+ WARN(1, "pmu2 busy!");
+
+ return -EBUSY;
+}
+
+static int _pmu2_wait_not_busy_yield(void)
+{
+ int pmu_busy_retry = PMU2_BUSY_TIMEOUT;
+
+ /* wait max 500ms that the latest pmu command finished */
+ do {
+ if (_pmu_read_status(PMU_BUSY_STATUS) == 0)
+ return 0;
+
+ usleep_range(10, 12);
+ pmu_busy_retry -= 11;
+ } while (pmu_busy_retry > 0);
+
+ WARN(1, "pmu2 busy!");
+
+ return -EBUSY;
+}
+
+static void pmu_write_subsys_config(struct pmu_ss_states *pm_ssc)
+{
+ /* South complex in Penwell has multiple registers for
+ * PM_SSC, etc.
+ */
+ writel(pm_ssc->pmu2_states[0], &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+ writel(pm_ssc->pmu2_states[1], &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+ writel(pm_ssc->pmu2_states[2], &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+ writel(pm_ssc->pmu2_states[3], &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+}
+
+void log_wakeup_irq(void)
+{
+ unsigned int irr = 0, vector = 0;
+ int offset = 0, irq = 0;
+ struct irq_desc *desc;
+ const char *act_name;
+
+ if ((mid_pmu_cxt->pmu_current_state != SYS_STATE_S3)
+ || !mid_pmu_cxt->suspend_started)
+ return;
+
+ for (offset = (FIRST_EXTERNAL_VECTOR/32);
+ offset < (NR_VECTORS/32); offset++) {
+ irr = apic_read(APIC_IRR + (offset * 0x10));
+ while (irr) {
+ vector = __ffs(irr);
+ irr &= ~(1 << vector);
+ irq = __this_cpu_read(
+ vector_irq[vector + (offset * 32)]);
+ if (irq < 0)
+ continue;
+ pr_info("wakeup from IRQ %d\n", irq);
+
+ desc = irq_to_desc(irq);
+
+ if ((desc) && (desc->action)) {
+ act_name = desc->action->name;
+ pr_info("IRQ %d,action name:%s\n",
+ irq,
+ (act_name) ? (act_name) : "no action");
+ }
+ }
+ }
+ return;
+}
+
+static inline int pmu_interrupt_pending(void)
+{
+ u32 temp;
+ union pmu_pm_ics result;
+
+ /* read the pm interrupt status register */
+ temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+ result.pmu_pm_ics_value = temp;
+
+ /* return the pm interrupt status int pending bit info */
+ return result.pmu_pm_ics_parts.int_pend;
+}
+
+static inline void pmu_clear_pending_interrupt(void)
+{
+ u32 temp;
+
+ /* read the pm interrupt status register */
+ temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+
+ /* write into the PM_ICS register */
+ writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+void pmu_set_interrupt_enable(void)
+{
+ u32 temp;
+ union pmu_pm_ics result;
+
+ /* read the pm interrupt status register */
+ temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+ result.pmu_pm_ics_value = temp;
+
+ /* Set the interrupt enable bit */
+ result.pmu_pm_ics_parts.int_enable = 1;
+
+ temp = result.pmu_pm_ics_value;
+
+ /* write into the PM_ICS register */
+ writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+void pmu_clear_interrupt_enable(void)
+{
+ u32 temp;
+ union pmu_pm_ics result;
+
+ /* read the pm interrupt status register */
+ temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+ result.pmu_pm_ics_value = temp;
+
+ /* Clear the interrupt enable bit */
+ result.pmu_pm_ics_parts.int_enable = 0;
+
+ temp = result.pmu_pm_ics_value;
+
+ /* write into the PM_ICS register */
+ writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+static inline int pmu_read_interrupt_status(void)
+{
+ u32 temp;
+ union pmu_pm_ics result;
+
+ /* read the pm interrupt status register */
+ temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+
+ result.pmu_pm_ics_value = temp;
+
+ if (result.pmu_pm_ics_parts.int_status == 0)
+ return PMU_FAILED;
+
+ /* return the pm interrupt status int pending bit info */
+ return result.pmu_pm_ics_parts.int_status;
+}
+
+/* Program the wake-capable devices before entering the given state. */
+static void pmu_prepare_wake(int s0ix_state)
+{
+ struct pmu_ss_states cur_pmsss;
+ int reg;
+
+ /* for S3, wake on everything except the explicitly ignored sources */
+ if (s0ix_state == MID_S3_STATE) {
+  writel(~IGNORE_S3_WKC0, &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+  writel(~IGNORE_S3_WKC1, &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+ }
+
+ if (platform_is(INTEL_ATOM_MFLD) || platform_is(INTEL_ATOM_CLV)) {
+  /* Re-program the sub systems state on wakeup as
+   * the current SSS
+   */
+  pmu_read_sss(&cur_pmsss);
+
+  for (reg = 0; reg < 4; reg++)
+   writel(cur_pmsss.pmu2_states[reg],
+    &mid_pmu_cxt->pmu_reg->pm_wssc[reg]);
+ }
+}
+
+/*
+ * mid_s0ix_enter - attempt entry into the requested S0ix/S3 state.
+ *
+ * Returns the state entered on success, 0 when entry was not attempted
+ * (missing ops, SCU busy, or a reschedule became pending).
+ * Takes scu_ready_sem with down_trylock only: if the SCU is busy we
+ * simply fall back to C6 instead of blocking in the idle path.
+ */
+int mid_s0ix_enter(int s0ix_state)
+{
+ int ret = 0;
+
+ if (unlikely(!pmu_ops || !pmu_ops->enter))
+  goto ret;
+
+ /* check if we can acquire scu_ready_sem
+  * if we are not able to then do a c6 */
+ if (down_trylock(&mid_pmu_cxt->scu_ready_sem))
+  goto ret;
+
+ /* If PMU is busy, we'll retry on next C6 */
+ if (unlikely(_pmu_read_status(PMU_BUSY_STATUS))) {
+  up(&mid_pmu_cxt->scu_ready_sem);
+  pr_debug("mid_pmu_cxt->scu_ready_sem is up\n");
+  goto ret;
+ }
+
+ pmu_prepare_wake(s0ix_state);
+
+ /* no need to proceed if schedule pending */
+ if (unlikely(need_resched())) {
+  pmu_stat_clear();
+  /*set wkc to appropriate value suitable for s0ix*/
+  writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+   &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+  writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+   &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+  up(&mid_pmu_cxt->scu_ready_sem);
+  goto ret;
+ }
+
+ /* entry function for pmu driver ops */
+ if (pmu_ops->enter(s0ix_state))
+  ret = s0ix_state;
+ else {
+  /*set wkc to appropriate value suitable for s0ix*/
+  writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+   &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+  writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+   &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+ }
+
+ret:
+ return ret;
+}
+
+/**
+ * pmu_sc_irq - pmu driver interrupt handler
+ * Context: interrupt context
+ *
+ * Dispatches on the PM_ICS interrupt source, clears the pending bit,
+ * notifies the platform ops, and on MFLD/CLV releases scu_ready_sem
+ * that was taken on the S0ix entry path.
+ */
+static irqreturn_t pmu_sc_irq(int irq, void *ignored)
+{
+ int status;
+ irqreturn_t ret = IRQ_NONE;
+ int wake_source;
+
+ /* check if interrupt pending bit is set, if not ignore interrupt */
+ if (unlikely(!pmu_interrupt_pending())) {
+  goto ret_no_clear;
+ }
+
+ /* read the interrupt status */
+ status = pmu_read_interrupt_status();
+ if (unlikely(status == PMU_FAILED))
+  dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Invalid interrupt source\n");
+
+ switch (status) {
+ case INVALID_INT:
+  goto ret_no_clear;
+
+ case CMD_COMPLETE_INT:
+  break;
+
+ case CMD_ERROR_INT:
+  mid_pmu_cxt->cmd_error_int++;
+  break;
+
+ case SUBSYS_POW_ERR_INT:
+ case NO_ACKC6_INT:
+ case S0ix_MISS_INT:
+  pmu_stat_error(status);
+  break;
+
+ case WAKE_RECEIVED_INT:
+  wake_source = pmu_get_wake_source();
+  trace_printk("wake_from_lss%d\n",
+    wake_source);
+  pmu_stat_end();
+  break;
+ case TRIGGERERR:
+  pmu_dump_logs();
+  WARN(1, "%s: TRIGGERERR caused, but proceeding...\n", __func__);
+  break;
+ }
+
+ pmu_stat_clear();
+
+ /* clear the interrupt pending bit */
+ pmu_clear_pending_interrupt();
+
+ /* let the platform-specific ops run their wakeup hook first */
+ if (pmu_ops->wakeup)
+  pmu_ops->wakeup();
+
+ if (platform_is(INTEL_ATOM_MFLD) ||
+  platform_is(INTEL_ATOM_CLV)) {
+  mid_pmu_cxt->s0ix_entered = 0;
+  /* S0ix case release it */
+  up(&mid_pmu_cxt->scu_ready_sem);
+ }
+
+ ret = IRQ_HANDLED;
+ret_no_clear:
+ /* clear interrupt enable bit */
+ pmu_clear_interrupt_enable();
+
+ return ret;
+}
+
+/* Notify the platform ops that the S0ix transition has completed. */
+void pmu_set_s0ix_complete(void)
+{
+ /* guard pmu_ops too - exported symbol may be called before
+  * platform_set_pmu_ops() runs; matches mid_s0ix_enter() */
+ if (pmu_ops && pmu_ops->set_s0ix_complete)
+  pmu_ops->set_s0ix_complete();
+}
+EXPORT_SYMBOL(pmu_set_s0ix_complete);
+
+/* True when the driver is initialized and an S0ix entry is in flight. */
+bool pmu_is_s0ix_in_progress(void)
+{
+ return pmu_initialized && mid_pmu_cxt->s0ix_entered;
+}
+EXPORT_SYMBOL(pmu_is_s0ix_in_progress);
+
+/*
+ * Look up pdev in the open-addressed (linear-probing) pci_dev_hash.
+ * Returns the slot index; *found tells whether pdev was already there.
+ * When not found, the returned slot is where pdev can be inserted.
+ */
+static inline u32 find_index_in_hash(struct pci_dev *pdev, int *found)
+{
+ u32 h_index;
+ int i;
+
+ /* assuming pdev is not null */
+ WARN_ON(pdev == NULL);
+
+ /* assuming pdev pointer will not change from platform
+  * boot to shutdown (the pointer value itself is hashed) */
+ h_index = jhash_1word((u32) (long) pdev,
+  MID_PCI_INDEX_HASH_INITVALUE) & MID_PCI_INDEX_HASH_MASK;
+
+ /* assume not found */
+ *found = 0;
+
+ for (i = 0; i < MID_PCI_INDEX_HASH_SIZE; i++) {
+  if (likely(mid_pmu_cxt->pci_dev_hash[h_index].pdev == pdev)) {
+   *found = 1;
+   break;
+  }
+
+  /* assume no deletions, hence there shouldn't be any
+   * gaps ie., NULL's */
+  if (unlikely(mid_pmu_cxt->pci_dev_hash[h_index].pdev == NULL)) {
+   /* found NULL, that means we wont have
+    * it in hash */
+   break;
+  }
+
+  h_index = (h_index+1)%MID_PCI_INDEX_HASH_SIZE;
+ }
+
+ /* Assume hash table wont be full */
+ WARN_ON(i == MID_PCI_INDEX_HASH_SIZE);
+
+ return h_index;
+}
+
+/*
+ * Display PCI class is 0x30000 on MDFLD/CLV but 0x38000 on MRFLD,
+ * so the sub-class byte that marks "display" differs per platform.
+ */
+static bool is_display_subclass(unsigned int sub_class)
+{
+ if (platform_is(INTEL_ATOM_MFLD) || platform_is(INTEL_ATOM_CLV))
+  return sub_class == 0x0;
+
+ if (platform_is(INTEL_ATOM_MRFLD))
+  return sub_class == 0x80;
+
+ return false;
+}
+
+/*
+ * Map a PCI device to its PMU device index, caching the result in the
+ * pci_dev_hash. Display goes to index 1, ISP to ISP_POS, and devices
+ * with a logical-SS capability to pmu1_max_devs + logical-SS id.
+ * Returns PMU_FAILED when the device is not PMU-managed.
+ */
+static int get_pci_to_pmu_index(struct pci_dev *pdev)
+{
+ int pm, type;
+ unsigned int base_class;
+ unsigned int sub_class;
+ u8 ss;
+ int index = PMU_FAILED;
+ u32 h_index;
+ int found;
+
+ h_index = find_index_in_hash(pdev, &found);
+
+ if (found)
+  return (int)mid_pmu_cxt->pci_dev_hash[h_index].index;
+
+ /* if not found, h_index would be where
+  * we can insert this */
+
+ base_class = pdev->class >> 16;
+ sub_class = (pdev->class & SUB_CLASS_MASK) >> 8;
+ pm = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+ /* read the logical sub system id & cap if present */
+ pci_read_config_byte(pdev, pm + 4, &ss);
+
+ type = ss & LOG_SS_MASK;
+ ss = ss & LOG_ID_MASK;
+
+ if ((base_class == PCI_BASE_CLASS_DISPLAY) &&
+  is_display_subclass(sub_class))
+  index = 1;
+ else if ((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
+  (sub_class == ISP_SUB_CLASS))
+  index = ISP_POS;
+ else if (type) {
+  WARN_ON(ss >= MAX_LSS_POSSIBLE);
+  index = mid_pmu_cxt->pmu1_max_devs + ss;
+ }
+
+ if (index != PMU_FAILED) {
+  /* insert into hash table */
+  mid_pmu_cxt->pci_dev_hash[h_index].pdev = pdev;
+
+  /* assume index never exceeds 0xff */
+  WARN_ON(index > 0xFF);
+
+  mid_pmu_cxt->pci_dev_hash[h_index].index = (u8)index;
+
+  if (index < mid_pmu_cxt->pmu1_max_devs) {
+   /* PMU1 (north complex) device */
+   set_mid_pci_ss_idx(index, 0);
+   set_mid_pci_ss_pos(index, (u8)index);
+   set_mid_pci_pmu_num(index, PMU_NUM_1);
+  } else if (index >= mid_pmu_cxt->pmu1_max_devs &&
+    index < (mid_pmu_cxt->pmu1_max_devs +
+      mid_pmu_cxt->pmu2_max_devs)) {
+   /* PMU2 device: split LSS id into register/position */
+   set_mid_pci_ss_idx(index,
+    (u8)(ss / mid_pmu_cxt->ss_per_reg));
+   set_mid_pci_ss_pos(index,
+    (u8)(ss % mid_pmu_cxt->ss_per_reg));
+   set_mid_pci_pmu_num(index, PMU_NUM_2);
+  } else {
+   index = PMU_FAILED;
+  }
+
+  WARN_ON(index == PMU_FAILED);
+ }
+
+ return index;
+}
+
+/*
+ * Record a PCI device's logical-subsystem info in the PMU tables and
+ * register the pdev in the first free per-LSS driver slot (an LSS may
+ * be shared by up to PMU_MAX_LSS_SHARE devices).
+ */
+static void get_pci_lss_info(struct pci_dev *pdev)
+{
+ int index, pm;
+ unsigned int base_class;
+ unsigned int sub_class;
+ u8 ss, cap;
+ int i;
+ base_class = pdev->class >> 16;
+ sub_class = (pdev->class & SUB_CLASS_MASK) >> 8;
+
+ pm = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+ /* read the logical sub system id & cap if present */
+ pci_read_config_byte(pdev, pm + 4, &ss);
+ pci_read_config_byte(pdev, pm + 5, &cap);
+
+ /* get the index for the copying of ss info */
+ index = get_pci_to_pmu_index(pdev);
+
+ if ((index == PMU_FAILED) || (index >= MAX_DEVICES))
+  return;
+
+ /* initialize gfx subsystem info */
+ if ((base_class == PCI_BASE_CLASS_DISPLAY) &&
+  is_display_subclass(sub_class)) {
+  set_mid_pci_log_id(index, (u32)index);
+  set_mid_pci_cap(index, PM_SUPPORT);
+ } else if ((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
+  (sub_class == ISP_SUB_CLASS)) {
+  set_mid_pci_log_id(index, (u32)index);
+  set_mid_pci_cap(index, PM_SUPPORT);
+ } else if (ss && cap) {
+  set_mid_pci_log_id(index, (u32)(ss & LOG_ID_MASK));
+  set_mid_pci_cap(index, cap);
+ }
+
+ /* find the first free driver slot for this LSS */
+ for (i = 0; i < PMU_MAX_LSS_SHARE &&
+  get_mid_pci_drv(index, i); i++) {
+  /* do nothing */
+ }
+
+ WARN_ON(i >= PMU_MAX_LSS_SHARE);
+
+ if (i < PMU_MAX_LSS_SHARE) {
+  set_mid_pci_drv(index, i, pdev);
+  /* default every device to D3hot until a driver says otherwise */
+  set_mid_pci_power_state(index, i, PCI_D3hot);
+ }
+}
+
+/* Walk all PCI devices and register the PMU-managed ones. */
+static void pmu_enumerate(void)
+{
+ struct pci_dev *pdev = NULL;
+
+ for_each_pci_dev(pdev) {
+  /* the MRFLD HDMI HPD device is not PMU-managed */
+  if (platform_is(INTEL_ATOM_MRFLD) &&
+      pdev->device == MID_MRFL_HDMI_DRV_DEV_ID)
+   continue;
+
+  /* bridges carry no LSS information */
+  if ((pdev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+   continue;
+
+  get_pci_lss_info(pdev);
+ }
+}
+
+/* Snapshot the four PM_SSS sub-system state registers into pm_ssc. */
+void pmu_read_sss(struct pmu_ss_states *pm_ssc)
+{
+ int reg;
+
+ for (reg = 0; reg < 4; reg++)
+  pm_ssc->pmu2_states[reg] =
+   readl(&mid_pmu_cxt->pmu_reg->pm_sss[reg]);
+}
+
+
+/*
+ * Record `state` for pdev in its LSS slot, then return the shallowest
+ * (weakest) power state requested by any device sharing that LSS, so
+ * we never power an LSS deeper than its most demanding user allows.
+ */
+static pci_power_t pmu_pci_get_weakest_state_for_lss(int lss_index,
+  struct pci_dev *pdev, pci_power_t state)
+{
+ pci_power_t weakest = state;
+ int slot;
+
+ for (slot = 0; slot < PMU_MAX_LSS_SHARE; slot++) {
+  if (get_mid_pci_drv(lss_index, slot) == pdev)
+   set_mid_pci_power_state(lss_index, slot, state);
+
+  if (get_mid_pci_drv(lss_index, slot) &&
+      (get_mid_pci_power_state(lss_index, slot) < weakest))
+   weakest = get_mid_pci_power_state(lss_index, slot);
+ }
+ return weakest;
+}
+
+/*
+ * Resolve a PCI device to its PMU index, PMU number, and SSS
+ * register/position. Returns PMU_SUCCESS or PMU_FAILED.
+ */
+int pmu_pci_to_indexes(struct pci_dev *pdev, int *index,
+   int *pmu_num, int *ss_idx, int *ss_pos)
+{
+ int idx = get_pci_to_pmu_index(pdev);
+
+ if (idx == PMU_FAILED)
+  return PMU_FAILED;
+
+ *index = idx;
+ *ss_pos = get_mid_pci_ss_pos(idx);
+ *ss_idx = get_mid_pci_ss_idx(idx);
+ *pmu_num = get_mid_pci_pmu_num(idx);
+
+ return PMU_SUCCESS;
+}
+
+/*
+ * Handle a power state change for a north-complex device (display or
+ * ISP). Returns true when the index was an NC device and its state was
+ * recorded (and, for ISP, the power islands switched); false otherwise.
+ */
+static bool update_nc_device_states(int i, pci_power_t state)
+{
+ int status = 0;
+ int islands = 0;
+ int reg;
+
+ /* store the display status */
+ if (i == GFX_LSS_INDEX) {
+  mid_pmu_cxt->display_off = (state != PCI_D0);
+  return true;
+ }
+
+ /*Update the Camera status per ISP Driver Suspended/Resumed
+  * ISP power islands are also updated accordingly, otherwise Dx state
+  * in PMCSR refuses to change.
+  */
+ else if (i == ISP_POS) {
+  /* island set and register differ per platform */
+  if (platform_is(INTEL_ATOM_MFLD) ||
+   platform_is(INTEL_ATOM_CLV)) {
+   islands = APM_ISP_ISLAND | APM_IPH_ISLAND;
+   reg = APM_REG_TYPE;
+  } else if (platform_is(INTEL_ATOM_MRFLD)) {
+   islands = TNG_ISP_ISLAND;
+   reg = ISP_SS_PM0;
+  } else
+   return false;
+  status = pmu_nc_set_power_state(islands,
+   (state != PCI_D0) ?
+   OSPM_ISLAND_DOWN : OSPM_ISLAND_UP,
+   reg);
+  if (status)
+   return false;
+  mid_pmu_cxt->camera_off = (state != PCI_D0);
+  return true;
+ }
+
+ return false;
+}
+
+/* Seed the north-complex device state flags at driver init. */
+void init_nc_device_states(void)
+{
+#if !IS_ENABLED(CONFIG_VIDEO_ATOMISP)
+ /* no ISP driver present: treat the camera as already off */
+ mid_pmu_cxt->camera_off = true;
+#endif
+
+#ifndef GFX_ENABLE
+ /* If Gfx is disabled
+  * assume s0ix is not blocked
+  * from gfx side
+  */
+ mid_pmu_cxt->display_off = true;
+#endif
+}
+
+/* FIXME::Currently HSI Modem 7060 (BZ# 28529) is having an issue and
+* it will not go to Low Power State on CVT. So Standby will not work
+* if HSI is enabled.
+* We can choose between Standby/HSI based on enable_standby 1/0.
+*/
+unsigned int enable_standby __read_mostly;
+module_param(enable_standby, uint, 0000);
+
+/* FIXME:: We have issues with S0ix/S3 enabling by default
+ * with display lockup, HSIC etc., so have a boot time option
+ * to enable S0ix/S3
+ */
+unsigned int enable_s3 __read_mostly = 1;
+/*
+ * Module-param setter for enable_s3: on MRFLD, hold a wakelock while
+ * S3 is disabled so the system cannot suspend.
+ */
+int set_enable_s3(const char *val, struct kernel_param *kp)
+{
+ /* enable_s3 is unsigned; use the uint setter to match param_get_uint */
+ int rv = param_set_uint(val, kp);
+ if (rv)
+  return rv;
+
+ if (unlikely((!pmu_initialized)))
+  return 0;
+
+ if (platform_is(INTEL_ATOM_MRFLD)) {
+  if (!enable_s3)
+   __pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+  else
+   __pm_relax(mid_pmu_cxt->pmu_wake_lock);
+ }
+
+ return 0;
+}
+module_param_call(enable_s3, set_enable_s3, param_get_uint,
+    &enable_s3, S_IRUGO | S_IWUSR);
+
+/* FIXME:: We have issues with S0ix/S3 enabling by default
+ * with display lockup, HSIC etc., so have a boot time option
+ * to enable S0ix/S3
+ */
+unsigned int enable_s0ix __read_mostly = 1;
+/*
+ * Module-param setter for enable_s0ix: on MRFLD, rebuild the C-state
+ * ignore mask and adjust the cstate PM QoS request so the deepest
+ * allowed platform C-state matches the new setting.
+ */
+int set_enable_s0ix(const char *val, struct kernel_param *kp)
+{
+ /* enable_s0ix is unsigned; use the uint setter to match param_get_uint */
+ int rv = param_set_uint(val, kp);
+ if (rv)
+  return rv;
+
+ if (unlikely((!pmu_initialized)))
+  return 0;
+
+ if (platform_is(INTEL_ATOM_MRFLD)) {
+  if (!enable_s0ix) {
+   mid_pmu_cxt->cstate_ignore =
+    ~((1 << CPUIDLE_STATE_MAX) - 1);
+
+   /* Ignore C2, C3, C5 states */
+   mid_pmu_cxt->cstate_ignore |= (1 << 1);
+   mid_pmu_cxt->cstate_ignore |= (1 << 2);
+   mid_pmu_cxt->cstate_ignore |= (1 << 4);
+
+   /* For now ignore C7, C8, C9, C10 states */
+   mid_pmu_cxt->cstate_ignore |= (1 << 6);
+   mid_pmu_cxt->cstate_ignore |= (1 << 7);
+   mid_pmu_cxt->cstate_ignore |= (1 << 8);
+   mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+   /* Restrict platform Cx state to C6 */
+   pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+      (CSTATE_EXIT_LATENCY_S0i1-1));
+  } else {
+   mid_pmu_cxt->cstate_ignore =
+    ~((1 << CPUIDLE_STATE_MAX) - 1);
+
+   /* Ignore C2, C3, C5, C8 and C10 states */
+   mid_pmu_cxt->cstate_ignore |= (1 << 1);
+   mid_pmu_cxt->cstate_ignore |= (1 << 2);
+   mid_pmu_cxt->cstate_ignore |= (1 << 4);
+   mid_pmu_cxt->cstate_ignore |= (1 << 7);
+   mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+   pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+      PM_QOS_DEFAULT_VALUE);
+  }
+ }
+
+ return 0;
+}
+module_param_call(enable_s0ix, set_enable_s0ix, param_get_uint,
+    &enable_s0ix, S_IRUGO | S_IWUSR);
+
+/* Per-SSS-register bit masks of LSSes the PMU driver must leave alone
+ * when composing sub-system state commands (one mask per register). */
+unsigned int pmu_ignore_lss0 __read_mostly = IGNORE_SSS0;
+module_param(pmu_ignore_lss0, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss1 __read_mostly = IGNORE_SSS1;
+module_param(pmu_ignore_lss1, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss2 __read_mostly = IGNORE_SSS2;
+module_param(pmu_ignore_lss2, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss3 __read_mostly = IGNORE_SSS3;
+module_param(pmu_ignore_lss3, uint, S_IRUGO | S_IWUSR);
+
+/*
+ * Force the eMMC LSS to D0i0, used on the panic/emergency path so the
+ * eMMC is accessible even if the SCU has crashed. A busy PMU is
+ * tolerated once (the eMMC may already be in D0i0), but a busy PMU
+ * after issuing the command is fatal (BUG).
+ */
+int pmu_set_emmc_to_d0i0_atomic(void)
+{
+ u32 pm_cmd_val;
+ u32 new_value;
+ int sub_sys_pos, sub_sys_index;
+ struct pmu_ss_states cur_pmssc;
+ int status = 0;
+
+ if (unlikely((!pmu_initialized)))
+  return 0;
+
+ /* LSS 01 is index = 0, pos = 1 */
+ sub_sys_index = EMMC0_LSS / mid_pmu_cxt->ss_per_reg;
+ sub_sys_pos = EMMC0_LSS % mid_pmu_cxt->ss_per_reg;
+
+ memset(&cur_pmssc, 0, sizeof(cur_pmssc));
+
+ /*
+  * Give time for possible previous PMU operation to finish in
+  * case where SCU is functioning normally. For SCU crashed case
+  * PMU may stay busy but check if the emmc is accessible.
+  */
+ status = _pmu2_wait_not_busy();
+ if (status) {
+  dev_err(&mid_pmu_cxt->pmu_dev->dev,
+   "PMU2 busy, ignoring as emmc might be already d0i0\n");
+  status = 0;
+ }
+
+ pmu_read_sss(&cur_pmssc);
+
+ /* set D0i0 the LSS bits */
+ pm_cmd_val =
+  (D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+ new_value = cur_pmssc.pmu2_states[sub_sys_index] &
+       (~pm_cmd_val);
+ /* already in D0i0: nothing to issue */
+ if (new_value == cur_pmssc.pmu2_states[sub_sys_index])
+  goto err;
+
+ status = _pmu2_wait_not_busy();
+ if (status)
+  goto err;
+
+ cur_pmssc.pmu2_states[sub_sys_index] = new_value;
+
+ /* Request SCU for PM interrupt enabling */
+ writel(PMU_PANIC_EMMC_UP_REQ_CMD, mid_pmu_cxt->emergeny_emmc_up_addr);
+
+ status = pmu_issue_interactive_command(&cur_pmssc, false, false);
+
+ if (unlikely(status != PMU_SUCCESS)) {
+  dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+   "Failed to Issue a PM command to PMU2\n");
+  goto err;
+
+ }
+
+ /*
+  * Wait for interactive command to complete.
+  * If we dont wait, there is a possibility that
+  * the driver may access the device before its
+  * powered on in SCU.
+  *
+  */
+ if (_pmu2_wait_not_busy()) {
+  pmu_dump_logs();
+  BUG();
+ }
+
+err:
+
+ return status;
+}
+
+
+#define SAVED_HISTORY_ADDRESS_NUM 10
+#define SAVED_HISTORY_NUM 20
+#define PCI_MAX_RECORD_NUM 10
+
+/* One recorded north-complex power transition, including a partial
+ * call backtrace of the requester for post-mortem debugging. */
+struct saved_nc_power_history {
+ unsigned long long ts;
+ unsigned short pci;
+ unsigned short cpu:4;
+ unsigned short state_type:8;
+ unsigned short real_change:2;
+ int reg_type;
+ int islands;
+ void *address[SAVED_HISTORY_ADDRESS_NUM];
+};
+
+static atomic_t saved_nc_power_history_current = ATOMIC_INIT(-1);
+static struct saved_nc_power_history all_history[SAVED_HISTORY_NUM];
+/* Claim the next slot in the circular history buffer (atomic, lock-free). */
+static struct saved_nc_power_history *get_new_record_history(void)
+{
+ unsigned int ret =
+  atomic_add_return(1, &saved_nc_power_history_current);
+ return &all_history[ret%SAVED_HISTORY_NUM];
+}
+
+/* PCI device IDs whose power-state transitions are recorded in the
+ * history buffer; tunable at runtime via module parameter. */
+static unsigned short pci_need_record[PCI_MAX_RECORD_NUM] = { 0x08c8, 0x0130, };
+static int num_pci_need_record = 2;
+module_param_array(pci_need_record, ushort, &num_pci_need_record, 0644);
+MODULE_PARM_DESC(pci_need_record,
+  "devices need be traced power state transition.");
+
+/* True when pdev's device ID is in the pci_need_record trace list. */
+static bool pci_need_record_power_state(struct pci_dev *pdev)
+{
+ int idx;
+
+ for (idx = 0; idx < num_pci_need_record; idx++)
+  if (pci_need_record[idx] == pdev->device)
+   return true;
+
+ return false;
+}
+
+/* Dump one history record (fields plus the saved backtrace) to the log. */
+static void print_saved_record(struct saved_nc_power_history *record)
+{
+ int i;
+ unsigned long long ts = record->ts;
+ /* split ns timestamp into seconds + remainder for printing */
+ unsigned long nanosec_rem = do_div(ts, 1000000000);
+
+ printk(KERN_INFO "----\n");
+ printk(KERN_INFO "ts[%5lu.%06lu] cpu[%d] is pci[%04x] reg_type[%d] "
+   "state_type[%d] islands[%x] real_change[%d]\n",
+   (unsigned long)ts,
+   nanosec_rem / 1000,
+   record->cpu,
+   record->pci,
+   record->reg_type,
+   record->state_type,
+   record->islands,
+   record->real_change);
+ for (i = 0; i < SAVED_HISTORY_ADDRESS_NUM; i++) {
+  printk(KERN_INFO "%pf real_addr[%p]\n",
+   record->address[i],
+   record->address[i]);
+ }
+}
+
+/* True when _ebp lies in the same 8KB-aligned region as good_ebp,
+ * i.e. still within the same kernel stack.
+ * NOTE(review): the cast to unsigned int truncates on 64-bit; this
+ * code is X86-32 only (see Kconfig) - confirm before reuse elsewhere. */
+int verify_stack_ok(unsigned int *good_ebp, unsigned int *_ebp)
+{
+ return ((unsigned int)_ebp & 0xffffe000) ==
+  ((unsigned int)good_ebp & 0xffffe000);
+}
+
+/*
+ * Walk the x86-32 frame-pointer chain and store up to max_size return
+ * addresses into array (unused tail entries are zeroed).
+ * Returns the number of addresses actually stored.
+ */
+size_t backtrace_safe(void **array, size_t max_size)
+{
+ unsigned int *_ebp, *base_ebp;
+ unsigned int *caller;
+ unsigned int i;
+
+ asm ("movl %%ebp, %0"
+      : "=r" (_ebp)
+ );
+
+ base_ebp = _ebp;
+ caller = (unsigned int *) *(_ebp+1);
+
+ for (i = 0; i < max_size; i++)
+  array[i] = 0;
+ for (i = 0; i < max_size; i++) {
+  array[i] = caller;
+  _ebp = (unsigned int *) *_ebp;
+  /* stop when the next frame leaves this kernel stack */
+  if (!verify_stack_ok(base_ebp, _ebp))
+   break;
+  caller = (unsigned int *) *(_ebp+1);
+ }
+
+ /* i+1 entries were stored when we broke out after array[i];
+  * exactly max_size when the loop ran to completion (the old
+  * code returned max_size + 1 here, overstating the count). */
+ return (i < max_size) ? (i + 1) : max_size;
+}
+
+/* Print the circular NC power history, most recent record first.
+ * NOTE(review): if no record was ever taken the counter is still -1
+ * and stale/empty slots are printed - harmless for a debug dump. */
+void dump_nc_power_history(void)
+{
+ int i, start;
+ unsigned int total = atomic_read(&saved_nc_power_history_current);
+
+ start = total % SAVED_HISTORY_NUM;
+ printk(KERN_INFO "<----current timestamp\n");
+ printk(KERN_INFO "start[%d] saved[%d]\n",
+   start, total);
+ /* walk backwards from the newest slot, wrapping around the ring */
+ for (i = start; i >= 0; i--)
+  print_saved_record(&all_history[i]);
+ for (i = SAVED_HISTORY_NUM - 1; i > start; i--)
+  print_saved_record(&all_history[i]);
+}
+EXPORT_SYMBOL(dump_nc_power_history);
+
+/* proc read handler: dump the NC power history to the kernel log;
+ * always reports EOF (nothing is copied to the user buffer). */
+static ssize_t debug_read_history(struct file *file, char __user *buffer,
+    size_t count, loff_t *pos)
+{
+ dump_nc_power_history();
+ return 0;
+}
+
+/*
+ * proc write handler: parse "<islands-hex> <on-hex>" and forward it to
+ * pmu_nc_set_power_state() on the OSPM register (MFLD/CLV only).
+ */
+static ssize_t debug_write_read_history_entry(struct file *file,
+  const char __user *buffer, size_t count, loff_t *pos)
+{
+ char buf[20] = "0";
+ unsigned long len = min(sizeof(buf) - 1, count);
+ u32 islands;
+ u32 on;
+ int ret;
+
+ /* do nothing if platform is neither medfield nor clv */
+ if (!platform_is(INTEL_ATOM_MFLD) && !platform_is(INTEL_ATOM_CLV))
+  return count;
+
+ /* -EFAULT is the correct error for a bad user pointer
+  * (the old -1 surfaced to userspace as EPERM) */
+ if (copy_from_user(buf, buffer, len))
+  return -EFAULT;
+
+ buf[len] = 0;
+
+ ret = sscanf(buf, "%x%x", &islands, &on);
+ if (ret == 2)
+  pmu_nc_set_power_state(islands, on, OSPM_REG_TYPE);
+
+ return count;
+}
+
+/* /proc/debug_read_history: read dumps history, write drives islands. */
+static const struct file_operations proc_debug_operations = {
+ .owner = THIS_MODULE,
+ .read = debug_read_history,
+ .write = debug_write_read_history_entry,
+};
+
+/* Create the /proc/debug_read_history entry at device_initcall time. */
+static int __init debug_read_history_entry(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("debug_read_history", S_IRUGO | S_IWUSR, NULL,
+    &proc_debug_operations);
+
+ return entry ? 0 : -ENOMEM;
+}
+device_initcall(debug_read_history_entry);
+
+/**
+ * pmu_nc_set_power_state - Callback function is used by all the devices
+ * in north complex for a platform specific device power on/shutdown.
+ * Following assumptions are made by this function
+ *
+ * Every new request starts from scratch with no assumptions
+ * on previous/pending request to Punit.
+ * Caller is responsible to retry if request fails.
+ * Avoids multiple requests to Punit if target state is
+ * already in the expected state.
+ * spin_locks guarantee serialized access to these registers
+ * and avoid concurrent access from 2d/3d, VED, VEC, ISP & IPH.
+ *
+ * Returns 0 on success, non-zero from the platform nc_set_power_state op.
+ */
+int pmu_nc_set_power_state(int islands, int state_type, int reg)
+{
+ unsigned long flags;
+ struct saved_nc_power_history *record = NULL;
+ int ret = 0;
+ int change;
+
+ spin_lock_irqsave(&mid_pmu_cxt->nc_ready_lock, flags);
+
+ /* record the request (with backtrace) before acting on it */
+ record = get_new_record_history();
+ record->cpu = raw_smp_processor_id();
+ record->ts = cpu_clock(record->cpu);
+ record->islands = islands;
+ record->pci = 0;
+ record->state_type = state_type;
+ backtrace_safe(record->address, SAVED_HISTORY_ADDRESS_NUM);
+ record->real_change = 0;
+ record->reg_type = reg;
+
+ if (pmu_ops->nc_set_power_state) {
+  ret = pmu_ops->nc_set_power_state(islands, state_type,
+      reg, &change);
+  /* only mark (and re-stamp) the record if hardware changed */
+  if (change) {
+   record->real_change = 1;
+   record->ts = cpu_clock(record->cpu);
+  }
+ }
+
+ spin_unlock_irqrestore(&mid_pmu_cxt->nc_ready_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(pmu_nc_set_power_state);
+
+/**
+ * pmu_nc_get_power_state - Callback function is used to
+ * query power status of all the devices in north complex.
+ *
+ * @island: bit mask of power islands; the state of the first set
+ *          island is returned.
+ * @reg_type: APM_REG_TYPE or OSPM_REG_TYPE, selects the status register.
+ *
+ * Returns the 2-bit island state, -EINVAL on bad arguments, or 0 on
+ * platforms other than Medfield/Clovertrail.
+ * spin_locks guarantee serialized access to these registers
+ * and avoid concurrent access from 2d/3d, VED, VEC, ISP & IPH.
+ */
+int pmu_nc_get_power_state(int island, int reg_type)
+{
+ u32 pwr_sts;
+ unsigned long flags;
+ int i, lss;
+ int ret = -EINVAL;
+
+ /*do nothing if platform is neither medfield nor clv*/
+ if (!platform_is(INTEL_ATOM_MFLD) && !platform_is(INTEL_ATOM_CLV))
+  return 0;
+
+ spin_lock_irqsave(&mid_pmu_cxt->nc_ready_lock, flags);
+
+ switch (reg_type) {
+ case APM_REG_TYPE:
+  pwr_sts = inl(mid_pmu_cxt->apm_base + APM_STS);
+  break;
+ case OSPM_REG_TYPE:
+  pwr_sts = inl(mid_pmu_cxt->ospm_base + OSPM_PM_SSS);
+  break;
+ default:
+  /* report the variable actually being validated (reg_type,
+   * not island as the old message claimed) */
+  pr_err("%s: invalid argument 'reg_type': %d.\n",
+    __func__, reg_type);
+  goto unlock;
+ }
+
+ /* return the 2-bit state of the first island selected in the mask */
+ for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+  lss = island & (0x1 << i);
+  if (lss) {
+   ret = (pwr_sts >> (2 * i)) & 0x3;
+   break;
+  }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&mid_pmu_cxt->nc_ready_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(pmu_nc_get_power_state);
+
+/*
+* update_dev_res - Calculates & Updates the device residency when
+* a device state change occurs.
+* Computation of respective device residency starts when
+* its first state transition happens after the pmu driver
+* is initialised.
+*
+*/
+void update_dev_res(int index, pci_power_t state)
+{
+ if (state != PCI_D0) {
+  /* entering a low-power state: close the D0i0 interval */
+  if (mid_pmu_cxt->pmu_dev_res[index].start == 0) {
+   mid_pmu_cxt->pmu_dev_res[index].start = cpu_clock(0);
+   mid_pmu_cxt->pmu_dev_res[index].d0i3_entry =
+    mid_pmu_cxt->pmu_dev_res[index].start;
+   mid_pmu_cxt->pmu_dev_res[index].d0i0_acc = 0;
+  } else{
+   mid_pmu_cxt->pmu_dev_res[index].d0i3_entry =
+      cpu_clock(0);
+   mid_pmu_cxt->pmu_dev_res[index].d0i0_acc +=
+    (mid_pmu_cxt->pmu_dev_res[index].d0i3_entry -
+     mid_pmu_cxt->pmu_dev_res[index].d0i0_entry);
+  }
+ } else {
+  /* returning to D0: close the D0ix interval */
+  if (mid_pmu_cxt->pmu_dev_res[index].start == 0) {
+   mid_pmu_cxt->pmu_dev_res[index].start =
+      cpu_clock(0);
+   mid_pmu_cxt->pmu_dev_res[index].d0i0_entry
+    = mid_pmu_cxt->pmu_dev_res[index].start;
+   mid_pmu_cxt->pmu_dev_res[index].d0i3_acc = 0;
+  } else {
+   mid_pmu_cxt->pmu_dev_res[index].d0i0_entry =
+       cpu_clock(0);
+   mid_pmu_cxt->pmu_dev_res[index].d0i3_acc +=
+    (mid_pmu_cxt->pmu_dev_res[index].d0i0_entry -
+    mid_pmu_cxt->pmu_dev_res[index].d0i3_entry);
+  }
+ }
+ mid_pmu_cxt->pmu_dev_res[index].state = state;
+}
+
+/**
+ * pmu_pci_set_power_state - Callback function is used by all the PCI devices
+ * for a platform specific device power on/shutdown.
+ *
+ * Resolves the device's LSS, folds in the weakest state requested by
+ * devices sharing the LSS, handles north-complex devices directly, and
+ * otherwise issues an interactive PM command to PMU2 and waits for it.
+ * May sleep; serialized against S0ix entry by scu_ready_sem.
+ * Returns 0 on success or a PMU error code.
+ */
+int __ref pmu_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+ u32 new_value;
+ int i = 0;
+ u32 pm_cmd_val, chk_val;
+ int sub_sys_pos, sub_sys_index;
+ int pmu_num;
+ struct pmu_ss_states cur_pmssc;
+ int status = 0;
+ int retry_times = 0;
+ ktime_t calltime, delta, rettime;
+ struct saved_nc_power_history *record = NULL;
+ bool d3_cold = false;
+
+ /* Ignore callback from devices until we have initialized */
+ if (unlikely((!pmu_initialized)))
+  return 0;
+
+ might_sleep();
+
+ /* Try to acquire the scu_ready_sem, if not
+  * get blocked, until pmu_sc_irq() releases */
+ down(&mid_pmu_cxt->scu_ready_sem);
+
+ /*get LSS index corresponding to pdev, its position in
+  *32 bit register and its register number*/
+ status =
+  pmu_pci_to_indexes(pdev, &i, &pmu_num,
+    &sub_sys_index, &sub_sys_pos);
+
+ if (status)
+  goto unlock;
+
+ /* trace this transition if the device is on the watch list */
+ if (pci_need_record_power_state(pdev)) {
+  record = get_new_record_history();
+  record->cpu = raw_smp_processor_id();
+  record->ts = cpu_clock(record->cpu);
+  record->islands = 0;
+  record->reg_type = 0;
+  record->pci = pdev->device;
+  record->state_type = state;
+  backtrace_safe(record->address, SAVED_HISTORY_ADDRESS_NUM);
+  record->real_change = 0;
+ }
+
+ /* Ignore HDMI HPD driver d0ix on LSS 0 on MRFLD */
+ if (platform_is(INTEL_ATOM_MRFLD) &&
+   pdev->device == MID_MRFL_HDMI_DRV_DEV_ID)
+  goto unlock;
+
+ /*in case a LSS is assigned to more than one pdev, we need
+  *to find the shallowest state the LSS should be put into*/
+ state = pmu_pci_get_weakest_state_for_lss(i, pdev, state);
+
+ /*If the LSS corresponds to northcomplex device, update
+  *the status and return*/
+ if (update_nc_device_states(i, state)) {
+  if (mid_pmu_cxt->pmu_dev_res[i].state == state)
+   goto nc_done;
+  else {
+   if (i < MAX_DEVICES)
+    update_dev_res(i, state);
+   goto nc_done;
+  }
+ }
+
+ /* initialize the current pmssc states */
+ memset(&cur_pmssc, 0, sizeof(cur_pmssc));
+
+ status = _pmu2_wait_not_busy();
+
+ if (status)
+  goto unlock;
+
+ pmu_read_sss(&cur_pmssc);
+
+ /* Read the pm_cmd val & update the value */
+ pm_cmd_val =
+  (D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+ /* First clear the LSS bits */
+ new_value = cur_pmssc.pmu2_states[sub_sys_index] &
+      (~pm_cmd_val);
+ mid_pmu_cxt->os_sss[sub_sys_index] &= ~pm_cmd_val;
+
+ if (state != PCI_D0) {
+  pm_cmd_val =
+   (pci_to_platform_state(state) <<
+    (sub_sys_pos * BITS_PER_LSS));
+
+  new_value |= pm_cmd_val;
+
+  mid_pmu_cxt->os_sss[sub_sys_index] |= pm_cmd_val;
+ }
+
+ /* never change the state of LSSes we are told to ignore */
+ new_value &= ~mid_pmu_cxt->ignore_lss[sub_sys_index];
+
+ /* nothing to do, so don't do it... */
+ if (new_value == cur_pmssc.pmu2_states[sub_sys_index])
+  goto unlock;
+
+ cur_pmssc.pmu2_states[sub_sys_index] = new_value;
+
+ /* Check if the state is D3_cold or D3_Hot in TNG platform*/
+ if (platform_is(INTEL_ATOM_MRFLD) && (state == PCI_D3cold))
+  d3_cold = true;
+
+ /* Issue the pmu command to PMU 2
+  * flag is needed to distinguish between
+  * S0ix vs interactive command in pmu_sc_irq()
+  */
+ status = pmu_issue_interactive_command(&cur_pmssc, false, d3_cold);
+
+ if (unlikely(status != PMU_SUCCESS)) {
+  dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+    "Failed to Issue a PM command to PMU2\n");
+  goto unlock;
+ }
+
+ calltime = ktime_get();
+retry:
+ /*
+  * Wait for interactive command to complete.
+  * If we dont wait, there is a possibility that
+  * the driver may access the device before its
+  * powered on in SCU.
+  *
+  */
+ status = _pmu2_wait_not_busy_yield();
+ if (unlikely(status)) {
+  /* dump extensive state before retrying; give up (BUG)
+   * after 60 attempts */
+  rettime = ktime_get();
+  delta = ktime_sub(rettime, calltime);
+  retry_times++;
+
+  printk(KERN_CRIT "%s: D0ix transition failure: %04x %04X %s %20s:\n",
+    __func__,
+    pdev->vendor, pdev->device,
+    dev_name(&pdev->dev),
+    dev_driver_string(&pdev->dev));
+  printk(KERN_CRIT "interrupt pending = %d\n",
+    pmu_interrupt_pending());
+  printk(KERN_CRIT "pmu_busy_status = %d\n",
+    _pmu_read_status(PMU_BUSY_STATUS));
+  printk(KERN_CRIT "suspend_started = %d\n",
+    mid_pmu_cxt->suspend_started);
+  printk(KERN_CRIT "shutdown_started = %d\n",
+    mid_pmu_cxt->shutdown_started);
+  printk(KERN_CRIT "camera_off = %d display_off = %d\n",
+    mid_pmu_cxt->camera_off,
+    mid_pmu_cxt->display_off);
+  printk(KERN_CRIT "s0ix_possible = 0x%x\n",
+    mid_pmu_cxt->s0ix_possible);
+  printk(KERN_CRIT "s0ix_entered = 0x%x\n",
+    mid_pmu_cxt->s0ix_entered);
+  printk(KERN_CRIT "pmu_current_state = %d\n",
+    mid_pmu_cxt->pmu_current_state);
+  printk(KERN_CRIT "PMU is BUSY! retry_times[%d] total_delay[%lli]ms. Retry ...\n",
+    retry_times, (long long) ktime_to_ms(delta));
+  pmu_dump_logs();
+
+  trigger_all_cpu_backtrace();
+  if (retry_times < 60)
+   goto retry;
+  else
+   BUG();
+ }
+ if (record) {
+  record->real_change = 1;
+  record->ts = cpu_clock(record->cpu);
+ }
+
+ if (pmu_ops->set_power_state_ops)
+  pmu_ops->set_power_state_ops(state);
+
+ /* update stats */
+ inc_d0ix_stat((i-mid_pmu_cxt->pmu1_max_devs),
+   pci_to_platform_state(state));
+
+ /* check if transition to requested state has happened */
+ pmu_read_sss(&cur_pmssc);
+
+ chk_val = cur_pmssc.pmu2_states[sub_sys_index] &
+  (D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+ new_value &= (D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+ if ((chk_val == new_value) && (i < MAX_DEVICES))
+  update_dev_res(i, state);
+
+ WARN_ON(chk_val != new_value);
+
+nc_done:
+#if !IS_ENABLED(CONFIG_VIDEO_ATOMISP)
+ /* ATOMISP is always powered up on system-resume path. It needs
+  * to be turned off here if there is no driver to do it. */
+ if (!mid_pmu_cxt->camera_off) {
+  /* power down isp */
+  pmu_nc_set_power_state(APM_ISP_ISLAND | APM_IPH_ISLAND,
+    OSPM_ISLAND_DOWN, APM_REG_TYPE);
+  /* power down DPHY */
+  new_value = intel_mid_msgbus_read32(0x09, 0x03);
+  new_value |= 0x300;
+  intel_mid_msgbus_write32(0x09, 0x03, new_value);
+  mid_pmu_cxt->camera_off = true;
+ }
+#endif
+
+ /* FIXME:: If S0ix is enabled when North Complex is ON we see
+  * Fabric errors, tracked in BZ: 115181, hence hold pm_qos
+  * to restrict s0ix during North Island in D0i0
+  */
+ if (nc_device_state()) {
+  if (!pm_qos_request_active(mid_pmu_cxt->nc_restrict_qos))
+   pm_qos_add_request(mid_pmu_cxt->nc_restrict_qos,
+    PM_QOS_CPU_DMA_LATENCY, (CSTATE_EXIT_LATENCY_S0i1-1));
+ } else {
+  if (pm_qos_request_active(mid_pmu_cxt->nc_restrict_qos))
+   pm_qos_remove_request(mid_pmu_cxt->nc_restrict_qos);
+ }
+
+unlock:
+ up(&mid_pmu_cxt->scu_ready_sem);
+
+ return status;
+}
+
+/* Ask the platform ops for the deepest state allowed for this LSS,
+ * defaulting to D3hot. (Name typo "platfrom" kept: public interface.) */
+pci_power_t platfrom_pmu_choose_state(int lss)
+{
+ if (pmu_ops->pci_choose_state)
+  return pmu_ops->pci_choose_state(lss);
+
+ return PCI_D3hot;
+}
+
+/* return platform specific deepest states that the device can enter */
+pci_power_t pmu_pci_choose_state(struct pci_dev *pdev)
+{
+ int index, pmu_num, ss_idx, ss_pos;
+
+ /* before init we cannot map the device; default to D3hot */
+ if (!pmu_initialized)
+  return PCI_D3hot;
+
+ if (pmu_pci_to_indexes(pdev, &index, &pmu_num,
+    &ss_idx, &ss_pos) != PMU_SUCCESS)
+  return PCI_D3hot;
+
+ /* only PMU2-managed (south complex) devices have a platform policy */
+ if (pmu_num != PMU_NUM_2)
+  return PCI_D3hot;
+
+ return platfrom_pmu_choose_state(
+  ss_idx * mid_pmu_cxt->ss_per_reg + ss_pos);
+}
+
+/*
+ * Write an interactive (driver-initiated) PM command to PMU2 with the
+ * given sub-system states. @ioc requests a completion interrupt;
+ * @d3_cold marks a Tangier D3cold transition.
+ * Returns 0 on success, PMU_FAILED when the SCU is busy.
+ */
+int pmu_issue_interactive_command(struct pmu_ss_states *pm_ssc, bool ioc,
+    bool d3_cold)
+{
+ u32 command;
+
+ if (_pmu2_wait_not_busy()) {
+  dev_err(&mid_pmu_cxt->pmu_dev->dev,
+   "SCU BUSY. Operation not permitted\n");
+  return PMU_FAILED;
+ }
+
+ /* enable interrupts in PMU2 so that interrupts are
+  * propagated when ioc bit for a particular set
+  * command is set
+  */
+ /* Enable the hardware interrupt */
+ if (ioc)
+  pmu_set_interrupt_enable();
+
+ /* Configure the sub systems for pmu2 */
+ pmu_write_subsys_config(pm_ssc);
+
+ command = (ioc) ? INTERACTIVE_IOC_VALUE : INTERACTIVE_VALUE;
+
+ /* Special handling for PCI_D3cold in Tangier */
+ if (d3_cold)
+  command |= PM_CMD_D3_COLD;
+
+ /* send interactive command to SCU */
+ writel(command, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+ pmu_log_command(command, pm_ssc);
+
+ return 0;
+}
+
+/* Reads the status of each driver and updates the LSS values.
+ * To be called with scu_ready_sem mutex held, and pmu_config
+ * initialized with '0's
+ */
+static void update_all_lss_states(struct pmu_ss_states *pmu_config)
+{
+ int i;
+ /* per-register mask of LSS bits owned by enumerated PCI devices */
+ u32 PCIALLDEV_CFG[4] = {0, 0, 0, 0};
+
+ if (platform_is(INTEL_ATOM_MFLD) || platform_is(INTEL_ATOM_CLV)) {
+  for (i = 0; i < MAX_DEVICES; i++) {
+   int pmu_num = get_mid_pci_pmu_num(i);
+   struct pci_dev *pdev = get_mid_pci_drv(i, 0);
+
+   if ((pmu_num == PMU_NUM_2) && pdev) {
+    int ss_idx, ss_pos;
+    pci_power_t state;
+
+    ss_idx = get_mid_pci_ss_idx(i);
+    ss_pos = get_mid_pci_ss_pos(i);
+    state = pdev->current_state;
+    /* The case of device not probed yet:
+     * Force D0i3 */
+    if (state == PCI_UNKNOWN)
+     state = pmu_pci_choose_state(pdev);
+
+    /* By default its set to '0' hence
+     * no need to update PCI_D0 state
+     */
+    state = pmu_pci_get_weakest_state_for_lss
+      (i, pdev, state);
+
+    pmu_config->pmu2_states[ss_idx] |=
+     (pci_to_platform_state(state) <<
+      (ss_pos * BITS_PER_LSS));
+
+    PCIALLDEV_CFG[ss_idx] |=
+     (D0I3_MASK << (ss_pos * BITS_PER_LSS));
+   }
+  }
+ }
+
+ /* let the platform fill in LSSes not covered by PCI devices */
+ platform_update_all_lss_states(pmu_config, PCIALLDEV_CFG);
+}
+
+static int pmu_init(void)
+{
+ int status;
+ struct pmu_ss_states pmu_config;
+ struct pmu_suspend_config *ss_config;
+ int ret = 0;
+ int retry_times = 0;
+
+
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "PMU Driver loaded\n");
+ spin_lock_init(&mid_pmu_cxt->nc_ready_lock);
+
+ /* enumerate the PCI configuration space */
+ pmu_enumerate();
+
+ /* initialize the stats for pmu driver */
+ pmu_stats_init();
+
+ /* register platform pmu ops */
+ platform_set_pmu_ops();
+
+ /* platform specific initialization */
+ if (pmu_ops->init) {
+ status = pmu_ops->init();
+ if (status) {
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+ "pmu_ops->init failed\n");
+ goto out_err1;
+ }
+ }
+
+ /* initialize the state variables here */
+ ss_config = kzalloc(sizeof(struct pmu_suspend_config), GFP_KERNEL);
+
+ if (ss_config == NULL) {
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+ "Allocation of memory for ss_config has failed\n");
+ status = PMU_FAILED;
+ goto out_err1;
+ }
+
+ memset(&pmu_config, 0, sizeof(pmu_config));
+
+ ss_config->ss_state = pmu_config;
+
+ /* initialize for the autonomous S0i3 */
+ mid_pmu_cxt->ss_config = ss_config;
+
+ /* setup the wake capable devices */
+ mid_pmu_cxt->ss_config->wake_state.wake_enable[0] = WAKE_ENABLE_0;
+ mid_pmu_cxt->ss_config->wake_state.wake_enable[1] = WAKE_ENABLE_1;
+
+ /* setup the ignore lss list */
+ mid_pmu_cxt->ignore_lss[0] = pmu_ignore_lss0;
+ mid_pmu_cxt->ignore_lss[1] = pmu_ignore_lss1;
+ mid_pmu_cxt->ignore_lss[2] = pmu_ignore_lss2;
+ mid_pmu_cxt->ignore_lss[3] = pmu_ignore_lss3;
+
+ /*set wkc to appropriate value suitable for s0ix*/
+ writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+ &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+ writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+ &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+
+ /* Acquire the scu_ready_sem */
+ down(&mid_pmu_cxt->scu_ready_sem);
+
+ /* Now we have initialized the driver
+ * Allow drivers to get blocked in
+ * pmu_pci_set_power_state(), until we finish
+ * first interactive command.
+ */
+
+ pmu_initialized = true;
+
+ /* get the current status of each of the driver
+ * and update it in SCU
+ */
+ update_all_lss_states(&pmu_config);
+
+ status = pmu_issue_interactive_command(&pmu_config, false,
+ false);
+ if (status != PMU_SUCCESS) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+ "Failure from pmu mode change to interactive."
+ " = %d\n", status);
+ status = PMU_FAILED;
+ up(&mid_pmu_cxt->scu_ready_sem);
+ goto out_err2;
+ }
+
+ /*
+ * Wait for interactive command to complete.
+	 * If we don't wait, there is a possibility that
+	 * the driver may access the device before it's
+ * powered on in SCU.
+ *
+ */
+retry:
+ ret = _pmu2_wait_not_busy();
+ if (unlikely(ret)) {
+ retry_times++;
+ if (retry_times < 60) {
+ usleep_range(10, 500);
+ goto retry;
+ } else {
+ pmu_dump_logs();
+ BUG();
+ }
+ }
+
+	/* In cases where gfx is not enabled
+ * this will enable s0ix immediately
+ */
+ if (pmu_ops->set_power_state_ops)
+ pmu_ops->set_power_state_ops(PCI_D3hot);
+
+ up(&mid_pmu_cxt->scu_ready_sem);
+
+ return PMU_SUCCESS;
+
+out_err2:
+ kfree(ss_config);
+ mid_pmu_cxt->ss_config = NULL;
+out_err1:
+ return status;
+}
+
+/**
+ * mid_pmu_probe - This is the function where most of the PMU driver
+ * initialization happens.
+ */
+static int
+mid_pmu_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+{
+ int ret;
+ struct mrst_pmu_reg __iomem *pmu;
+ u32 data;
+
+ mid_pmu_cxt->pmu_wake_lock =
+ wakeup_source_register("pmu_wake_lock");
+
+ if (!mid_pmu_cxt->pmu_wake_lock) {
+ pr_err("%s: unable to register pmu wake source.\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Init the device */
+ ret = pci_enable_device(dev);
+ if (ret) {
+		pr_err("Mid PM device can't be enabled\n");
+ goto out_err0;
+ }
+
+ /* store the dev */
+ mid_pmu_cxt->pmu_dev = dev;
+ dev_warn(&dev->dev, "PMU DRIVER Probe called\n");
+
+ ret = pci_request_regions(dev, PMU_DRV_NAME);
+ if (ret < 0) {
+ pr_err("pci request region has failed\n");
+ goto out_err1;
+ }
+
+ mid_pmu_cxt->pmu1_max_devs = PMU1_MAX_DEVS;
+ mid_pmu_cxt->pmu2_max_devs = PMU2_MAX_DEVS;
+ mid_pmu_cxt->ss_per_reg = 16;
+
+ /* Following code is used to map address required for NC PM
+ * which is not needed for all platforms
+ */
+ if (platform_is(INTEL_ATOM_MFLD) || platform_is(INTEL_ATOM_CLV)) {
+ data = intel_mid_msgbus_read32(OSPM_PUNIT_PORT, OSPM_APMBA);
+ mid_pmu_cxt->apm_base = data & 0xffff;
+
+ data = intel_mid_msgbus_read32(OSPM_PUNIT_PORT, OSPM_OSPMBA);
+ mid_pmu_cxt->ospm_base = data & 0xffff;
+ }
+
+ /* Map the memory of pmu1 PMU reg base */
+ pmu = pci_iomap(dev, 0, 0);
+ if (pmu == NULL) {
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+ "Unable to map the PMU2 address space\n");
+ ret = PMU_FAILED;
+ goto out_err2;
+ }
+
+ mid_pmu_cxt->pmu_reg = pmu;
+
+ /* Map the memory of emergency emmc up */
+ mid_pmu_cxt->emergeny_emmc_up_addr =
+ ioremap_nocache(PMU_PANIC_EMMC_UP_ADDR, 4);
+ if (mid_pmu_cxt->emergeny_emmc_up_addr == NULL) {
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+ "Unable to map the emergency emmc up address space\n");
+ ret = PMU_FAILED;
+ goto out_err3;
+ }
+
+ if (request_irq(dev->irq, pmu_sc_irq, IRQF_NO_SUSPEND, PMU_DRV_NAME,
+ NULL)) {
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Registering isr has failed\n");
+ ret = PMU_FAILED;
+ goto out_err4;
+ }
+
+ /* call pmu init() for initialization of pmu interface */
+ ret = pmu_init();
+ if (ret != PMU_SUCCESS) {
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "PMU initialization has failed\n");
+ goto out_err5;
+ }
+ dev_warn(&mid_pmu_cxt->pmu_dev->dev, "after pmu initialization\n");
+
+ mid_pmu_cxt->pmu_init_time =
+ cpu_clock(raw_smp_processor_id());
+
+#ifdef CONFIG_PM_DEBUG
+ /*
+ * FIXME: Since S3 is not enabled yet we need to take
+ * a wake lock here. Else S3 will be triggered on display
+ * time out and platform will hang
+ */
+ if (platform_is(INTEL_ATOM_MRFLD) && !enable_s3)
+ __pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+#endif
+
+ return 0;
+
+out_err5:
+	free_irq(dev->irq, NULL); /* dev_id must match the one passed to request_irq() */
+out_err4:
+ iounmap(mid_pmu_cxt->emergeny_emmc_up_addr);
+ mid_pmu_cxt->emergeny_emmc_up_addr = NULL;
+out_err3:
+	pci_iounmap(dev, mid_pmu_cxt->pmu_reg); /* mapped with pci_iomap() above */
+ mid_pmu_cxt->base_addr.pmu1_base = NULL;
+ mid_pmu_cxt->base_addr.pmu2_base = NULL;
+out_err2:
+ pci_release_region(dev, 0);
+out_err1:
+ pci_disable_device(dev);
+out_err0:
+ wakeup_source_unregister(mid_pmu_cxt->pmu_wake_lock);
+ return ret;
+}
+
+static void mid_pmu_remove(struct pci_dev *dev)
+{
+ /* Freeing up the irq */
+	free_irq(dev->irq, NULL); /* dev_id must match the one passed to request_irq() */
+
+ if (pmu_ops->remove)
+ pmu_ops->remove();
+
+ iounmap(mid_pmu_cxt->emergeny_emmc_up_addr);
+ mid_pmu_cxt->emergeny_emmc_up_addr = NULL;
+
+ pci_iounmap(dev, mid_pmu_cxt->pmu_reg);
+ mid_pmu_cxt->base_addr.pmu1_base = NULL;
+ mid_pmu_cxt->base_addr.pmu2_base = NULL;
+
+ /* disable the current PCI device */
+ pci_release_region(dev, 0);
+ pci_disable_device(dev);
+
+ wakeup_source_unregister(mid_pmu_cxt->pmu_wake_lock);
+}
+
+static void mid_pmu_shutdown(struct pci_dev *dev)
+{
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Mid PM mid_pmu_shutdown called\n");
+
+ if (mid_pmu_cxt) {
+ /* Restrict platform Cx state to C6 */
+ pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+ (CSTATE_EXIT_LATENCY_S0i1-1));
+
+ down(&mid_pmu_cxt->scu_ready_sem);
+ mid_pmu_cxt->shutdown_started = true;
+ up(&mid_pmu_cxt->scu_ready_sem);
+ }
+}
+
+static struct pci_driver driver = {
+ .name = PMU_DRV_NAME,
+ .id_table = mid_pm_ids,
+ .probe = mid_pmu_probe,
+ .remove = mid_pmu_remove,
+ .shutdown = mid_pmu_shutdown
+};
+
+static int standby_enter(void)
+{
+ u32 temp = 0;
+ int s3_state = mid_state_to_sys_state(MID_S3_STATE);
+
+ if (mid_s0ix_enter(MID_S3_STATE) != MID_S3_STATE) {
+ pmu_set_s0ix_complete();
+ return -EINVAL;
+ }
+
+ /* time stamp for end of s3 entry */
+ time_stamp_for_sleep_state_latency(s3_state, false, true);
+
+ __monitor((void *) &temp, 0, 0);
+ smp_mb();
+ __mwait(mid_pmu_cxt->s3_hint, 1);
+
+ /* time stamp for start of s3 exit */
+ time_stamp_for_sleep_state_latency(s3_state, true, false);
+
+ pmu_set_s0ix_complete();
+
+ /*set wkc to appropriate value suitable for s0ix*/
+ writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+ &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+ writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+ &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+
+ if (platform_is(INTEL_ATOM_MRFLD))
+ up(&mid_pmu_cxt->scu_ready_sem);
+
+ return 0;
+}
+
+static int mid_suspend_begin(suspend_state_t state)
+{
+ mid_pmu_cxt->suspend_started = true;
+ pmu_s3_stats_update(1);
+
+ /* Restrict to C6 during suspend */
+ pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+ (CSTATE_EXIT_LATENCY_S0i1-1));
+ return 0;
+}
+
+static int mid_suspend_valid(suspend_state_t state)
+{
+ int ret = 0;
+
+ switch (state) {
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_MEM:
+ /* check if we are ready */
+ if (likely(pmu_initialized))
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+static int mid_suspend_prepare(void)
+{
+ return 0;
+}
+
+static int mid_suspend_prepare_late(void)
+{
+ return 0;
+}
+
+static int mid_suspend_enter(suspend_state_t state)
+{
+ int ret;
+
+ if (state != PM_SUSPEND_MEM)
+ return -EINVAL;
+
+ /* one last check before entering standby */
+ if (pmu_ops->check_nc_sc_status) {
+ if (!(pmu_ops->check_nc_sc_status())) {
+ trace_printk("Device d0ix status check failed! Aborting Standby entry!\n");
+ WARN_ON(1);
+ }
+ }
+
+ trace_printk("s3_entry\n");
+ ret = standby_enter();
+ trace_printk("s3_exit %d\n", ret);
+ if (ret != 0)
+ dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+ "Failed to enter S3 status: %d\n", ret);
+
+ return ret;
+}
+
+static void mid_suspend_end(void)
+{
+ /* allow s0ix now */
+ pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+ PM_QOS_DEFAULT_VALUE);
+
+ pmu_s3_stats_update(0);
+ mid_pmu_cxt->suspend_started = false;
+}
+
+static const struct platform_suspend_ops mid_suspend_ops = {
+ .begin = mid_suspend_begin,
+ .valid = mid_suspend_valid,
+ .prepare = mid_suspend_prepare,
+ .prepare_late = mid_suspend_prepare_late,
+ .enter = mid_suspend_enter,
+ .end = mid_suspend_end,
+};
+
+/**
+ * mid_pci_register_init - register the PMU driver as PCI device
+ */
+static int __init mid_pci_register_init(void)
+{
+ int ret;
+
+ mid_pmu_cxt = kzalloc(sizeof(struct mid_pmu_dev), GFP_KERNEL);
+
+ if (mid_pmu_cxt == NULL)
+ return -ENOMEM;
+
+ mid_pmu_cxt->s3_restrict_qos =
+ kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+ if (mid_pmu_cxt->s3_restrict_qos) {
+ pm_qos_add_request(mid_pmu_cxt->s3_restrict_qos,
+ PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ } else {
+ return -ENOMEM;
+ }
+
+ init_nc_device_states();
+
+ mid_pmu_cxt->nc_restrict_qos =
+ kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+ if (mid_pmu_cxt->nc_restrict_qos == NULL)
+ return -ENOMEM;
+
+ /* initialize the semaphores */
+ sema_init(&mid_pmu_cxt->scu_ready_sem, 1);
+
+ /* registering PCI device */
+ ret = pci_register_driver(&driver);
+ suspend_set_ops(&mid_suspend_ops);
+
+ return ret;
+}
+fs_initcall(mid_pci_register_init);
+
+void pmu_power_off(void)
+{
+ /* wait till SCU is ready */
+ if (!_pmu2_wait_not_busy())
+ writel(S5_VALUE, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+ if (!_pmu2_wait_not_busy())
+ rpmsg_send_generic_simple_command(IPCMSG_COLD_OFF, 1);
+}
+
+static void __exit mid_pci_cleanup(void)
+{
+ if (mid_pmu_cxt) {
+ if (mid_pmu_cxt->s3_restrict_qos)
+ pm_qos_remove_request(mid_pmu_cxt->s3_restrict_qos);
+
+ if (pm_qos_request_active(mid_pmu_cxt->nc_restrict_qos))
+ pm_qos_remove_request(mid_pmu_cxt->nc_restrict_qos);
+ }
+
+ suspend_set_ops(NULL);
+
+	/* unregister the PCI driver */
+ pci_unregister_driver(&driver);
+
+ if (mid_pmu_cxt) {
+ pmu_stats_finish();
+ kfree(mid_pmu_cxt->ss_config);
+ }
+
+ kfree(mid_pmu_cxt);
+}
+module_exit(mid_pci_cleanup);
--- /dev/null
+/*
+ * intel_soc_pmu.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _MID_PMU_H_
+#define _MID_PMU_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/jhash.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/nmi.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_wakeup.h>
+#include <asm/apic.h>
+#include <asm/intel_scu_ipc.h>
+#include <linux/intel_mid_pm.h>
+
+#include "intel_soc_mdfld.h"
+#include "intel_soc_clv.h"
+#include "intel_soc_mrfld.h"
+
+#define MID_PMU_MFLD_DRV_DEV_ID 0x0828
+#define MID_PMU_CLV_DRV_DEV_ID 0x08EC
+#define MID_PMU_MRFL_DRV_DEV_ID 0x11A1
+
+#define MID_MRFL_HDMI_DRV_DEV_ID 0x11A6
+
+/* SRAM address where PANIC START is written */
+#define PMU_PANIC_EMMC_UP_ADDR 0xFFFF3080
+#define PMU_PANIC_EMMC_UP_REQ_CMD 0xDEADBEEF
+
+#define MAX_DEVICES (PMU1_MAX_DEVS + PMU2_MAX_DEVS)
+#define PMU_MAX_LSS_SHARE 4
+
+#define PMU2_BUSY_TIMEOUT 500000
+#define HSU0_PCI_ID 0x81c
+#define HSU1_PCI_ID 0x81b
+#define HSI_PCI_ID 0x833
+
+#define MODE_ID_MAGIC_NUM 1
+
+#define LOG_ID_MASK 0x7F
+#define SUB_CLASS_MASK 0xFF00
+
+
+/* Definition for C6 Offload MSR Address */
+#define MSR_C6OFFLOAD_CTL_REG 0x120
+
+#define MSR_C6OFFLOAD_SET_LOW 1
+#define MSR_C6OFFLOAD_SET_HIGH 0
+
+#define C6OFFLOAD_BIT_MASK 0x2
+#define C6OFFLOAD_BIT 0x2
+
+#define PMU_DRV_NAME "intel_pmu_driver"
+
+#define MID_PCI_INDEX_HASH_BITS 8 /*size 256*/
+#define MID_PCI_INDEX_HASH_SIZE (1<<MID_PCI_INDEX_HASH_BITS)
+#define MID_PCI_INDEX_HASH_MASK (MID_PCI_INDEX_HASH_SIZE-1)
+
+/* some random number for initvalue */
+#define MID_PCI_INDEX_HASH_INITVALUE 0x27041975
+
+/*
+ * Values for programming the PM_CMD register based on the PM
+ * architecture specification.
+ */
+
+#define S5_VALUE 0x309D2601
+#define S0I1_VALUE 0X30992601
+#define LPMP3_VALUE 0X40492601
+#define S0I3_VALUE 0X309B2601
+#define FAST_ON_OFF_VALUE 0X309E2601
+#define INTERACTIVE_VALUE 0X00002201
+#define INTERACTIVE_IOC_VALUE 0X00002301
+
+#define WAKE_ENABLE_0 0xffffffff
+#define WAKE_ENABLE_1 0xffffffff
+#define INVALID_WAKE_SRC 0xFFFF
+
+#define LOG_SS_MASK 0x80
+
+#define D0I0_MASK 0
+#define D0I1_MASK 1
+#define D0I2_MASK 2
+#define D0I3_MASK 3
+
+#define BITS_PER_LSS 2
+#define MAX_LSS_POSSIBLE 64
+#define SS_IDX_MASK 0x3
+#define SS_POS_MASK 0xF
+
+#define PMU_BASE_ADDR(pmu_num) ((pmu_num == 0) ? \
+ (u32) base_addr.pmu1_base :\
+			(u32) base_addr.pmu2_base)
+
+#define SSMSK(mask, lss) ((mask) << ((lss) * 2))
+#define SSWKC(lss) (1 << (lss))
+
+/* North Complex Power management */
+#define OSPM_PUNIT_PORT 0x04
+#define OSPM_OSPMBA 0x78
+#define OSPM_PM_SSC 0x20
+#define OSPM_PM_SSS 0x30
+
+#define OSPM_APMBA 0x7a
+#define APM_CMD 0x0
+#define APM_STS 0x04
+#define PM_CMD_D3_COLD (0x1 << 21)
+
+/* Size of command logging array */
+#define LOG_SIZE 5
+
+enum sys_state {
+ SYS_STATE_S0I0,
+ SYS_STATE_S0I1,
+ SYS_STATE_LPMP3,
+ SYS_STATE_S0I2,
+ SYS_STATE_S0I3,
+ SYS_STATE_S3,
+ SYS_STATE_S5,
+ SYS_STATE_MAX
+};
+
+enum int_status {
+ INVALID_INT = 0,
+ CMD_COMPLETE_INT = 1,
+ CMD_ERROR_INT = 2,
+ WAKE_RECEIVED_INT = 3,
+ SUBSYS_POW_ERR_INT = 4,
+ S0ix_MISS_INT = 5,
+ NO_ACKC6_INT = 6,
+ TRIGGERERR = 7,
+ INVALID_SRC_INT
+};
+
+enum pmu_number {
+ PMU_NUM_1,
+ PMU_NUM_2,
+ PMU_MAX_DEVS
+};
+
+enum pmu_ss_state {
+ SS_STATE_D0I0 = 0,
+ SS_STATE_D0I1 = 1,
+ SS_STATE_D0I2 = 2,
+ SS_STATE_D0I3 = 3
+};
+
+
+struct pmu_ss_states {
+ unsigned long pmu1_states;
+ unsigned long pmu2_states[4];
+};
+
+struct pci_dev_info {
+ u8 ss_pos;
+ u8 ss_idx;
+ u8 pmu_num;
+
+ u32 log_id;
+ u32 cap;
+ struct pci_dev *drv[PMU_MAX_LSS_SHARE];
+ pci_power_t power_state[PMU_MAX_LSS_SHARE];
+};
+
+struct pmu_wake_ss_states {
+ unsigned long wake_enable[2];
+ unsigned long pmu1_wake_states;
+ unsigned long pmu2_wake_states[4];
+};
+
+struct pmu_suspend_config {
+ struct pmu_ss_states ss_state;
+ struct pmu_wake_ss_states wake_state;
+};
+
+struct pci_dev_index {
+ struct pci_dev *pdev;
+ u8 index;
+};
+
+/* PMU register interface */
+struct mrst_pmu_reg {
+ u32 pm_sts; /* 0x00 */
+ u32 pm_cmd; /* 0x04 */
+ u32 pm_ics; /* 0x08 */
+ u32 _resv1;
+ u32 pm_wkc[2]; /* 0x10 */
+ u32 pm_wks[2]; /* 0x18 */
+ u32 pm_ssc[4]; /* 0x20 */
+ u32 pm_sss[4]; /* 0x30 */
+ u32 pm_wssc[4]; /* 0x40 */
+ u32 pm_c3c4; /* 0x50 */
+ u32 pm_c5c6; /* 0x54 */
+ u32 pm_msic; /* 0x58 */
+};
+
+struct mid_pmu_cmd_log {
+ struct timespec ts;
+ u32 command;
+ struct pmu_ss_states pm_ssc;
+};
+
+struct mid_pmu_irq_log {
+ struct timespec ts;
+ u32 status;
+};
+
+struct mid_pmu_ipc_log {
+ struct timespec ts;
+ u32 command;
+};
+
+struct mid_pmu_pmu_irq_log {
+ struct timespec ts;
+ u8 status;
+};
+
+struct mid_pmu_ipc_irq_log {
+ struct timespec ts;
+};
+
+union pmu_pm_status {
+ struct {
+ u32 pmu_rev:8;
+ u32 pmu_busy:1;
+ u32 mode_id:4;
+ u32 Reserved:19;
+ } pmu_status_parts;
+ u32 pmu_status_value;
+};
+
+union pmu_pm_ics {
+ struct {
+ u32 int_status:8;
+ u32 int_enable:1;
+ u32 int_pend:1;
+ /* New bit added in TNG to indicate device wakes*/
+ u32 sw_int_status:1;
+ u32 reserved:21;
+ } pmu_pm_ics_parts;
+ u32 pmu_pm_ics_value;
+};
+
+struct intel_mid_base_addr {
+ u32 *pmu1_base;
+ void __iomem *pmu2_base;
+ u32 *pm_table_base;
+ u32 __iomem *offload_reg;
+};
+
+#define MAX_PMU_LOG_STATES (S0I3_STATE_IDX - C4_STATE_IDX + 1)
+
+struct mid_pmu_stats {
+ u64 err_count[3];
+ u64 count;
+ u64 time;
+ u64 last_entry;
+ u64 last_try;
+ u64 first_entry;
+ u32 demote_count[MAX_PMU_LOG_STATES];
+ u32 display_blocker_count;
+ u32 camera_blocker_count;
+ u32 blocker_count[MAX_LSS_POSSIBLE];
+};
+
+struct device_residency {
+ u64 d0i0_entry;
+ u64 d0i3_entry;
+ u64 d0i0_acc;
+ u64 d0i3_acc;
+ u64 start;
+ pci_power_t state;
+};
+
+struct mid_pmu_dev {
+ bool suspend_started;
+ bool shutdown_started;
+ bool camera_off;
+ bool display_off;
+
+ u32 apm_base;
+ u32 ospm_base;
+ u32 pmu1_max_devs;
+ u32 pmu2_max_devs;
+ u32 ss_per_reg;
+ u32 d0ix_stat[MAX_LSS_POSSIBLE][SS_STATE_D0I3+1];
+ u32 num_wakes[MAX_DEVICES][SYS_STATE_MAX];
+ u32 ignore_lss[4];
+ u32 os_sss[4];
+#ifdef CONFIG_PM_DEBUG
+ u32 cstate_ignore;
+ struct pm_qos_request *cstate_qos;
+#endif
+
+ u32 __iomem *emergeny_emmc_up_addr;
+ u64 pmu_init_time;
+
+ int cmd_error_int;
+ int s0ix_possible;
+ int s0ix_entered;
+
+#ifdef LOG_PMU_EVENTS
+ int cmd_log_idx;
+ int ipc_log_idx;
+ int ipc_irq_log_idx;
+ int pmu_irq_log_idx;
+#endif
+
+ enum sys_state pmu_current_state;
+
+ struct pci_dev_info pci_devs[MAX_DEVICES];
+ struct pci_dev_index
+ pci_dev_hash[MID_PCI_INDEX_HASH_SIZE];
+ struct intel_mid_base_addr base_addr;
+ struct mrst_pmu_reg __iomem *pmu_reg;
+ struct semaphore scu_ready_sem;
+ struct mid_pmu_stats pmu_stats[SYS_STATE_MAX];
+ struct device_residency pmu_dev_res[MAX_DEVICES];
+ struct delayed_work log_work;
+ struct pm_qos_request *s3_restrict_qos;
+
+#ifdef LOG_PMU_EVENTS
+ struct mid_pmu_cmd_log cmd_log[LOG_SIZE];
+ struct mid_pmu_ipc_log ipc_log[LOG_SIZE];
+ struct mid_pmu_ipc_irq_log ipc_irq_log[LOG_SIZE];
+ struct mid_pmu_pmu_irq_log pmu_irq_log[LOG_SIZE];
+#endif
+ struct wakeup_source *pmu_wake_lock;
+
+ struct pmu_suspend_config *ss_config;
+ struct pci_dev *pmu_dev;
+ struct pm_qos_request *nc_restrict_qos;
+
+ spinlock_t nc_ready_lock;
+
+ int s3_hint;
+};
+
+struct platform_pmu_ops {
+ int (*init)(void);
+ void (*prepare)(int);
+ bool (*enter)(int);
+ void (*wakeup)(void);
+ void (*remove)(void);
+ pci_power_t (*pci_choose_state) (int);
+ void (*set_power_state_ops) (int);
+ void (*set_s0ix_complete) (void);
+ int (*nc_set_power_state) (int, int, int, int *);
+ bool (*check_nc_sc_status) (void);
+};
+
+extern char s0ix[5];
+extern struct platform_pmu_ops mfld_pmu_ops;
+extern struct platform_pmu_ops clv_pmu_ops;
+extern struct platform_pmu_ops mrfld_pmu_ops;
+extern struct platform_pmu_ops *get_platform_ops(void);
+extern void mfld_s0ix_sram_save_cleanup(void);
+extern void pmu_stats_init(void);
+extern void pmu_s3_stats_update(int enter);
+extern void pmu_stats_finish(void);
+extern void mfld_s0ix_sram_restore(u32 s0ix);
+extern void pmu_stat_error(u8 err_type);
+extern void pmu_stat_end(void);
+extern void pmu_stat_start(enum sys_state type);
+extern int pmu_pci_to_indexes(struct pci_dev *pdev, int *index,
+ int *pmu_num, int *ss_idx, int *ss_pos);
+extern struct mid_pmu_dev *mid_pmu_cxt;
+extern void platform_set_pmu_ops(void);
+extern void pmu_read_sss(struct pmu_ss_states *pm_ssc);
+extern int pmu_issue_interactive_command(struct pmu_ss_states *pm_ssc,
+ bool ioc, bool d3_cold);
+extern int _pmu2_wait_not_busy(void);
+extern u32 get_s0ix_val_set_pm_ssc(int);
+extern int pmu_get_wake_source(void);
+extern bool pmu_initialized;
+extern struct platform_pmu_ops *pmu_ops;
+extern void platform_update_all_lss_states(struct pmu_ss_states *, int *);
+extern int set_extended_cstate_mode(const char *val, struct kernel_param *kp);
+extern int get_extended_cstate_mode(char *buffer, struct kernel_param *kp);
+extern int byt_pmu_nc_set_power_state(int islands, int state_type, int reg);
+extern int byt_pmu_nc_get_power_state(int islands, int reg);
+extern void pmu_set_interrupt_enable(void);
+extern void pmu_clear_interrupt_enable(void);
+
+#ifdef LOG_PMU_EVENTS
+extern void pmu_log_pmu_irq(int status);
+extern void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc);
+extern void pmu_dump_logs(void);
+#endif
+
+/* Accessor function for pci_devs start */
+static inline void pmu_stat_clear(void)
+{
+ mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+}
+
+static inline struct pci_dev *get_mid_pci_drv(int lss_index, int i)
+{
+ return mid_pmu_cxt->pci_devs[lss_index].drv[i];
+}
+
+static inline pci_power_t get_mid_pci_power_state(int lss_index, int i)
+{
+ return mid_pmu_cxt->pci_devs[lss_index].power_state[i];
+}
+
+static inline u8 get_mid_pci_ss_idx(int lss_index)
+{
+ return mid_pmu_cxt->pci_devs[lss_index].ss_idx & SS_IDX_MASK;
+}
+
+static inline u8 get_mid_pci_ss_pos(int lss_index)
+{
+ return mid_pmu_cxt->pci_devs[lss_index].ss_pos & SS_POS_MASK;
+}
+
+static inline u8 get_mid_pci_pmu_num(int lss_index)
+{
+ return mid_pmu_cxt->pci_devs[lss_index].pmu_num;
+}
+
+static inline void set_mid_pci_drv(int lss_index,
+ int i, struct pci_dev *pdev)
+{
+ mid_pmu_cxt->pci_devs[lss_index].drv[i] = pdev;
+}
+
+static inline void set_mid_pci_power_state(int lss_index,
+ int i, pci_power_t state)
+{
+ mid_pmu_cxt->pci_devs[lss_index].power_state[i] = state;
+}
+
+static inline void set_mid_pci_ss_idx(int lss_index, u8 ss_idx)
+{
+ mid_pmu_cxt->pci_devs[lss_index].ss_idx = ss_idx;
+}
+
+static inline void set_mid_pci_ss_pos(int lss_index, u8 ss_pos)
+{
+ mid_pmu_cxt->pci_devs[lss_index].ss_pos = ss_pos;
+}
+
+static inline void set_mid_pci_pmu_num(int lss_index, u8 pmu_num)
+{
+ mid_pmu_cxt->pci_devs[lss_index].pmu_num = pmu_num;
+}
+
+static inline void set_mid_pci_log_id(int lss_index, u32 log_id)
+{
+ mid_pmu_cxt->pci_devs[lss_index].log_id = log_id;
+}
+
+static inline void set_mid_pci_cap(int lss_index, u32 cap)
+{
+ mid_pmu_cxt->pci_devs[lss_index].cap = cap;
+}
+
+static inline u32 get_d0ix_stat(int lss_index, int state)
+{
+ return mid_pmu_cxt->d0ix_stat[lss_index][state];
+}
+
+static inline void inc_d0ix_stat(int lss_index, int state)
+{
+ mid_pmu_cxt->d0ix_stat[lss_index][state]++;
+}
+
+static inline void clear_d0ix_stats(void)
+{
+ memset(mid_pmu_cxt->d0ix_stat, 0, sizeof(mid_pmu_cxt->d0ix_stat));
+}
+
+/* Accessor functions for pci_devs end */
+
+static inline bool nc_device_state(void)
+{
+ return !mid_pmu_cxt->display_off || !mid_pmu_cxt->camera_off;
+}
+
+#endif
--- /dev/null
+/*
+ * mfld.c: Intel Medfield platform setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+
+static void penwell_arch_setup(void);
+/* penwell arch ops */
+static struct intel_mid_ops penwell_ops = {
+ .arch_setup = penwell_arch_setup,
+};
+
+static void mfld_power_off(void)
+{
+}
+
+static unsigned long __init mfld_calibrate_tsc(void)
+{
+ unsigned long fast_calibrate;
+ u32 lo, hi, ratio, fsb;
+
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+	pr_debug("IA32 perf status is 0x%x, 0x%x\n", lo, hi);
+ ratio = (hi >> 8) & 0x1f;
+ pr_debug("ratio is %d\n", ratio);
+ if (!ratio) {
+ pr_err("read a zero ratio, should be incorrect!\n");
+ pr_err("force tsc ratio to 16 ...\n");
+ ratio = 16;
+ }
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ if ((lo & 0x7) == 0x7)
+ fsb = FSB_FREQ_83SKU;
+ else
+ fsb = FSB_FREQ_100SKU;
+ fast_calibrate = ratio * fsb;
+ pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
+ lapic_timer_frequency = fsb * 1000 / HZ;
+ /* mark tsc clocksource as reliable */
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ return 0;
+}
+
+static void penwell_arch_setup(void)
+{
+ x86_platform.calibrate_tsc = mfld_calibrate_tsc;
+ pm_power_off = mfld_power_off;
+}
+
+void *get_penwell_ops(void)
+{
+ return &penwell_ops;
+}
+
+void *get_cloverview_ops(void)
+{
+ return &penwell_ops;
+}
--- /dev/null
+/*
+ * mrfl.c: Intel Merrifield platform specific setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Mark F. Brown <mark.f.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <asm/setup.h>
+#include <asm/intel-mid.h>
+#include <asm/processor.h>
+
+#define APIC_DIVISOR 16
+
+enum intel_mid_sim_type __intel_mid_sim_platform;
+EXPORT_SYMBOL_GPL(__intel_mid_sim_platform);
+
+static void (*intel_mid_timer_init)(void);
+
+static void tangier_arch_setup(void);
+
+/* tangier arch ops */
+static struct intel_mid_ops tangier_ops = {
+ .arch_setup = tangier_arch_setup,
+};
+
+static unsigned long __init tangier_calibrate_tsc(void)
+{
+ /* [REVERT ME] fast timer calibration method to be defined */
+ if ((intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_VP) ||
+ (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_HVP)) {
+ lapic_timer_frequency = 50000;
+ return 1000000;
+ }
+
+ if ((intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_SLE) ||
+ (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_NONE)) {
+
+ unsigned long fast_calibrate;
+ u32 lo, hi, ratio, fsb, bus_freq;
+
+ /* *********************** */
+ /* Compute TSC:Ratio * FSB */
+ /* *********************** */
+
+ /* Compute Ratio */
+ rdmsr(MSR_PLATFORM_INFO, lo, hi);
+ pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
+
+ ratio = (lo >> 8) & 0xFF;
+ pr_debug("ratio is %d\n", ratio);
+ if (!ratio) {
+ pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
+ ratio = 4;
+ }
+
+ /* Compute FSB */
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
+ hi, lo);
+
+ bus_freq = lo & 0x7;
+ pr_debug("bus_freq = 0x%x\n", bus_freq);
+
+ if (bus_freq == 0)
+ fsb = FSB_FREQ_100SKU;
+ else if (bus_freq == 1)
+ fsb = FSB_FREQ_100SKU;
+ else if (bus_freq == 2)
+ fsb = FSB_FREQ_133SKU;
+ else if (bus_freq == 3)
+ fsb = FSB_FREQ_167SKU;
+ else if (bus_freq == 4)
+ fsb = FSB_FREQ_83SKU;
+ else if (bus_freq == 5)
+ fsb = FSB_FREQ_400SKU;
+ else if (bus_freq == 6)
+ fsb = FSB_FREQ_267SKU;
+ else if (bus_freq == 7)
+ fsb = FSB_FREQ_333SKU;
+ else {
+			pr_err("Invalid bus_freq! Setting to minimal value!\n");
+			fsb = FSB_FREQ_100SKU;
+			BUG(); /* log the diagnostic before BUG(), which does not return */
+ }
+
+ /* TSC = FSB Freq * Resolved HFM Ratio */
+ fast_calibrate = ratio * fsb;
+ pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
+
+ /* ************************************ */
+ /* Calculate Local APIC Timer Frequency */
+ /* ************************************ */
+ lapic_timer_frequency = (fsb * 1000) / HZ;
+
+ pr_debug("Setting lapic_timer_frequency = %d\n",
+ lapic_timer_frequency);
+
+ /* mark tsc clocksource as reliable */
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+ }
+ return 0;
+}
+
+/* Allow user to enable simulator quirks settings for kernel */
+static int __init set_simulation_platform(char *str)
+{
+ int platform;
+
+ __intel_mid_sim_platform = INTEL_MID_CPU_SIMULATION_NONE;
+ if (get_option(&str, &platform)) {
+ __intel_mid_sim_platform = platform;
+ pr_info("simulator mode %d enabled.\n",
+ __intel_mid_sim_platform);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+early_param("mrfld_simulation", set_simulation_platform);
+
+static void __init tangier_time_init(void)
+{
+ /* [REVERT ME] ARAT capability not set in VP. Force setting */
+ if (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_VP ||
+ intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_HVP)
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_ARAT);
+
+ if (intel_mid_timer_init)
+ intel_mid_timer_init();
+}
+
+static void __init tangier_arch_setup(void)
+{
+ x86_platform.calibrate_tsc = tangier_calibrate_tsc;
+ intel_mid_timer_init = x86_init.timers.timer_init;
+ x86_init.timers.timer_init = tangier_time_init;
+}
+
+void *get_tangier_ops(void)
+{
+ return &tangier_ops;
+}
+
+/* piggy back on anniedale ops right now */
+void *get_anniedale_ops(void)
+{
+ return &tangier_ops;
+}
--- /dev/null
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Dale B. Stimson <dale.b.stimson@intel.com>
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+
+#include <asm/intel-mid.h>
+#include "pmu_tng.h"
+
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+const char *pm_cmd_reg_name(u32 reg_addr)
+{
+ const char *pstr;
+
+ switch (reg_addr) {
+ case GFX_SS_PM0:
+ pstr = "GFX_SS_PM0";
+ break;
+ case GFX_SS_PM1:
+ pstr = "GFX_SS_PM1";
+ break;
+ case VED_SS_PM0:
+ pstr = "VED_SS_PM0";
+ break;
+ case VED_SS_PM1:
+ pstr = "VED_SS_PM1";
+ break;
+ case VEC_SS_PM0:
+ pstr = "VEC_SS_PM0";
+ break;
+ case VEC_SS_PM1:
+ pstr = "VEC_SS_PM1";
+ break;
+ case DSP_SS_PM:
+ pstr = "DSP_SS_PM";
+ break;
+ case VSP_SS_PM0:
+ pstr = "VSP_SS_PM0";
+ break;
+ case VSP_SS_PM1:
+ pstr = "VSP_SS_PM1";
+ break;
+ case MIO_SS_PM:
+ pstr = "MIO_SS_PM";
+ break;
+ case HDMIO_SS_PM:
+ pstr = "HDMIO_SS_PM";
+ break;
+ case NC_PM_SSS:
+ pstr = "NC_PM_SSS";
+ break;
+ default:
+ pstr = "(unknown_pm_reg)";
+ break;
+ }
+
+ return pstr;
+}
+#endif /* if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD */
+
+
+/**
+ * pmu_set_power_state_tng() - Send power management cmd to punit and
+ * wait for completion.
+ *
+ * This function implements Tangier/Merrifield punit-based power control.
+ *
+ * @reg_pm0 - Address of PM control register. Example: GFX_SS_PM0
+ *
+ * @si_mask: Control bits. "si" stands for "sub-islands".
+ * Bit mask specifying of one or more of the power islands to be affected.
+ * Each power island is a two bit field. These bits are set for every bit
+ * in each power island to be affected by this command.
+ * For each island, either both of its bits or neither may be specified;
+ * it is an error to specify only one of an island's two bits.
+ *
+ * @ns_mask: "ns" stands for "new state".
+ * New state for bits specified by si_mask.
+ * Bits in ns_mask that are not set in si_mask are ignored.
+ * Mask of new power state for the power islands specified by si_mask.
+ * These bits are 0b00 for full power off and 0b11 for full power on.
+ * Note that other values may be specified (0b01 and 0b10).
+ *
+ * Bit field values:
+ * TNG_SSC_I0 0b00 - i0 - power on, no clock or power gating
+ * TNG_SSC_I1 0b01 - i1 - clock gated
+ * TNG_SSC_I2 0b01 - i2 - soft reset
+ * TNG_SSC_D3 0b11 - d3 - power off, hw state not retained
+ *
+ * NOTE: Bit mask ns_mask is inverted from the *actual* hardware register
+ * values being used for power control. This convention was adopted so that
+ * the API accepts 0b11 for full power-on and 0b00 for full power-off.
+ *
+ * Function return value: 0 if success, or -error_value.
+ *
+ * Example calls (ignoring return status):
+ * Turn on all gfx islands:
+ * si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ * ns_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ * pmu_set_power_state_tng(GFX_SS_PM0, si_mask, ns_mask);
+ * Turn on all gfx islands: (Another way):
+ * si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ * ns_mask = 0xFFFFFFFF;
+ * pmu_set_power_state_tng(GFX_SS_PM0, si_mask, ns_mask);
+ * Turn off all gfx islands:
+ * si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ * ns_mask = 0;
+ * pmu_set_power_state_tng(GFX_SS_PM0, si_mask, ns_mask);
+ *
+ * Replaces (for Tangier):
+ * int pmu_nc_set_power_state(int islands, int state_type, int reg_type);
+ */
+int pmu_set_power_state_tng(u32 reg_pm0, u32 si_mask, u32 ns_mask)
+{
+ u32 pwr_cur;
+ u32 pwr_val;
+ int tcount;
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+ u32 pwr_prev;
+ int pwr_stored;
+#endif
+
+ ns_mask &= si_mask;
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+ printk(KERN_ALERT "%s(\"%s\"=%#x, %#x, %#x);\n", __func__,
+ pm_cmd_reg_name(reg_pm0), reg_pm0, si_mask, ns_mask);
+#endif
+
+ pwr_cur = intel_mid_msgbus_read32(PUNIT_PORT, reg_pm0);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+ printk(KERN_ALERT "%s: before: %s: read: %#x\n",
+ __func__, pm_cmd_reg_name(reg_pm0), pwr_cur);
+#endif
+ /* Return if already in desired state. */
+ if ((((pwr_cur >> SSC_TO_SSS_SHIFT) ^ ns_mask) & si_mask) == 0)
+ return 0;
+
+ pwr_val = (pwr_cur & ~si_mask) | ns_mask;
+ intel_mid_msgbus_write32(PUNIT_PORT, reg_pm0, pwr_val);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+ printk(KERN_ALERT "%s: %s: write: %#x\n",
+ __func__, pm_cmd_reg_name(reg_pm0), pwr_val);
+ pwr_prev = 0;
+ pwr_stored = 0;
+#endif
+
+ for (tcount = 0; ; tcount++) {
+ if (tcount > 50) {
+ WARN(1, "%s: P-Unit PM action request timeout",
+ __func__);
+ return -EBUSY;
+ }
+ pwr_cur = intel_mid_msgbus_read32(PUNIT_PORT, reg_pm0);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+ if (!pwr_stored || (pwr_prev != pwr_cur)) {
+ printk(KERN_ALERT
+ "%s: tries=%d: %s: read: %#x\n",
+ __func__, tcount,
+ pm_cmd_reg_name(reg_pm0),
+ pwr_cur);
+ pwr_stored = 1;
+ pwr_prev = pwr_cur;
+ }
+#endif
+
+ if ((((pwr_cur >> SSC_TO_SSS_SHIFT) ^ ns_mask) & si_mask) == 0)
+ break;
+ udelay(10);
+ }
+
+ return 0;
+}
+
+static int __init pmu_nc_poweroff(void) {
+ /* Power off DPA */
+ pmu_set_power_state_tng (DSP_SS_PM, DPA_SSC, TNG_COMPOSITE_D3);
+ /* Power off MIO */
+ pmu_set_power_state_tng (MIO_SS_PM, MIO_SSC, TNG_COMPOSITE_D3);
+ /* Power off ISP */
+ pmu_set_power_state_tng (ISP_SS_PM0, ISP_SSC, TNG_COMPOSITE_D3);
+ return 0;
+}
+
+late_initcall(pmu_nc_poweroff);
+
--- /dev/null
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Dale B. Stimson <dale.b.stimson@intel.com>
+ * Austin Hu <austin.hu@intel.com>
+ */
+
+#ifndef _PMU_TNG_H_
+#define _PMU_TNG_H_
+
+#include <linux/types.h>
+
+/* Per TNG Punit HAS */
+
+#define PUNIT_PORT 0x04
+
+/*
+ * Registers on msgbus port 4 (p-unit) for power/freq control.
+ * Bits 7:0 of the PM0 (or just PM) registers are power control bits, whereas
+ * bits 31:24 are the corresponding status bits.
+ */
+
+/* Subsystem status of all North Cluster IPs (bits NC_PM_SSS_*) */
+#define NC_PM_SSS 0x3f
+
+/*
+ * Bit masks for power islands, as present in PM0 or PM registers.
+ * These reside as control bits in bits 7:0 of each register and
+ * as status bits in bits 31:24 of each register.
+ * Each power island has a 2-bit field which contains a value of TNG_SSC_*.
+ */
+#define SSC_TO_SSS_SHIFT 24
+
+/* GFX_SS_PM0 island */
+#define GFX_SS_PM0 0x30
+#define GFX_SS_PM1 0x31
+
+#define GFX_SLC_SSC 0x03
+#define GFX_SDKCK_SSC 0x0c
+#define GFX_RSCD_SSC 0x30
+#define GFX_SLC_LDO_SSC 0xc0
+
+#define GFX_SLC_SHIFT 0
+#define GFX_SDKCK_SHIFT 2
+#define GFX_RSCD_SHIFT 4
+#define GFX_SLC_LDO_SHIFT 6
+
+/* VED_SS_PMx power island */
+#define VED_SS_PM0 0x32
+#define VED_SS_PM1 0x33
+
+#define VED_SSC 0x03
+
+/* VEC_SS_PMx power island */
+#define VEC_SS_PM0 0x34
+#define VEC_SS_PM1 0x35
+
+#define VEC_SSC 0x03
+
+/* DSP_SS_PM power islands */
+#define DSP_SS_PM 0x36
+
+#define DPA_SSC 0x03
+#define DPB_SSC 0x0c
+#define DPC_SSC 0x30
+
+#define DPA_SHIFT 0
+#define DPB_SHIFT 2
+#define DPC_SHIFT 4
+
+/* VSP_SS_PMx power islands */
+#define VSP_SS_PM0 0x37
+#define VSP_SS_PM1 0x38
+
+#define VSP_SSC 0x03
+
+/* ISP_SS_PMx power islands */
+#define ISP_SS_PM0 0x39
+#define ISP_SS_PM1 0x3a
+
+#define ISP_SSC 0x03
+
+/* MIO_SS_PM power island */
+#define MIO_SS_PM 0x3b
+
+#define MIO_SSC 0x03
+
+/* HDMIO_SS_PM power island */
+#define HDMIO_SS_PM 0x3c
+
+#define HDMIO_SSC 0x03
+
+/*
+ * Subsystem status bits for NC_PM_SSS. Status of all North Cluster IPs.
+ * These correspond to the above bits.
+ */
+#define NC_PM_SSS_GFX_SLC 0x00000003
+#define NC_PM_SSS_GFX_SDKCK 0x0000000c
+#define NC_PM_SSS_GFX_RSCD 0x00000030
+#define NC_PM_SSS_VED 0x000000c0
+#define NC_PM_SSS_VEC 0x00000300
+#define NC_PM_SSS_DPA 0x00000c00
+#define NC_PM_SSS_DPB 0x00003000
+#define NC_PM_SSS_DPC 0x0000c000
+#define NC_PM_SSS_VSP 0x00030000
+#define NC_PM_SSS_ISP 0x000c0000
+#define NC_PM_SSS_MIO 0x00300000
+#define NC_PM_SSS_HDMIO 0x00c00000
+#define NC_PM_SSS_GFX_SLC_LDO 0x03000000
+
+/*
+ * Frequency bits for *_PM1 registers above.
+ */
+#define IP_FREQ_VALID 0x80 /* Freq is valid bit */
+
+#define IP_FREQ_SIZE 5 /* number of bits in freq fields */
+#define IP_FREQ_MASK 0x1f /* Bit mask for freq field */
+
+/* Positions of various frequency fields */
+#define IP_FREQ_POS 0 /* Freq control [4:0] */
+#define IP_FREQ_GUAR_POS 8 /* Freq guar [12:8] */
+#define IP_FREQ_STAT_POS 24 /* Freq status [28:24] */
+
+#define IP_FREQ_100_00 0x1f /* 0b11111 100.00 */
+#define IP_FREQ_106_67 0x1d /* 0b11101 106.67 */
+#define IP_FREQ_133_30 0x17 /* 0b10111 133.30 */
+#define IP_FREQ_160_00 0x13 /* 0b10011 160.00 */
+#define IP_FREQ_177_78 0x11 /* 0b10001 177.78 */
+#define IP_FREQ_200_00 0x0f /* 0b01111 200.00 */
+#define IP_FREQ_213_33 0x0e /* 0b01110 213.33 */
+#define IP_FREQ_266_67 0x0b /* 0b01011 266.67 */
+#define IP_FREQ_320_00 0x09 /* 0b01001 320.00 */
+#define IP_FREQ_355_56 0x08 /* 0b01000 355.56 */
+#define IP_FREQ_400_00 0x07 /* 0b00111 400.00 */
+#define IP_FREQ_457_14 0x06 /* 0b00110 457.14 */
+#define IP_FREQ_533_33 0x05 /* 0b00101 533.33 */
+#define IP_FREQ_640_00 0x04 /* 0b00100 640.00 */
+#define IP_FREQ_800_00 0x03 /* 0b00011 800.00 */
+#define IP_FREQ_RESUME_SET 0x64
+
+/* Tangier power states for each island */
+#define TNG_SSC_I0 (0b00) /* i0 - power on, no clock or power gating */
+#define TNG_SSC_I1 (0b01) /* i1 - clock gated */
+#define TNG_SSC_I2 (0b01) /* i2 - soft reset */
+#define TNG_SSC_D3 (0b11) /* d3 - power off, hw state not retained */
+
+#define TNG_SSC_MASK (0b11) /* bit mask of all involved bits. */
+
+/* Masks for the completely on and off states for 4 islands */
+#define TNG_COMPOSITE_I0 (0b00000000)
+#define TNG_COMPOSITE_D3 (0b11111111)
+
+#define DEBUG_PM_CMD 0
+#if !defined DEBUG_PM_CMD
+#define DEBUG_PM_CMD 1
+#endif
+
+int pmu_set_power_state_tng(u32 reg_pm0, u32 si_mask, u32 ns_mask);
+#endif /* ifndef _PMU_TNG_H_ */
+++ /dev/null
-obj-$(CONFIG_X86_INTEL_MID) += mrst.o
-obj-$(CONFIG_X86_INTEL_MID) += vrtc.o
-obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_mrst.o
+++ /dev/null
-/*
- * early_printk_mrst.c - early consoles for Intel MID platforms
- *
- * Copyright (c) 2008-2010, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-/*
- * This file implements two early consoles named mrst and hsu.
- * mrst is based on Maxim3110 spi-uart device, it exists in both
- * Moorestown and Medfield platforms, while hsu is based on a High
- * Speed UART device which only exists in the Medfield platform
- */
-
-#include <linux/serial_reg.h>
-#include <linux/serial_mfd.h>
-#include <linux/kmsg_dump.h>
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/mrst.h>
-
-#define MRST_SPI_TIMEOUT 0x200000
-#define MRST_REGBASE_SPI0 0xff128000
-#define MRST_REGBASE_SPI1 0xff128400
-#define MRST_CLK_SPI0_REG 0xff11d86c
-
-/* Bit fields in CTRLR0 */
-#define SPI_DFS_OFFSET 0
-
-#define SPI_FRF_OFFSET 4
-#define SPI_FRF_SPI 0x0
-#define SPI_FRF_SSP 0x1
-#define SPI_FRF_MICROWIRE 0x2
-#define SPI_FRF_RESV 0x3
-
-#define SPI_MODE_OFFSET 6
-#define SPI_SCPH_OFFSET 6
-#define SPI_SCOL_OFFSET 7
-#define SPI_TMOD_OFFSET 8
-#define SPI_TMOD_TR 0x0 /* xmit & recv */
-#define SPI_TMOD_TO 0x1 /* xmit only */
-#define SPI_TMOD_RO 0x2 /* recv only */
-#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
-
-#define SPI_SLVOE_OFFSET 10
-#define SPI_SRL_OFFSET 11
-#define SPI_CFS_OFFSET 12
-
-/* Bit fields in SR, 7 bits */
-#define SR_MASK 0x7f /* cover 7 bits */
-#define SR_BUSY (1 << 0)
-#define SR_TF_NOT_FULL (1 << 1)
-#define SR_TF_EMPT (1 << 2)
-#define SR_RF_NOT_EMPT (1 << 3)
-#define SR_RF_FULL (1 << 4)
-#define SR_TX_ERR (1 << 5)
-#define SR_DCOL (1 << 6)
-
-struct dw_spi_reg {
- u32 ctrl0;
- u32 ctrl1;
- u32 ssienr;
- u32 mwcr;
- u32 ser;
- u32 baudr;
- u32 txfltr;
- u32 rxfltr;
- u32 txflr;
- u32 rxflr;
- u32 sr;
- u32 imr;
- u32 isr;
- u32 risr;
- u32 txoicr;
- u32 rxoicr;
- u32 rxuicr;
- u32 msticr;
- u32 icr;
- u32 dmacr;
- u32 dmatdlr;
- u32 dmardlr;
- u32 idr;
- u32 version;
-
- /* Currently operates as 32 bits, though only the low 16 bits matter */
- u32 dr;
-} __packed;
-
-#define dw_readl(dw, name) __raw_readl(&(dw)->name)
-#define dw_writel(dw, name, val) __raw_writel((val), &(dw)->name)
-
-/* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */
-static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
-
-static u32 *pclk_spi0;
-/* Always contains an accessible address, start with 0 */
-static struct dw_spi_reg *pspi;
-
-static struct kmsg_dumper dw_dumper;
-static int dumper_registered;
-
-static void dw_kmsg_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
-{
- static char line[1024];
- size_t len;
-
- /* When run to this, we'd better re-init the HW */
- mrst_early_console_init();
-
- while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
- early_mrst_console.write(&early_mrst_console, line, len);
-}
-
-/* Set the ratio rate to 115200, 8n1, IRQ disabled */
-static void max3110_write_config(void)
-{
- u16 config;
-
- config = 0xc001;
- dw_writel(pspi, dr, config);
-}
-
-/* Translate char to a eligible word and send to max3110 */
-static void max3110_write_data(char c)
-{
- u16 data;
-
- data = 0x8000 | c;
- dw_writel(pspi, dr, data);
-}
-
-void mrst_early_console_init(void)
-{
- u32 ctrlr0 = 0;
- u32 spi0_cdiv;
- u32 freq; /* Freqency info only need be searched once */
-
- /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
- pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
- MRST_CLK_SPI0_REG);
- spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
- freq = 100000000 / (spi0_cdiv + 1);
-
- if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
- mrst_spi_paddr = MRST_REGBASE_SPI1;
-
- pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
- mrst_spi_paddr);
-
- /* Disable SPI controller */
- dw_writel(pspi, ssienr, 0);
-
- /* Set control param, 8 bits, transmit only mode */
- ctrlr0 = dw_readl(pspi, ctrl0);
-
- ctrlr0 &= 0xfcc0;
- ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
- | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
- dw_writel(pspi, ctrl0, ctrlr0);
-
- /*
- * Change the spi0 clk to comply with 115200 bps, use 100000 to
- * calculate the clk dividor to make the clock a little slower
- * than real baud rate.
- */
- dw_writel(pspi, baudr, freq/100000);
-
- /* Disable all INT for early phase */
- dw_writel(pspi, imr, 0x0);
-
- /* Set the cs to spi-uart */
- dw_writel(pspi, ser, 0x2);
-
- /* Enable the HW, the last step for HW init */
- dw_writel(pspi, ssienr, 0x1);
-
- /* Set the default configuration */
- max3110_write_config();
-
- /* Register the kmsg dumper */
- if (!dumper_registered) {
- dw_dumper.dump = dw_kmsg_dump;
- kmsg_dump_register(&dw_dumper);
- dumper_registered = 1;
- }
-}
-
-/* Slave select should be called in the read/write function */
-static void early_mrst_spi_putc(char c)
-{
- unsigned int timeout;
- u32 sr;
-
- timeout = MRST_SPI_TIMEOUT;
- /* Early putc needs to make sure the TX FIFO is not full */
- while (--timeout) {
- sr = dw_readl(pspi, sr);
- if (!(sr & SR_TF_NOT_FULL))
- cpu_relax();
- else
- break;
- }
-
- if (!timeout)
- pr_warning("MRST earlycon: timed out\n");
- else
- max3110_write_data(c);
-}
-
-/* Early SPI only uses polling mode */
-static void early_mrst_spi_write(struct console *con, const char *str, unsigned n)
-{
- int i;
-
- for (i = 0; i < n && *str; i++) {
- if (*str == '\n')
- early_mrst_spi_putc('\r');
- early_mrst_spi_putc(*str);
- str++;
- }
-}
-
-struct console early_mrst_console = {
- .name = "earlymrst",
- .write = early_mrst_spi_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-/*
- * Following is the early console based on Medfield HSU (High
- * Speed UART) device.
- */
-#define HSU_PORT_BASE 0xffa28080
-
-static void __iomem *phsu;
-
-void hsu_early_console_init(const char *s)
-{
- unsigned long paddr, port = 0;
- u8 lcr;
-
- /*
- * Select the early HSU console port if specified by user in the
- * kernel command line.
- */
- if (*s && !kstrtoul(s, 10, &port))
- port = clamp_val(port, 0, 2);
-
- paddr = HSU_PORT_BASE + port * 0x80;
- phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
-
- /* Disable FIFO */
- writeb(0x0, phsu + UART_FCR);
-
- /* Set to default 115200 bps, 8n1 */
- lcr = readb(phsu + UART_LCR);
- writeb((0x80 | lcr), phsu + UART_LCR);
- writeb(0x18, phsu + UART_DLL);
- writeb(lcr, phsu + UART_LCR);
- writel(0x3600, phsu + UART_MUL*4);
-
- writeb(0x8, phsu + UART_MCR);
- writeb(0x7, phsu + UART_FCR);
- writeb(0x3, phsu + UART_LCR);
-
- /* Clear IRQ status */
- readb(phsu + UART_LSR);
- readb(phsu + UART_RX);
- readb(phsu + UART_IIR);
- readb(phsu + UART_MSR);
-
- /* Enable FIFO */
- writeb(0x7, phsu + UART_FCR);
-}
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-static void early_hsu_putc(char ch)
-{
- unsigned int timeout = 10000; /* 10ms */
- u8 status;
-
- while (--timeout) {
- status = readb(phsu + UART_LSR);
- if (status & BOTH_EMPTY)
- break;
- udelay(1);
- }
-
- /* Only write the char when there was no timeout */
- if (timeout)
- writeb(ch, phsu + UART_TX);
-}
-
-static void early_hsu_write(struct console *con, const char *str, unsigned n)
-{
- int i;
-
- for (i = 0; i < n && *str; i++) {
- if (*str == '\n')
- early_hsu_putc('\r');
- early_hsu_putc(*str);
- str++;
- }
-}
-
-struct console early_hsu_console = {
- .name = "earlyhsu",
- .write = early_hsu_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
+++ /dev/null
-/*
- * mrst.c: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2008 Intel Corporation
- * Author: Jacob Pan (jacob.jun.pan@intel.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#define pr_fmt(fmt) "mrst: " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/sfi.h>
-#include <linux/intel_pmic_gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/i2c.h>
-#include <linux/i2c/pca953x.h>
-#include <linux/gpio_keys.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/gpio.h>
-#include <linux/i2c/tc35876x.h>
-
-#include <asm/setup.h>
-#include <asm/mpspec_def.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/io_apic.h>
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
-#include <asm/io.h>
-#include <asm/i8259.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
-#include <asm/reboot.h>
-
-/*
- * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
- * cmdline option x86_mrst_timer can be used to override the configuration
- * to prefer one or the other.
- * at runtime, there are basically three timer configurations:
- * 1. per cpu apbt clock only
- * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
- * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
- *
- * by default (without cmdline option), platform code first detects cpu type
- * to see if we are on lincroft or penwell, then set up both lapic or apbt
- * clocks accordingly.
- * i.e. by default, medfield uses configuration #2, moorestown uses #1.
- * config #3 is supported but not recommended on medfield.
- *
- * rating and feature summary:
- * lapic (with C3STOP) --------- 100
- * apbt (always-on) ------------ 110
- * lapic (always-on,ARAT) ------ 150
- */
-
-__cpuinitdata enum mrst_timer_options mrst_timer_options;
-
-static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
-static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
-enum mrst_cpu_type __mrst_cpu_chip;
-EXPORT_SYMBOL_GPL(__mrst_cpu_chip);
-
-int sfi_mtimer_num;
-
-struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
-EXPORT_SYMBOL_GPL(sfi_mrtc_array);
-int sfi_mrtc_num;
-
-static void mrst_power_off(void)
-{
-}
-
-static void mrst_reboot(void)
-{
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
-}
-
-/* parse all the mtimer info to a static mtimer array */
-static int __init sfi_parse_mtmr(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_timer_table_entry *pentry;
- struct mpc_intsrc mp_irq;
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- if (!sfi_mtimer_num) {
- sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_timer_table_entry);
- pentry = (struct sfi_timer_table_entry *) sb->pentry;
- totallen = sfi_mtimer_num * sizeof(*pentry);
- memcpy(sfi_mtimer_array, pentry, totallen);
- }
-
- pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
- pentry = sfi_mtimer_array;
- for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
- pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz,"
- " irq = %d\n", totallen, (u32)pentry->phys_addr,
- pentry->freq_hz, pentry->irq);
- if (!pentry->irq)
- continue;
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
-/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
- mp_irq.irqflag = 5;
- mp_irq.srcbus = MP_BUS_ISA;
- mp_irq.srcbusirq = pentry->irq; /* IRQ */
- mp_irq.dstapic = MP_APIC_ALL;
- mp_irq.dstirq = pentry->irq;
- mp_save_irq(&mp_irq);
- }
-
- return 0;
-}
-
-struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
-{
- int i;
- if (hint < sfi_mtimer_num) {
- if (!sfi_mtimer_usage[hint]) {
- pr_debug("hint taken for timer %d irq %d\n",\
- hint, sfi_mtimer_array[hint].irq);
- sfi_mtimer_usage[hint] = 1;
- return &sfi_mtimer_array[hint];
- }
- }
- /* take the first timer available */
- for (i = 0; i < sfi_mtimer_num;) {
- if (!sfi_mtimer_usage[i]) {
- sfi_mtimer_usage[i] = 1;
- return &sfi_mtimer_array[i];
- }
- i++;
- }
- return NULL;
-}
-
-void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
-{
- int i;
- for (i = 0; i < sfi_mtimer_num;) {
- if (mtmr->irq == sfi_mtimer_array[i].irq) {
- sfi_mtimer_usage[i] = 0;
- return;
- }
- i++;
- }
-}
-
-/* parse all the mrtc info to a global mrtc array */
-int __init sfi_parse_mrtc(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_rtc_table_entry *pentry;
- struct mpc_intsrc mp_irq;
-
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- if (!sfi_mrtc_num) {
- sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_rtc_table_entry);
- pentry = (struct sfi_rtc_table_entry *)sb->pentry;
- totallen = sfi_mrtc_num * sizeof(*pentry);
- memcpy(sfi_mrtc_array, pentry, totallen);
- }
-
- pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
- pentry = sfi_mrtc_array;
- for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
- pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
- totallen, (u32)pentry->phys_addr, pentry->irq);
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = 0xf; /* level trigger and active low */
- mp_irq.srcbus = MP_BUS_ISA;
- mp_irq.srcbusirq = pentry->irq; /* IRQ */
- mp_irq.dstapic = MP_APIC_ALL;
- mp_irq.dstirq = pentry->irq;
- mp_save_irq(&mp_irq);
- }
- return 0;
-}
-
-static unsigned long __init mrst_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb;
-
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
- ratio = (hi >> 8) & 0x1f;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("read a zero ratio, should be incorrect!\n");
- pr_err("force tsc ratio to 16 ...\n");
- ratio = 16;
- }
- rdmsr(MSR_FSB_FREQ, lo, hi);
- if ((lo & 0x7) == 0x7)
- fsb = PENWELL_FSB_FREQ_83SKU;
- else
- fsb = PENWELL_FSB_FREQ_100SKU;
- fast_calibrate = ratio * fsb;
- pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
- lapic_timer_frequency = fsb * 1000 / HZ;
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
-
- if (fast_calibrate)
- return fast_calibrate;
-
- return 0;
-}
-
-static void __init mrst_time_init(void)
-{
- sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
- switch (mrst_timer_options) {
- case MRST_TIMER_APBT_ONLY:
- break;
- case MRST_TIMER_LAPIC_APBT:
- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
- break;
- default:
- if (!boot_cpu_has(X86_FEATURE_ARAT))
- break;
- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
- return;
- }
- /* we need at least one APB timer */
- pre_init_apic_IRQ0();
- apbt_time_init();
-}
-
-static void __cpuinit mrst_arch_setup(void)
-{
- if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
- __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
- else {
- pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
- }
-}
-
-/* MID systems don't have i8042 controller */
-static int mrst_i8042_detect(void)
-{
- return 0;
-}
-
-/*
- * Moorestown does not have external NMI source nor port 0x61 to report
- * NMI status. The possible NMI sources are from pmu as a result of NMI
- * watchdog or lock debug. Reading io port 0x61 results in 0xff which
- * misled NMI handler.
- */
-static unsigned char mrst_get_nmi_reason(void)
-{
- return 0;
-}
-
-/*
- * Moorestown specific x86_init function overrides and early setup
- * calls.
- */
-void __init x86_mrst_early_setup(void)
-{
- x86_init.resources.probe_roms = x86_init_noop;
- x86_init.resources.reserve_resources = x86_init_noop;
-
- x86_init.timers.timer_init = mrst_time_init;
- x86_init.timers.setup_percpu_clockev = x86_init_noop;
-
- x86_init.irqs.pre_vector_init = x86_init_noop;
-
- x86_init.oem.arch_setup = mrst_arch_setup;
-
- x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
-
- x86_platform.calibrate_tsc = mrst_calibrate_tsc;
- x86_platform.i8042_detect = mrst_i8042_detect;
- x86_init.timers.wallclock_init = mrst_rtc_init;
- x86_platform.get_nmi_reason = mrst_get_nmi_reason;
-
- x86_init.pci.init = pci_mrst_init;
- x86_init.pci.fixup_irqs = x86_init_noop;
-
- legacy_pic = &null_legacy_pic;
-
- /* Moorestown specific power_off/restart method */
- pm_power_off = mrst_power_off;
- machine_ops.emergency_restart = mrst_reboot;
-
- /* Avoid searching for BIOS MP tables */
- x86_init.mpparse.find_smp_config = x86_init_noop;
- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
- set_bit(MP_BUS_ISA, mp_bus_not_pci);
-}
-
-/*
- * if user does not want to use per CPU apb timer, just give it a lower rating
- * than local apic timer and skip the late per cpu timer init.
- */
-static inline int __init setup_x86_mrst_timer(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (strcmp("apbt_only", arg) == 0)
- mrst_timer_options = MRST_TIMER_APBT_ONLY;
- else if (strcmp("lapic_and_apbt", arg) == 0)
- mrst_timer_options = MRST_TIMER_LAPIC_APBT;
- else {
- pr_warning("X86 MRST timer option %s not recognised"
- " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
- arg);
- return -EINVAL;
- }
- return 0;
-}
-__setup("x86_mrst_timer=", setup_x86_mrst_timer);
-
-/*
- * Parsing GPIO table first, since the DEVS table will need this table
- * to map the pin name to the actual pin.
- */
-static struct sfi_gpio_table_entry *gpio_table;
-static int gpio_num_entry;
-
-static int __init sfi_parse_gpio(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_gpio_table_entry *pentry;
- int num, i;
-
- if (gpio_table)
- return 0;
- sb = (struct sfi_table_simple *)table;
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
- pentry = (struct sfi_gpio_table_entry *)sb->pentry;
-
- gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
- if (!gpio_table)
- return -1;
- memcpy(gpio_table, pentry, num * sizeof(*pentry));
- gpio_num_entry = num;
-
- pr_debug("GPIO pin info:\n");
- for (i = 0; i < num; i++, pentry++)
- pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
- " pin = %d\n", i,
- pentry->controller_name,
- pentry->pin_name,
- pentry->pin_no);
- return 0;
-}
-
-static int get_gpio_by_name(const char *name)
-{
- struct sfi_gpio_table_entry *pentry = gpio_table;
- int i;
-
- if (!pentry)
- return -1;
- for (i = 0; i < gpio_num_entry; i++, pentry++) {
- if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
- return pentry->pin_no;
- }
- return -1;
-}
-
-/*
- * Here defines the array of devices platform data that IAFW would export
- * through SFI "DEVS" table, we use name and type to match the device and
- * its platform data.
- */
-struct devs_id {
- char name[SFI_NAME_LEN + 1];
- u8 type;
- u8 delay;
- void *(*get_platform_data)(void *info);
-};
-
-/* the offset for the mapping of global gpio pin to irq */
-#define MRST_IRQ_OFFSET 0x100
-
-static void __init *pmic_gpio_platform_data(void *info)
-{
- static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
- int gpio_base = get_gpio_by_name("pmic_gpio_base");
-
- if (gpio_base == -1)
- gpio_base = 64;
- pmic_gpio_pdata.gpio_base = gpio_base;
- pmic_gpio_pdata.irq_base = gpio_base + MRST_IRQ_OFFSET;
- pmic_gpio_pdata.gpiointr = 0xffffeff8;
-
- return &pmic_gpio_pdata;
-}
-
-static void __init *max3111_platform_data(void *info)
-{
- struct spi_board_info *spi_info = info;
- int intr = get_gpio_by_name("max3111_int");
-
- spi_info->mode = SPI_MODE_0;
- if (intr == -1)
- return NULL;
- spi_info->irq = intr + MRST_IRQ_OFFSET;
- return NULL;
-}
-
-/* we have multiple max7315 on the board ... */
-#define MAX7315_NUM 2
-static void __init *max7315_platform_data(void *info)
-{
- static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
- static int nr;
- struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
- struct i2c_board_info *i2c_info = info;
- int gpio_base, intr;
- char base_pin_name[SFI_NAME_LEN + 1];
- char intr_pin_name[SFI_NAME_LEN + 1];
-
- if (nr == MAX7315_NUM) {
- pr_err("too many max7315s, we only support %d\n",
- MAX7315_NUM);
- return NULL;
- }
- /* we have several max7315 on the board, we only need load several
- * instances of the same pca953x driver to cover them
- */
- strcpy(i2c_info->type, "max7315");
- if (nr++) {
- sprintf(base_pin_name, "max7315_%d_base", nr);
- sprintf(intr_pin_name, "max7315_%d_int", nr);
- } else {
- strcpy(base_pin_name, "max7315_base");
- strcpy(intr_pin_name, "max7315_int");
- }
-
- gpio_base = get_gpio_by_name(base_pin_name);
- intr = get_gpio_by_name(intr_pin_name);
-
- if (gpio_base == -1)
- return NULL;
- max7315->gpio_base = gpio_base;
- if (intr != -1) {
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- max7315->irq_base = gpio_base + MRST_IRQ_OFFSET;
- } else {
- i2c_info->irq = -1;
- max7315->irq_base = -1;
- }
- return max7315;
-}
-
-static void *tca6416_platform_data(void *info)
-{
- static struct pca953x_platform_data tca6416;
- struct i2c_board_info *i2c_info = info;
- int gpio_base, intr;
- char base_pin_name[SFI_NAME_LEN + 1];
- char intr_pin_name[SFI_NAME_LEN + 1];
-
- strcpy(i2c_info->type, "tca6416");
- strcpy(base_pin_name, "tca6416_base");
- strcpy(intr_pin_name, "tca6416_int");
-
- gpio_base = get_gpio_by_name(base_pin_name);
- intr = get_gpio_by_name(intr_pin_name);
-
- if (gpio_base == -1)
- return NULL;
- tca6416.gpio_base = gpio_base;
- if (intr != -1) {
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
- } else {
- i2c_info->irq = -1;
- tca6416.irq_base = -1;
- }
- return &tca6416;
-}
-
-static void *mpu3050_platform_data(void *info)
-{
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("mpu3050_int");
-
- if (intr == -1)
- return NULL;
-
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- return NULL;
-}
-
-static void __init *emc1403_platform_data(void *info)
-{
- static short intr2nd_pdata;
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("thermal_int");
- int intr2nd = get_gpio_by_name("thermal_alert");
-
- if (intr == -1 || intr2nd == -1)
- return NULL;
-
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
-
- return &intr2nd_pdata;
-}
-
-static void __init *lis331dl_platform_data(void *info)
-{
- static short intr2nd_pdata;
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("accel_int");
- int intr2nd = get_gpio_by_name("accel_2");
-
- if (intr == -1 || intr2nd == -1)
- return NULL;
-
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
-
- return &intr2nd_pdata;
-}
-
-static void __init *no_platform_data(void *info)
-{
- return NULL;
-}
-
-static struct resource msic_resources[] = {
- {
- .start = INTEL_MSIC_IRQ_PHYS_BASE,
- .end = INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct intel_msic_platform_data msic_pdata;
-
-static struct platform_device msic_device = {
- .name = "intel_msic",
- .id = -1,
- .dev = {
- .platform_data = &msic_pdata,
- },
- .num_resources = ARRAY_SIZE(msic_resources),
- .resource = msic_resources,
-};
-
-static inline bool mrst_has_msic(void)
-{
- return mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL;
-}
-
-static int msic_scu_status_change(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- if (code == SCU_DOWN) {
- platform_device_unregister(&msic_device);
- return 0;
- }
-
- return platform_device_register(&msic_device);
-}
-
-static int __init msic_init(void)
-{
- static struct notifier_block msic_scu_notifier = {
- .notifier_call = msic_scu_status_change,
- };
-
- /*
- * We need to be sure that the SCU IPC is ready before MSIC device
- * can be registered.
- */
- if (mrst_has_msic())
- intel_scu_notifier_add(&msic_scu_notifier);
-
- return 0;
-}
-arch_initcall(msic_init);
-
-/*
- * msic_generic_platform_data - sets generic platform data for the block
- * @info: pointer to the SFI device table entry for this block
- * @block: MSIC block
- *
- * Function sets IRQ number from the SFI table entry for given device to
- * the MSIC platform data.
- */
-static void *msic_generic_platform_data(void *info, enum intel_msic_block block)
-{
- struct sfi_device_table_entry *entry = info;
-
- BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
- msic_pdata.irq[block] = entry->irq;
-
- return no_platform_data(info);
-}
-
-static void *msic_battery_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
-}
-
-static void *msic_gpio_platform_data(void *info)
-{
- static struct intel_msic_gpio_pdata pdata;
- int gpio = get_gpio_by_name("msic_gpio_base");
-
- if (gpio < 0)
- return NULL;
-
- pdata.gpio_base = gpio;
- msic_pdata.gpio = &pdata;
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_GPIO);
-}
-
-static void *msic_audio_platform_data(void *info)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- pr_err("failed to create audio platform device\n");
- return NULL;
- }
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
-}
-
-static void *msic_power_btn_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_POWER_BTN);
-}
-
-static void *msic_ocd_platform_data(void *info)
-{
- static struct intel_msic_ocd_pdata pdata;
- int gpio = get_gpio_by_name("ocd_gpio");
-
- if (gpio < 0)
- return NULL;
-
- pdata.gpio = gpio;
- msic_pdata.ocd = &pdata;
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
-}
-
-static void *msic_thermal_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL);
-}
-
-/* tc35876x DSI-LVDS bridge chip and panel platform data */
-static void *tc35876x_platform_data(void *data)
-{
- static struct tc35876x_platform_data pdata;
-
- /* gpio pins set to -1 will not be used by the driver */
- pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
- pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
- pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
-
- return &pdata;
-}
-
-static const struct devs_id __initconst device_ids[] = {
- {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
- {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
- {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
- {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
- {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
- {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
- {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
- {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
- {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
- {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
- {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
- {"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data},
-
- /* MSIC subdevices */
- {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
- {"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data},
- {"msic_audio", SFI_DEV_TYPE_IPC, 1, &msic_audio_platform_data},
- {"msic_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data},
- {"msic_ocd", SFI_DEV_TYPE_IPC, 1, &msic_ocd_platform_data},
- {"msic_thermal", SFI_DEV_TYPE_IPC, 1, &msic_thermal_platform_data},
-
- {},
-};
-
-#define MAX_IPCDEVS 24
-static struct platform_device *ipc_devs[MAX_IPCDEVS];
-static int ipc_next_dev;
-
-#define MAX_SCU_SPI 24
-static struct spi_board_info *spi_devs[MAX_SCU_SPI];
-static int spi_next_dev;
-
-#define MAX_SCU_I2C 24
-static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
-static int i2c_bus[MAX_SCU_I2C];
-static int i2c_next_dev;
-
-static void __init intel_scu_device_register(struct platform_device *pdev)
-{
- if(ipc_next_dev == MAX_IPCDEVS)
- pr_err("too many SCU IPC devices");
- else
- ipc_devs[ipc_next_dev++] = pdev;
-}
-
-static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
-{
- struct spi_board_info *new_dev;
-
- if (spi_next_dev == MAX_SCU_SPI) {
- pr_err("too many SCU SPI devices");
- return;
- }
-
- new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!new_dev) {
- pr_err("failed to alloc mem for delayed spi dev %s\n",
- sdev->modalias);
- return;
- }
- memcpy(new_dev, sdev, sizeof(*sdev));
-
- spi_devs[spi_next_dev++] = new_dev;
-}
-
-static void __init intel_scu_i2c_device_register(int bus,
- struct i2c_board_info *idev)
-{
- struct i2c_board_info *new_dev;
-
- if (i2c_next_dev == MAX_SCU_I2C) {
- pr_err("too many SCU I2C devices");
- return;
- }
-
- new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
- if (!new_dev) {
- pr_err("failed to alloc mem for delayed i2c dev %s\n",
- idev->type);
- return;
- }
- memcpy(new_dev, idev, sizeof(*idev));
-
- i2c_bus[i2c_next_dev] = bus;
- i2c_devs[i2c_next_dev++] = new_dev;
-}
-
-BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
-EXPORT_SYMBOL_GPL(intel_scu_notifier);
-
-/* Called by IPC driver */
-void intel_scu_devices_create(void)
-{
- int i;
-
- for (i = 0; i < ipc_next_dev; i++)
- platform_device_add(ipc_devs[i]);
-
- for (i = 0; i < spi_next_dev; i++)
- spi_register_board_info(spi_devs[i], 1);
-
- for (i = 0; i < i2c_next_dev; i++) {
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- adapter = i2c_get_adapter(i2c_bus[i]);
- if (adapter) {
- client = i2c_new_device(adapter, i2c_devs[i]);
- if (!client)
- pr_err("can't create i2c device %s\n",
- i2c_devs[i]->type);
- } else
- i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
- }
- intel_scu_notifier_post(SCU_AVAILABLE, NULL);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_create);
-
-/* Called by IPC driver */
-void intel_scu_devices_destroy(void)
-{
- int i;
-
- intel_scu_notifier_post(SCU_DOWN, NULL);
-
- for (i = 0; i < ipc_next_dev; i++)
- platform_device_del(ipc_devs[i]);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
-
-static void __init install_irq_resource(struct platform_device *pdev, int irq)
-{
- /* Single threaded */
- static struct resource __initdata res = {
- .name = "IRQ",
- .flags = IORESOURCE_IRQ,
- };
- res.start = irq;
- platform_device_add_resources(pdev, &res, 1);
-}
-
-static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *entry)
-{
- const struct devs_id *dev = device_ids;
- struct platform_device *pdev;
- void *pdata = NULL;
-
- while (dev->name[0]) {
- if (dev->type == SFI_DEV_TYPE_IPC &&
- !strncmp(dev->name, entry->name, SFI_NAME_LEN)) {
- pdata = dev->get_platform_data(entry);
- break;
- }
- dev++;
- }
-
- /*
- * On Medfield the platform device creation is handled by the MSIC
- * MFD driver so we don't need to do it here.
- */
- if (mrst_has_msic())
- return;
-
- pdev = platform_device_alloc(entry->name, 0);
- if (pdev == NULL) {
- pr_err("out of memory for SFI platform device '%s'.\n",
- entry->name);
- return;
- }
- install_irq_resource(pdev, entry->irq);
-
- pdev->dev.platform_data = pdata;
- intel_scu_device_register(pdev);
-}
-
-static void __init sfi_handle_spi_dev(struct spi_board_info *spi_info)
-{
- const struct devs_id *dev = device_ids;
- void *pdata = NULL;
-
- while (dev->name[0]) {
- if (dev->type == SFI_DEV_TYPE_SPI &&
- !strncmp(dev->name, spi_info->modalias, SFI_NAME_LEN)) {
- pdata = dev->get_platform_data(spi_info);
- break;
- }
- dev++;
- }
- spi_info->platform_data = pdata;
- if (dev->delay)
- intel_scu_spi_device_register(spi_info);
- else
- spi_register_board_info(spi_info, 1);
-}
-
-static void __init sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info)
-{
- const struct devs_id *dev = device_ids;
- void *pdata = NULL;
-
- while (dev->name[0]) {
- if (dev->type == SFI_DEV_TYPE_I2C &&
- !strncmp(dev->name, i2c_info->type, SFI_NAME_LEN)) {
- pdata = dev->get_platform_data(i2c_info);
- break;
- }
- dev++;
- }
- i2c_info->platform_data = pdata;
-
- if (dev->delay)
- intel_scu_i2c_device_register(bus, i2c_info);
- else
- i2c_register_board_info(bus, i2c_info, 1);
- }
-
-
-static int __init sfi_parse_devs(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_device_table_entry *pentry;
- struct spi_board_info spi_info;
- struct i2c_board_info i2c_info;
- int num, i, bus;
- int ioapic;
- struct io_apic_irq_attr irq_attr;
-
- sb = (struct sfi_table_simple *)table;
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
- pentry = (struct sfi_device_table_entry *)sb->pentry;
-
- for (i = 0; i < num; i++, pentry++) {
- int irq = pentry->irq;
-
- if (irq != (u8)0xff) { /* native RTE case */
- /* these SPI2 devices are not exposed to system as PCI
- * devices, but they have separate RTE entry in IOAPIC
- * so we have to enable them one by one here
- */
- ioapic = mp_find_ioapic(irq);
- irq_attr.ioapic = ioapic;
- irq_attr.ioapic_pin = irq;
- irq_attr.trigger = 1;
- irq_attr.polarity = 1;
- io_apic_set_pci_routing(NULL, irq, &irq_attr);
- } else
- irq = 0; /* No irq */
-
- switch (pentry->type) {
- case SFI_DEV_TYPE_IPC:
- pr_debug("info[%2d]: IPC bus, name = %16.16s, "
- "irq = 0x%2x\n", i, pentry->name, pentry->irq);
- sfi_handle_ipc_dev(pentry);
- break;
- case SFI_DEV_TYPE_SPI:
- memset(&spi_info, 0, sizeof(spi_info));
- strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
- spi_info.irq = irq;
- spi_info.bus_num = pentry->host_num;
- spi_info.chip_select = pentry->addr;
- spi_info.max_speed_hz = pentry->max_freq;
- pr_debug("info[%2d]: SPI bus = %d, name = %16.16s, "
- "irq = 0x%2x, max_freq = %d, cs = %d\n", i,
- spi_info.bus_num,
- spi_info.modalias,
- spi_info.irq,
- spi_info.max_speed_hz,
- spi_info.chip_select);
- sfi_handle_spi_dev(&spi_info);
- break;
- case SFI_DEV_TYPE_I2C:
- memset(&i2c_info, 0, sizeof(i2c_info));
- bus = pentry->host_num;
- strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
- i2c_info.irq = irq;
- i2c_info.addr = pentry->addr;
- pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
- "irq = 0x%2x, addr = 0x%x\n", i, bus,
- i2c_info.type,
- i2c_info.irq,
- i2c_info.addr);
- sfi_handle_i2c_dev(bus, &i2c_info);
- break;
- case SFI_DEV_TYPE_UART:
- case SFI_DEV_TYPE_HSI:
- default:
- ;
- }
- }
- return 0;
-}
-
-static int __init mrst_platform_init(void)
-{
- sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
- sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
- return 0;
-}
-arch_initcall(mrst_platform_init);
-
-/*
- * we will search these buttons in SFI GPIO table (by name)
- * and register them dynamically. Please add all possible
- * buttons here, we will shrink them if no GPIO found.
- */
-static struct gpio_keys_button gpio_button[] = {
- {KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
- {KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
- {KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
- {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
- {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
- {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
- {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
- {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
- {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
- {SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
-};
-
-static struct gpio_keys_platform_data mrst_gpio_keys = {
- .buttons = gpio_button,
- .rep = 1,
- .nbuttons = -1, /* will fill it after search */
-};
-
-static struct platform_device pb_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &mrst_gpio_keys,
- },
-};
-
-/*
- * Shrink the non-existent buttons, register the gpio button
- * device if there is some
- */
-static int __init pb_keys_init(void)
-{
- struct gpio_keys_button *gb = gpio_button;
- int i, num, good = 0;
-
- num = sizeof(gpio_button) / sizeof(struct gpio_keys_button);
- for (i = 0; i < num; i++) {
- gb[i].gpio = get_gpio_by_name(gb[i].desc);
- pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, gb[i].gpio);
- if (gb[i].gpio == -1)
- continue;
-
- if (i != good)
- gb[good] = gb[i];
- good++;
- }
-
- if (good) {
- mrst_gpio_keys.nbuttons = good;
- return platform_device_register(&pb_device);
- }
- return 0;
-}
-late_initcall(pb_keys_init);
static const struct acpi_device_id acpi_platform_device_ids[] = {
{ "PNP0D40" },
-
+ { "BCM43241" },
+ { "BCM2E1A" },
{ }
};
table = acpi_os_map_memory(acpi_tables_addr + table_offset,
ACPI_HEADER_SIZE);
+ if (table == NULL) {
+ pr_err("Error mapping ACPI memory\n");
+ return AE_ERROR;
+ }
+
if (table_offset + table->length > all_tables_size) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
WARN_ON(1);
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
- struct tty_struct *tty = hu->tty;
- struct hci_dev *hdev = hu->hdev;
- struct sk_buff *skb;
-
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
return 0;
BT_DBG("");
+ schedule_work(&hu->write_work);
+
+ return 0;
+}
+
+static void hci_uart_write_work(struct work_struct *work)
+{
+ struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
+ struct tty_struct *tty = hu->tty;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+
+ /* REVISIT: should we cope with bad skbs or ->write() returning
+ * and error value ?
+ */
+
restart:
clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
goto restart;
clear_bit(HCI_UART_SENDING, &hu->tx_state);
- return 0;
+
}
static void hci_uart_init_work(struct work_struct *work)
tty->receive_room = 65536;
INIT_WORK(&hu->init_ready, hci_uart_init_work);
+ INIT_WORK(&hu->write_work, hci_uart_write_work);
spin_lock_init(&hu->rx_lock);
if (hdev)
hci_uart_close(hdev);
+ cancel_work_sync(&hu->write_work);
+
if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
if (hdev) {
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
unsigned long hdev_flags;
struct work_struct init_ready;
+ struct work_struct write_work;
struct hci_uart_proto *proto;
void *priv;
By enabling this option the acpi_cpufreq driver provides the old
entry in addition to the new boost ones, for compatibility reasons.
+config X86_SFI_CPUFREQ
+ tristate "SFI Processor P-States driver"
+ select CPU_FREQ_TABLE
+ depends on SFI
+ help
+ This driver adds a CPUFreq driver which utilizes the SFI
+ Processor Performance States enumeration.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sfi-cpufreq.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
select CPU_FREQ_TABLE
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
+obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o mperf.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
--- /dev/null
+/*
+ * sfi_cpufreq.c - sfi Processor P-States Driver
+ *
+ * (C) 2010-2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Vishwesh M Rudramuni
+ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
+ */
+
+/*
+ * This sfi Processor P-States Driver re-uses most part of the code available
+ * in acpi cpufreq driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/sfi.h>
+#include <linux/io.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+
+#include "sfi-cpufreq.h"
+#include "mperf.h"
+
+MODULE_AUTHOR("Vishwesh Rudramuni");
+MODULE_DESCRIPTION("SFI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+DEFINE_PER_CPU(struct sfi_processor *, sfi_processors);
+
+static DEFINE_MUTEX(performance_mutex);
+static int sfi_cpufreq_num;
+static u32 sfi_cpu_num;
+
+#define SFI_FREQ_MAX 32
+#define INTEL_MSR_RANGE 0xffff
+#define INTEL_MSR_BUSRATIO_MASK 0xff00
+#define SFI_CPU_MAX 8
+
+#define X86_ATOM_ARCH_SLM 0x4a
+
+struct sfi_cpufreq_data {
+ struct sfi_processor_performance *sfi_data;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int max_freq;
+ unsigned int resume;
+};
+
+static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
+struct sfi_freq_table_entry sfi_cpufreq_array[SFI_FREQ_MAX];
+static struct sfi_cpu_table_entry sfi_cpu_array[SFI_CPU_MAX];
+
+/* sfi_perf_data is a pointer to percpu data. */
+static struct sfi_processor_performance *sfi_perf_data;
+
+static struct cpufreq_driver sfi_cpufreq_driver;
+
+static int parse_freq(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_freq_table_entry *pentry;
+	int totallen;
+
+	/* The cast cannot produce NULL from non-NULL; check the input itself */
+	if (!table) {
+		printk(KERN_WARNING "SFI: Unable to map FREQ\n");
+		return -ENODEV;
+	}
+	sb = (struct sfi_table_simple *)table;
+
+	if (!sfi_cpufreq_num) {
+		sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
+			struct sfi_freq_table_entry);
+		/*
+		 * Clamp to the size of sfi_cpufreq_array: a malformed or
+		 * oversized FREQ table must not overflow the static buffer.
+		 */
+		if (sfi_cpufreq_num > SFI_FREQ_MAX) {
+			printk(KERN_WARNING
+				"SFI: FREQ table has %d entries, capping to %d\n",
+				sfi_cpufreq_num, SFI_FREQ_MAX);
+			sfi_cpufreq_num = SFI_FREQ_MAX;
+		}
+		pentry = (struct sfi_freq_table_entry *)sb->pentry;
+		totallen = sfi_cpufreq_num * sizeof(*pentry);
+		memcpy(sfi_cpufreq_array, pentry, totallen);
+	}
+
+	return 0;
+}
+
+static int sfi_processor_get_performance_states(struct sfi_processor *pr)
+{
+	int result = 0;
+	int i;
+
+	pr->performance->state_count = sfi_cpufreq_num;
+	pr->performance->states =
+	    kmalloc(sizeof(struct sfi_processor_px) * sfi_cpufreq_num,
+		    GFP_KERNEL);
+	/*
+	 * Bail out on allocation failure: falling through would
+	 * dereference the NULL states array in the loop below.
+	 */
+	if (!pr->performance->states)
+		return -ENOMEM;
+
+	printk(KERN_INFO "Num p-states %d\n", sfi_cpufreq_num);
+
+	/* Populate the P-states info from the SFI table here */
+	for (i = 0; i < sfi_cpufreq_num; i++) {
+		pr->performance->states[i].core_frequency =
+			sfi_cpufreq_array[i].freq_mhz;
+		pr->performance->states[i].transition_latency =
+			sfi_cpufreq_array[i].latency;
+		pr->performance->states[i].control =
+			sfi_cpufreq_array[i].ctrl_val;
+		printk(KERN_INFO "State [%d]: core_frequency[%d] transition_latency[%d] control[0x%x]\n",
+			i,
+			(u32) pr->performance->states[i].core_frequency,
+			(u32) pr->performance->states[i].transition_latency,
+			(u32) pr->performance->states[i].control);
+	}
+
+	return result;
+}
+
+static int sfi_processor_register_performance(struct sfi_processor_performance
+					      *performance, unsigned int cpu)
+{
+	struct sfi_processor *pr;
+	int result;
+
+	mutex_lock(&performance_mutex);
+
+	pr = per_cpu(sfi_processors, cpu);
+	if (!pr) {
+		mutex_unlock(&performance_mutex);
+		return -ENODEV;
+	}
+
+	if (pr->performance) {
+		mutex_unlock(&performance_mutex);
+		return -EBUSY;
+	}
+
+	WARN_ON(!performance);
+
+	pr->performance = performance;
+
+	/* parse the freq table from sfi */
+	sfi_cpufreq_num = 0;
+	sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, parse_freq);
+
+	/*
+	 * Propagate allocation/parse failures to the caller; otherwise
+	 * cpu_init would proceed with a NULL states array.
+	 */
+	result = sfi_processor_get_performance_states(pr);
+	if (result)
+		pr->performance = NULL;
+
+	mutex_unlock(&performance_mutex);
+	return result;
+}
+
+void sfi_processor_unregister_performance(struct sfi_processor_performance
+ *performance, unsigned int cpu)
+{
+ struct sfi_processor *pr;
+
+
+ mutex_lock(&performance_mutex);
+
+ pr = per_cpu(sfi_processors, cpu);
+ if (!pr) {
+ mutex_unlock(&performance_mutex);
+ return;
+ }
+
+ if (pr->performance)
+ kfree(pr->performance->states);
+ pr->performance = NULL;
+
+ mutex_unlock(&performance_mutex);
+
+ return;
+}
+
+static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
+{
+ int i;
+ struct sfi_processor_performance *perf;
+ u32 sfi_ctrl;
+
+ msr &= INTEL_MSR_BUSRATIO_MASK;
+ perf = data->sfi_data;
+
+ for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ sfi_ctrl = perf->states[data->freq_table[i].index].control
+ & INTEL_MSR_BUSRATIO_MASK;
+ if (sfi_ctrl == msr)
+ return data->freq_table[i].frequency;
+ }
+ return data->freq_table[0].frequency;
+}
+
+
+static u32 get_cur_val(const struct cpumask *mask)
+{
+ u32 val, dummy;
+
+ if (unlikely(cpumask_empty(mask)))
+ return 0;
+
+ rdmsr_on_cpu(cpumask_any(mask), MSR_IA32_PERF_STATUS, &val, &dummy);
+
+ return val;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+ struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
+ unsigned int freq;
+ unsigned int cached_freq;
+
+ pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+
+ if (unlikely(data == NULL ||
+ data->sfi_data == NULL || data->freq_table == NULL)) {
+ return 0;
+ }
+
+ cached_freq = data->freq_table[data->sfi_data->state].frequency;
+ freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+ if (freq != cached_freq) {
+ /*
+ * The dreaded BIOS frequency change behind our back.
+ * Force set the frequency on next target call.
+ */
+ data->resume = 1;
+ }
+
+ pr_debug("cur freq = %u\n", freq);
+
+ return freq;
+}
+
+static int sfi_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct sfi_processor_performance *perf;
+ struct cpufreq_freqs freqs;
+ unsigned int next_state = 0; /* Index into freq_table */
+ unsigned int next_perf_state = 0; /* Index into perf table */
+ int result = 0;
+ u32 lo, hi;
+
+ pr_debug("sfi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
+
+ if (unlikely(data == NULL ||
+ data->sfi_data == NULL || data->freq_table == NULL)) {
+ return -ENODEV;
+ }
+
+ perf = data->sfi_data;
+ result = cpufreq_frequency_table_target(policy,
+ data->freq_table,
+ target_freq,
+ relation, &next_state);
+ if (unlikely(result))
+ return -ENODEV;
+
+ next_perf_state = data->freq_table[next_state].index;
+ if (perf->state == next_perf_state) {
+ if (unlikely(data->resume)) {
+ pr_debug("Called after resume, resetting to P%d\n",
+ next_perf_state);
+ data->resume = 0;
+ } else {
+ pr_debug("Already at target state (P%d)\n",
+ next_perf_state);
+ return 0;
+ }
+ }
+
+ freqs.old = perf->states[perf->state].core_frequency * 1000;
+ freqs.new = data->freq_table[next_state].frequency;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
+ lo = (lo & ~INTEL_MSR_RANGE) |
+ ((u32) perf->states[next_perf_state].control & INTEL_MSR_RANGE);
+ wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
+
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ perf->state = next_perf_state;
+
+ return result;
+}
+
+static int sfi_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+ pr_debug("sfi_cpufreq_verify\n");
+
+ return cpufreq_frequency_table_verify(policy, data->freq_table);
+}
+
+/*
+ * sfi_cpufreq_early_init - initialize SFI P-States library
+ *
+ * Initialize the SFI P-States library (drivers/sfi/processor_perflib.c)
+ * in order to cope with the correct frequency and voltage pairings.
+ */
+static int __init sfi_cpufreq_early_init(void)
+{
+ sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
+ if (!sfi_perf_data) {
+ pr_debug("Memory allocation error for sfi_perf_data.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+
+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ unsigned int valid_states = 0;
+ unsigned int cpu = policy->cpu;
+ struct sfi_cpufreq_data *data;
+ unsigned int result = 0;
+ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+ struct sfi_processor_performance *perf;
+ u32 lo, hi;
+
+ pr_debug("sfi_cpufreq_cpu_init CPU:%d\n", policy->cpu);
+
+ data = kzalloc(sizeof(struct sfi_cpufreq_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
+ per_cpu(drv_data, cpu) = data;
+
+ sfi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+
+ result = sfi_processor_register_performance(data->sfi_data, cpu);
+ if (result)
+ goto err_free;
+
+ perf = data->sfi_data;
+ policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+ cpumask_set_cpu(policy->cpu, policy->related_cpus);
+
+ /* capability check */
+ if (perf->state_count <= 1) {
+ pr_debug("No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ data->freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (perf->state_count+1), GFP_KERNEL);
+ if (!data->freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+
+ /* detect transition latency */
+ policy->cpuinfo.transition_latency = 0;
+ for (i = 0; i < perf->state_count; i++) {
+ if ((perf->states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency)
+ policy->cpuinfo.transition_latency =
+ perf->states[i].transition_latency * 1000;
+ }
+
+ data->max_freq = perf->states[0].core_frequency * 1000;
+ /* table init */
+ for (i = 0; i < perf->state_count; i++) {
+ if (i > 0 && perf->states[i].core_frequency >=
+ data->freq_table[valid_states-1].frequency / 1000)
+ continue;
+
+ data->freq_table[valid_states].index = i;
+ data->freq_table[valid_states].frequency =
+ perf->states[i].core_frequency * 1000;
+ valid_states++;
+ }
+ data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+ perf->state = 0;
+
+ result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ if (result)
+ goto err_freqfree;
+
+ policy->cur = get_cur_freq_on_cpu(cpu);
+
+
+ /* Check for APERF/MPERF support in hardware */
+ if (cpu_has(c, X86_FEATURE_APERFMPERF))
+ sfi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
+
+ /* enable eHALT for SLM */
+ if (boot_cpu_data.x86_model == X86_ATOM_ARCH_SLM) {
+ rdmsr_on_cpu(policy->cpu, MSR_IA32_POWER_MISC, &lo, &hi);
+ lo = lo | ENABLE_ULFM_AUTOCM | ENABLE_INDP_AUTOCM;
+ wrmsr_on_cpu(policy->cpu, MSR_IA32_POWER_MISC, lo, hi);
+ }
+
+ pr_debug("CPU%u - SFI performance management activated.\n", cpu);
+ for (i = 0; i < perf->state_count; i++)
+ pr_debug(" %cP%d: %d MHz, %d uS\n",
+ (i == perf->state ? '*' : ' '), i,
+ (u32) perf->states[i].core_frequency,
+ (u32) perf->states[i].transition_latency);
+
+ cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
+
+ /*
+ * the first call to ->target() should result in us actually
+ * writing something to the appropriate registers.
+ */
+ data->resume = 1;
+
+ return result;
+
+err_freqfree:
+ kfree(data->freq_table);
+err_unreg:
+ sfi_processor_unregister_performance(perf, cpu);
+err_free:
+ kfree(data);
+ per_cpu(drv_data, cpu) = NULL;
+
+ return result;
+}
+
+static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+ pr_debug("sfi_cpufreq_cpu_exit\n");
+
+ if (data) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ per_cpu(drv_data, policy->cpu) = NULL;
+ sfi_processor_unregister_performance(data->sfi_data,
+ policy->cpu);
+ kfree(data->freq_table);
+ kfree(data);
+ }
+
+ return 0;
+}
+
+static int sfi_cpufreq_resume(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_resume\n");
+
+	/*
+	 * drv_data may be NULL if cpu_init failed; every other callback
+	 * (get, target) checks it, so do the same here.
+	 */
+	if (unlikely(data == NULL))
+		return -ENODEV;
+
+	data->resume = 1;
+
+	return 0;
+}
+
+static struct freq_attr *sfi_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver sfi_cpufreq_driver = {
+ .get = get_cur_freq_on_cpu,
+ .verify = sfi_cpufreq_verify,
+ .target = sfi_cpufreq_target,
+ .init = sfi_cpufreq_cpu_init,
+ .exit = sfi_cpufreq_cpu_exit,
+ .resume = sfi_cpufreq_resume,
+ .name = "sfi-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = sfi_cpufreq_attr,
+};
+
+static int __init parse_cpus(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_cpu_table_entry *pentry;
+	int i;
+
+	sb = (struct sfi_table_simple *)table;
+
+	sfi_cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
+	/*
+	 * Clamp to the size of sfi_cpu_array: a CPUS table with more than
+	 * SFI_CPU_MAX entries must not overflow the static buffer.
+	 */
+	if (sfi_cpu_num > SFI_CPU_MAX) {
+		printk(KERN_WARNING "SFI: CPU table has %u entries, capping to %d\n",
+			sfi_cpu_num, SFI_CPU_MAX);
+		sfi_cpu_num = SFI_CPU_MAX;
+	}
+
+	pentry = (struct sfi_cpu_table_entry *) sb->pentry;
+	for (i = 0; i < sfi_cpu_num; i++) {
+		sfi_cpu_array[i].apic_id = pentry->apic_id;
+		printk(KERN_INFO "APIC ID: %d\n", pentry->apic_id);
+		pentry++;
+	}
+
+	return 0;
+
+}
+
+
+static int __init init_sfi_processor_list(void)
+{
+ struct sfi_processor *pr;
+ int i;
+ int result;
+
+ /* parse the cpus from the sfi table */
+ result = sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, parse_cpus);
+
+ if (result < 0)
+ return result;
+
+ pr = kzalloc(sfi_cpu_num * sizeof(struct sfi_processor), GFP_KERNEL);
+ if (!pr)
+ return -ENOMEM;
+
+ for (i = 0; i < sfi_cpu_num; i++) {
+ pr->id = sfi_cpu_array[i].apic_id;
+ per_cpu(sfi_processors, i) = pr;
+ pr++;
+ }
+
+ return 0;
+}
+
+static int __init sfi_cpufreq_init(void)
+{
+ int ret;
+
+ pr_debug("sfi_cpufreq_init\n");
+
+ ret = init_sfi_processor_list();
+ if (ret)
+ return ret;
+
+ ret = sfi_cpufreq_early_init();
+ if (ret)
+ return ret;
+
+ return cpufreq_register_driver(&sfi_cpufreq_driver);
+}
+
+static void __exit sfi_cpufreq_exit(void)
+{
+
+ struct sfi_processor *pr;
+
+ pr_debug("sfi_cpufreq_exit\n");
+
+ pr = per_cpu(sfi_processors, 0);
+ kfree(pr);
+
+ cpufreq_unregister_driver(&sfi_cpufreq_driver);
+
+ free_percpu(sfi_perf_data);
+
+ return;
+}
+late_initcall(sfi_cpufreq_init);
+module_exit(sfi_cpufreq_exit);
+
+MODULE_ALIAS("sfi");
--- /dev/null
+/*
+ * sfi_processor.h
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __SFI_PROCESSOR_H__
+#define __SFI_PROCESSOR_H__
+
+#include <linux/sfi.h>
+#include <linux/cpuidle.h>
+
+struct sfi_processor_power {
+ struct cpuidle_device dev;
+ u32 default_state;
+ int count;
+ struct cpuidle_state *states;
+ struct sfi_cstate_table_entry *sfi_cstates;
+};
+
+struct sfi_processor_flags {
+ u8 valid;
+ u8 power;
+};
+
+struct sfi_processor {
+ u32 id;
+ struct sfi_processor_flags flags;
+ struct sfi_processor_power power;
+ struct sfi_processor_performance *performance;
+};
+
+/* Performance management */
+struct sfi_processor_px {
+ u32 core_frequency; /* megahertz */
+ u32 transition_latency; /* microseconds */
+ u32 control; /* control value */
+};
+
+struct sfi_processor_performance {
+ unsigned int state;
+ unsigned int state_count;
+ struct sfi_processor_px *states;
+};
+
+/* for communication between multiple parts of the processor kernel module */
+DECLARE_PER_CPU(struct sfi_processor *, sfi_processors);
+
+int sfi_processor_power_init(struct sfi_processor *pr);
+int sfi_processor_power_exit(struct sfi_processor *pr);
+
+#endif /*__SFI_PROCESSOR_H__*/
obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
-obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
+obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o intel_mid_dma_acpi.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
#include "dmaengine.h"
-#define MAX_CHAN 4 /*max ch across controllers*/
+#define MAX_CHAN 8 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"
#define INTEL_MID_DMAC1_ID 0x0814
#define INTEL_MID_DMAC2_ID 0x0813
#define INTEL_MID_GP_DMAC2_ID 0x0827
#define INTEL_MFLD_DMAC1_ID 0x0830
-#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
-#define LNW_PERIPHRAL_MASK_SIZE 0x10
-#define LNW_PERIPHRAL_STATUS 0x0
-#define LNW_PERIPHRAL_MASK 0x8
-
-struct intel_mid_dma_probe_info {
- u8 max_chan;
- u8 ch_base;
- u16 block_size;
- u32 pimr_mask;
-};
-
-#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
+#define INTEL_CLV_GP_DMAC2_ID 0x08EF
+#define INTEL_CLV_DMAC1_ID 0x08F0
+#define INTEL_MRFLD_GP_DMAC2_ID 0x11A2
+#define INTEL_MRFLD_DMAC0_ID 0x119B
+#define INTEL_BYT_LPIO1_DMAC_ID 0x0F06
+#define INTEL_BYT_LPIO2_DMAC_ID 0x0F40
+#define INTEL_BYT_DMAC0_ID 0x0F28
+
+#define LNW_PERIPHRAL_MASK_SIZE 0x20
+
+#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask, \
+ _pimr_base, _dword_trf, _pimr_offset, _pci_id, \
+ _pdma_ops) \
((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
.max_chan = (_max_chan), \
.ch_base = (_ch_base), \
.block_size = (_block_size), \
.pimr_mask = (_pimr_mask), \
+ .pimr_base = (_pimr_base), \
+ .dword_trf = (_dword_trf), \
+ .pimr_offset = (_pimr_offset), \
+ .pci_id = (_pci_id), \
+ .pdma_ops = (_pdma_ops) \
})
/*****************************************************************************
* @status: status mask
* @base: dma ch base value
*
- * Modify the status mask and return the channel index needing
- * attention (or -1 if neither)
+ * Returns the channel index by checking the status bits.
+ * If none of the bits in status are set, then returns -1.
*/
-static int get_ch_index(int *status, unsigned int base)
+static int get_ch_index(int status, unsigned int base)
{
int i;
for (i = 0; i < MAX_CHAN; i++) {
- if (*status & (1 << (i + base))) {
- *status = *status & ~(1 << (i + base));
- pr_debug("MDMA: index %d New status %x\n", i, *status);
+ if (status & (1 << (i + base)))
return i;
- }
}
return -1;
}
+static inline bool is_byt_lpio_dmac(struct middma_device *mid)
+{
+ return (mid->pci_id == INTEL_BYT_LPIO1_DMAC_ID ||
+ mid->pci_id == INTEL_BYT_LPIO2_DMAC_ID);
+}
+
+static void dump_dma_reg(struct dma_chan *chan)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ struct middma_device *mid = to_middma_device(chan->device);
+
+ if (!mid->pimr_base)
+ return;
+
+ pr_debug("<<<<<<<<<<<< DMA Dump Start >>>>>>>>>>>>");
+ pr_debug("DMA Dump for Channel id:%d & Chnl Base:%p",
+ midc->ch_id, midc->ch_regs);
+ /* dump common DMA registers */
+ pr_debug("PIMR:\t%#x", readl(mid->mask_reg) - 8);
+ pr_debug("ISRX:\t%#x", readl(mid->mask_reg));
+ pr_debug("ISRD:\t%#x", readl(mid->mask_reg + 0x8));
+ pr_debug("IMRX:\t%#x", readl(mid->mask_reg + 0x10));
+ pr_debug("IMRD:\t%#x", readl(mid->mask_reg + 0x18));
+ pr_debug("DMA_CHAN_EN:\t%#x", readl(midc->dma_base + DMA_CHAN_EN));
+ pr_debug("DMA_CFG:\t%#x", readl(midc->dma_base + DMA_CFG));
+ pr_debug("INTR_STATUS:\t%#x", readl(midc->dma_base + INTR_STATUS));
+ pr_debug("MASK_TFR:\t%#x", readl(midc->dma_base + MASK_TFR));
+ pr_debug("MASK_BLOCK:\t%#x", readl(midc->dma_base + MASK_BLOCK));
+ pr_debug("MASK_ERR:\t%#x", readl(midc->dma_base + MASK_ERR));
+ pr_debug("RAW_TFR:\t%#x", readl(midc->dma_base + RAW_TFR));
+ pr_debug("RAW_BLOCK:\t%#x", readl(midc->dma_base + RAW_BLOCK));
+ pr_debug("RAW_ERR:\t%#x", readl(midc->dma_base + RAW_ERR));
+ pr_debug("STATUS_TFR:\t%#x", readl(midc->dma_base + STATUS_TFR));
+ pr_debug("STATUS_BLOCK:\t%#x", readl(midc->dma_base + STATUS_BLOCK));
+ pr_debug("STATUS_ERR:\t%#x", readl(midc->dma_base + STATUS_ERR));
+ if (!mid->dword_trf) {
+ pr_debug("FIFO_PARTITION0_LO:\t%#x",
+ readl(midc->dma_base + FIFO_PARTITION0_LO));
+ pr_debug("FIFO_PARTITION0_HI:\t%#x",
+ readl(midc->dma_base + FIFO_PARTITION0_HI));
+ pr_debug("FIFO_PARTITION1_LO:\t%#x",
+ readl(midc->dma_base + FIFO_PARTITION1_LO));
+ pr_debug("FIFO_PARTITION1_HI:\t%#x",
+ readl(midc->dma_base + FIFO_PARTITION1_HI));
+ pr_debug("CH_SAI_ERR:\t%#x", readl(midc->dma_base + CH_SAI_ERR));
+ }
+
+ /* dump channel specific registers */
+ pr_debug("SAR:\t%#x", readl(midc->ch_regs + SAR));
+ pr_debug("DAR:\t%#x", readl(midc->ch_regs + DAR));
+ pr_debug("LLP:\t%#x", readl(midc->ch_regs + LLP));
+ pr_debug("CTL_LOW:\t%#x", readl(midc->ch_regs + CTL_LOW));
+ pr_debug("CTL_HIGH:\t%#x", readl(midc->ch_regs + CTL_HIGH));
+ pr_debug("CFG_LOW:\t%#x", readl(midc->ch_regs + CFG_LOW));
+ pr_debug("CFG_HIGH:\t%#x", readl(midc->ch_regs + CFG_HIGH));
+ pr_debug("<<<<<<<<<<<< DMA Dump ends >>>>>>>>>>>>");
+}
+
/**
* get_block_ts - calculates dma transaction length
* @len: dma transfer length
* @tx_width: dma transfer src width
* @block_size: dma controller max block size
+ * @dword_trf: is transfer dword size aligned and needs the data transfer to
+ * be in terms of data items and not bytes
*
* Based on src width calculate the DMA trsaction length in data items
* return data items or FFFF if exceeds max length for block
*/
-static int get_block_ts(int len, int tx_width, int block_size)
+static unsigned int get_block_ts(int len, int tx_width,
+ int block_size, int dword_trf)
{
int byte_width = 0, block_ts = 0;
byte_width = 4;
break;
}
-
- block_ts = len/byte_width;
+ if (dword_trf)
+ block_ts = len/byte_width;
+ else
+ block_ts = len;
if (block_ts > block_size)
block_ts = 0xFFFF;
return block_ts;
}
+/**
+ * get_reg_width - computes the DMA sample width
+ * @kernel_width: Kernel DMA slave bus width
+ *
+ * converts the DMA kernel slave bus width in the Intel DMA
+ * bus width
+ */
+static int get_reg_width(enum dma_slave_buswidth kernel_width)
+{
+ int reg_width = -1;
+
+ switch (kernel_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ reg_width = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ reg_width = 1;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ reg_width = 2;
+ break;
+ case DMA_SLAVE_BUSWIDTH_UNDEFINED:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ default:
+ pr_err("ERR_MDMA: get_reg_width unsupported reg width\n");
+ break;
+ }
+ return reg_width;
+}
+
+
/*****************************************************************************
DMAC1 interrupt Functions*/
u32 pimr;
if (mid->pimr_mask) {
- pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+ pimr = readl(mid->mask_reg + mid->pimr_offset);
pimr |= mid->pimr_mask;
- writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+ writel(pimr, mid->mask_reg + mid->pimr_offset);
}
return;
}
u32 pimr;
struct middma_device *mid = to_middma_device(midc->chan.device);
- if (mid->pimr_mask) {
- pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+ if (mid->pimr_mask && mid->dword_trf) {
+ pimr = readl(mid->mask_reg + mid->pimr_offset);
pimr &= ~mid->pimr_mask;
- writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+ writel(pimr, mid->mask_reg + mid->pimr_offset);
+ }
+ if (mid->pimr_mask && !mid->dword_trf) {
+ pimr = readl(mid->mask_reg + mid->pimr_offset);
+ pimr &= ~(1 << (midc->ch_id + 16));
+ writel(pimr, mid->mask_reg + mid->pimr_offset);
}
return;
}
+/*
+ * Some consumers may need to know how many bytes have been
+ * really transferred for one specific dma channel
+ */
+inline dma_addr_t intel_dma_get_src_addr(struct dma_chan *chan)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ return readl(midc->ch_regs + SAR);
+}
+EXPORT_SYMBOL(intel_dma_get_src_addr);
+
+inline dma_addr_t intel_dma_get_dst_addr(struct dma_chan *chan)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ return readl(midc->ch_regs + DAR);
+}
+EXPORT_SYMBOL(intel_dma_get_dst_addr);
+
/**
* enable_dma_interrupt - enable the periphral interrupt
* @midc: dma channel for which enable interrupt is required
*/
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
+ struct middma_device *mid = to_middma_device(midc->chan.device);
+
dmac1_unmask_periphral_intr(midc);
/*en ch interrupts*/
iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+ set_bit(midc->ch_id, &mid->tfr_intr_mask);
iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
return;
}
*/
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
+ struct middma_device *mid = to_middma_device(midc->chan.device);
+ u32 pimr;
+
/*Check LPE PISR, make sure fwd is disabled*/
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
+ clear_bit(midc->ch_id, &mid->block_intr_mask);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+ clear_bit(midc->ch_id, &mid->tfr_intr_mask);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
+ if (mid->pimr_mask && !mid->dword_trf) {
+ pimr = readl(mid->mask_reg + mid->pimr_offset);
+ pimr |= (1 << (midc->ch_id + 16));
+ writel(pimr, mid->mask_reg + mid->pimr_offset);
+ }
+
+ return;
+}
+
+/**
+ * clear_dma_channel_interrupt - clear channel interrupt
+ * @midc: dma channel for which clear interrupt is required
+ *
+ */
+static void clear_dma_channel_interrupt(struct intel_mid_dma_chan *midc)
+{
+ struct middma_device *mid = to_middma_device(midc->chan.device);
+
+ /*clear these interrupts first*/
+ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
+ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
+ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
+
+
return;
}
* Load a transaction into the engine. This must be called with midc->lock
* held and bh disabled.
*/
-static void midc_dostart(struct intel_mid_dma_chan *midc,
+static int midc_dostart(struct intel_mid_dma_chan *midc,
struct intel_mid_dma_desc *first)
{
struct middma_device *mid = to_middma_device(midc->chan.device);
/*error*/
pr_err("ERR_MDMA: channel is busy in start\n");
/* The tasklet will hopefully advance the queue... */
- return;
+ return -EBUSY;
}
midc->busy = true;
/*write registers and en*/
iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
- pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
+ pr_debug("MDMA:TX SAR %x,DAR %x,CFGH %x,CFGL %x,CTLH %x, CTLL %x LLI %x",
(int)first->sar, (int)first->dar, first->cfg_hi,
- first->cfg_lo, first->ctl_hi, first->ctl_lo);
+ first->cfg_lo, first->ctl_hi, first->ctl_lo, (int)first->lli_phys);
first->status = DMA_IN_PROGRESS;
iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+ return 0;
}
/**
else
desc->current_lli = 0;
}
- spin_unlock_bh(&midc->lock);
- if (callback_txd) {
- pr_debug("MDMA: TXD callback set ... calling\n");
- callback_txd(param_txd);
- }
if (midc->raw_tfr) {
+ list_del(&desc->desc_node);
desc->status = DMA_SUCCESS;
- if (desc->lli != NULL) {
- pci_pool_free(desc->lli_pool, desc->lli,
+ if (desc->lli != NULL && desc->lli->llp != 0)
+ dma_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
- pci_pool_destroy(desc->lli_pool);
- desc->lli = NULL;
- }
- list_move(&desc->desc_node, &midc->free_list);
+ list_add(&desc->desc_node, &midc->free_list);
midc->busy = false;
+ midc->raw_tfr = 0;
+ spin_unlock_bh(&midc->lock);
+ } else {
+ spin_unlock_bh(&midc->lock);
+ }
+ if (callback_txd) {
+ pr_debug("MDMA: TXD callback set ... calling\n");
+ callback_txd(param_txd);
}
+
spin_lock_bh(&midc->lock);
+}
+static struct
+intel_mid_dma_desc *midc_first_queued(struct intel_mid_dma_chan *midc)
+{
+ return list_entry(midc->queue.next, struct intel_mid_dma_desc, desc_node);
}
-/**
- * midc_scan_descriptors - check the descriptors in channel
- * mark completed when tx is completete
- * @mid: device
- * @midc: channel to scan
- *
- * Walk the descriptor chain for the device and process any entries
- * that are complete.
- */
-static void midc_scan_descriptors(struct middma_device *mid,
+
+static void midc_collect_descriptors(struct middma_device *mid,
struct intel_mid_dma_chan *midc)
{
struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
-
/*tx is complete*/
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
if (desc->status == DMA_IN_PROGRESS)
midc_descriptor_complete(midc, desc);
}
- return;
+
+}
+
+/**
+ * midc_start_descriptors - start the descriptors in queue
+ *
+ * @mid: device
+ * @midc: channel to scan
+ *
+ */
+static void midc_start_descriptors(struct middma_device *mid,
+ struct intel_mid_dma_chan *midc)
+{
+ if (!list_empty(&midc->queue)) {
+ pr_debug("MDMA: submitting txn in queue\n");
+ if (0 == midc_dostart(midc, midc_first_queued(midc)))
+ list_splice_init(&midc->queue, &midc->active_list);
+ else
+ pr_warn("Submit failed as ch is busy\n");
}
+ return;
+}
+
/**
* midc_lli_fill_sg - Helper function to convert
* SG list to Linked List Items.
*/
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
struct intel_mid_dma_desc *desc,
- struct scatterlist *sglist,
+ struct scatterlist *src_sglist,
+ struct scatterlist *dst_sglist,
unsigned int sglen,
unsigned int flags)
{
dma_addr_t lli_next, sg_phy_addr;
struct intel_mid_dma_lli *lli_bloc_desc;
union intel_mid_dma_ctl_lo ctl_lo;
- union intel_mid_dma_ctl_hi ctl_hi;
+ u32 ctl_hi;
int i;
- pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+ pr_debug("MDMA: Entered %s\n", __func__);
mids = midc->mid_slave;
lli_bloc_desc = desc->lli;
lli_next = desc->lli_phys;
ctl_lo.ctl_lo = desc->ctl_lo;
- ctl_hi.ctl_hi = desc->ctl_hi;
- for_each_sg(sglist, sg, sglen, i) {
+ ctl_hi = desc->ctl_hi;
+ for_each_sg(src_sglist, sg, sglen, i) {
/*Populate CTL_LOW and LLI values*/
if (i != sglen - 1) {
lli_next = lli_next +
lli_next = desc->lli_phys;
} else {
lli_next = 0;
- ctl_lo.ctlx.llp_dst_en = 0;
- ctl_lo.ctlx.llp_src_en = 0;
+ /* llp_dst_en = 0 llp_src_en = 0 */
+ ctl_lo.ctl_lo &= ~(1 << CTL_LO_BIT_LLP_DST_EN);
+ ctl_lo.ctl_lo &= ~(1 << CTL_LO_BIT_LLP_SRC_EN);
}
}
/*Populate CTL_HI values*/
- ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
- desc->width,
- midc->dma->block_size);
+ ctl_hi = get_block_ts(sg->length, desc->width,
+ midc->dma->block_size, midc->dma->dword_trf);
/*Populate SAR and DAR values*/
- sg_phy_addr = sg_dma_address(sg);
+ sg_phy_addr = sg_phys(sg);
if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
} else if (desc->dirn == DMA_DEV_TO_MEM) {
lli_bloc_desc->sar = mids->dma_slave.src_addr;
lli_bloc_desc->dar = sg_phy_addr;
+ } else if (desc->dirn == DMA_MEM_TO_MEM && dst_sglist) {
+ lli_bloc_desc->sar = sg_phy_addr;
+ lli_bloc_desc->dar = sg_phys(dst_sglist);
}
/*Copy values into block descriptor in system memroy*/
lli_bloc_desc->llp = lli_next;
lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
- lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+ lli_bloc_desc->ctl_hi = ctl_hi;
+ pr_debug("MDMA:Calc CTL LO %x, CTL HI %x src: %x dest: %x sg->l:%x\n",
+ ctl_lo.ctl_lo, lli_bloc_desc->ctl_hi,
+ lli_bloc_desc->sar, lli_bloc_desc->dar, sg->length);
lli_bloc_desc++;
+ if (dst_sglist)
+ dst_sglist = sg_next(dst_sglist);
}
/*Copy very first LLI values to descriptor*/
desc->ctl_lo = desc->lli->ctl_lo;
return 0;
}
+
/*****************************************************************************
DMA engine callback Functions*/
/**
* intel_mid_dma_tx_submit - callback to submit DMA transaction
* @tx: dma engine descriptor
*
- * Submit the DMA transaction for this descriptor, start if ch idle
+ * Submit the DMA transaction for this descriptor, start if ch idle
*/
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
dma_cookie_t cookie;
spin_lock_bh(&midc->lock);
+
+ if (unlikely(!midc->in_use)) {
+ spin_unlock_bh(&midc->lock);
+ WARN(1, "chan[%d] gets new request after close",
+ tx->chan->chan_id);
+ return -EIO;
+ }
+
cookie = dma_cookie_assign(tx);
if (list_empty(&midc->active_list))
spin_lock_bh(&midc->lock);
if (!list_empty(&midc->queue))
- midc_scan_descriptors(to_middma_device(chan->device), midc);
+ midc_start_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);
}
/**
+ * dma_wait_for_suspend - performs following functionality
+ * 1. Suspends channel using mask bits
+ * 2. Wait till FIFO to get empty
+ * 3. Disable channel
+ * 4. restore the previous masked bits
+ *
+ * @chan: chan where pending transaction needs to be checked and submitted
+ * @mask: mask bits to be used for suspend operation
+ *
+ */
+static inline void dma_wait_for_suspend(struct dma_chan *chan, unsigned int mask)
+{
+ union intel_mid_dma_cfg_lo cfg_lo;
+ struct middma_device *mid = to_middma_device(chan->device);
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ int i;
+
+ /* Suspend channel */
+ cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+ cfg_lo.cfg_lo |= mask;
+ iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+ /* wait till FIFO gets empty */
+ /* FIFO should be cleared in couple of milli secs */
+ for (i = 0; i < 10; i++) {
+ cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+ if (cfg_lo.cfgx.fifo_empty)
+ break;
+ /* use delay since this might called from atomic context */
+ mdelay(1);
+ }
+ pr_debug("waited for %d ms for FIFO to get empty", i);
+ iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+
+ cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+ cfg_lo.cfg_lo &= ~mask;
+ iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+}
+/**
+ * intel_mid_dma_chan_suspend_v1 - suspends the given channel, waits
+ * till FIFO is cleared and disables channel.
+ * @chan: chan where pending transaction needs to be checked and submitted
+ *
+ */
+static void intel_mid_dma_chan_suspend_v1(struct dma_chan *chan)
+{
+
+ pr_debug("%s", __func__);
+ dma_wait_for_suspend(chan, CH_SUSPEND);
+}
+
+/**
+ * intel_mid_dma_chan_suspend_v2 - suspends the given channel, waits
+ * till FIFO is cleared and disables channel.
+ * @chan: chan where pending transaction needs to be checked and submitted
+ *
+ */
+static void intel_mid_dma_chan_suspend_v2(struct dma_chan *chan)
+{
+ pr_debug("%s", __func__);
+ dma_wait_for_suspend(chan, CH_SUSPEND | CH_DRAIN);
+}
+
+/**
* intel_mid_dma_tx_status - Return status of txn
* @chan: chan for where status needs to be checked
* @cookie: cookie for txn
ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS) {
spin_lock_bh(&midc->lock);
- midc_scan_descriptors(to_middma_device(chan->device), midc);
+ midc_start_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);
ret = dma_cookie_status(chan, cookie, txstate);
midc->mid_slave = mid_slave;
return 0;
}
+
/**
* intel_mid_dma_device_control - DMA device control
* @chan: chan for DMA control
struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc;
- union intel_mid_dma_cfg_lo cfg_lo;
+ pr_debug("%s:CMD:%d for channel:%d\n", __func__, cmd, midc->ch_id);
if (cmd == DMA_SLAVE_CONFIG)
return dma_slave_control(chan, arg);
spin_unlock_bh(&midc->lock);
return 0;
}
- /*Suspend and disable the channel*/
- cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
- cfg_lo.cfgx.ch_susp = 1;
- iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
- iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
- midc->busy = false;
- /* Disable interrupts */
+ /* Disable CH interrupts */
disable_dma_interrupt(midc);
+ /* clear channel interrupts */
+ clear_dma_channel_interrupt(midc);
+ mid->dma_ops.dma_chan_suspend(chan);
+ midc->busy = false;
midc->descs_allocated = 0;
-
- spin_unlock_bh(&midc->lock);
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
- if (desc->lli != NULL) {
- pci_pool_free(desc->lli_pool, desc->lli,
+ list_del(&desc->desc_node);
+ if (desc->lli != NULL)
+ dma_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
- pci_pool_destroy(desc->lli_pool);
- desc->lli = NULL;
- }
- list_move(&desc->desc_node, &midc->free_list);
+ list_add(&desc->desc_node, &midc->free_list);
}
+ spin_unlock_bh(&midc->lock);
+
return 0;
}
-
/**
* intel_mid_dma_prep_memcpy - Prep memcpy txn
* @chan: chan for DMA transfer
struct intel_mid_dma_desc *desc = NULL;
struct intel_mid_dma_slave *mids;
union intel_mid_dma_ctl_lo ctl_lo;
- union intel_mid_dma_ctl_hi ctl_hi;
+ u32 ctl_hi;
union intel_mid_dma_cfg_lo cfg_lo;
union intel_mid_dma_cfg_hi cfg_hi;
enum dma_slave_buswidth width;
+ int dst_reg_width = 0;
+ int src_reg_width = 0;
pr_debug("MDMA: Prep for memcpy\n");
BUG_ON(!chan);
mids = midc->mid_slave;
BUG_ON(!mids);
+ if (unlikely(!midc->in_use)) {
+ pr_err("ERR_MDMA: %s: channel not in use", __func__);
+ return NULL;
+ }
+
pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
midc->dma->pci_id, midc->ch_id, len);
pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
}
} else {
cfg_hi.cfgx.protctl = 0x1; /*default value*/
- cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
+ /* Baytrail DMAC uses dynamic device instance */
+ if (is_byt_lpio_dmac(midc->dma))
+ cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
+ mids->device_instance;
+ else
+ cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
midc->ch_id - midc->dma->chan_base;
}
}
-
/*calculate CTL_HI*/
- ctl_hi.ctlx.reser = 0;
- ctl_hi.ctlx.done = 0;
width = mids->dma_slave.src_addr_width;
-
- ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
+ ctl_hi = get_block_ts(len, width, midc->dma->block_size, midc->dma->dword_trf);
pr_debug("MDMA:calc len %d for block size %d\n",
- ctl_hi.ctlx.block_ts, midc->dma->block_size);
+ ctl_hi, midc->dma->block_size);
/*calculate CTL_LO*/
ctl_lo.ctl_lo = 0;
ctl_lo.ctlx.int_en = 1;
+
+ dst_reg_width = get_reg_width(mids->dma_slave.dst_addr_width);
+ if (dst_reg_width < 0) {
+ pr_err("ERR_MDMA: Failed to get DST reg width\n");
+ return NULL;
+
+ }
+ ctl_lo.ctlx.dst_tr_width = dst_reg_width;
+
+ src_reg_width = get_reg_width(mids->dma_slave.src_addr_width);
+ if (src_reg_width < 0) {
+ pr_err("ERR_MDMA: Failed to get SRC reg width\n");
+ return NULL;
+ }
+ ctl_lo.ctlx.src_tr_width = src_reg_width;
ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
- /*
- * Here we need some translation from "enum dma_slave_buswidth"
- * to the format for our dma controller
- * standard intel_mid_dmac's format
- * 1 Byte 0b000
- * 2 Bytes 0b001
- * 4 Bytes 0b010
- */
- ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
- ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
-
if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
ctl_lo.ctlx.tt_fc = 0;
ctl_lo.ctlx.sinc = 0;
}
pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
- ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+ ctl_lo.ctl_lo, ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
enable_dma_interrupt(midc);
desc->cfg_hi = cfg_hi.cfg_hi;
desc->cfg_lo = cfg_lo.cfg_lo;
desc->ctl_lo = ctl_lo.ctl_lo;
- desc->ctl_hi = ctl_hi.ctl_hi;
+ desc->ctl_hi = ctl_hi;
desc->width = width;
desc->dirn = mids->dma_slave.direction;
desc->lli_phys = 0;
midc_desc_put(midc, desc);
return NULL;
}
+
/**
- * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+ * intel_mid_dma_prep_memcpy_v2 - Prep memcpy txn
* @chan: chan for DMA transfer
- * @sgl: scatter gather list
- * @sg_len: length of sg txn
- * @direction: DMA transfer dirtn
+ * @dest: destn address
+ * @src: src address
+ * @len: DMA transfer len
* @flags: DMA flags
- * @context: transfer context (ignored)
+ *
+ * Perform a DMA memcpy. Note we support slave periphral DMA transfers only
+ * The periphral txn details should be filled in slave structure properly
+ * Returns the descriptor for this txn
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy_v2(
+ struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct intel_mid_dma_chan *midc;
+ struct intel_mid_dma_desc *desc = NULL;
+ struct intel_mid_dma_slave *mids;
+ union intel_mid_dma_ctl_lo ctl_lo;
+ u32 ctl_hi;
+ union intel_mid_dma_cfg_lo cfg_lo;
+ union intel_mid_dma_cfg_hi cfg_hi;
+ enum dma_slave_buswidth width;
+ int dst_reg_width = 0;
+ int src_reg_width = 0;
+
+ pr_debug("MDMA:%s\n", __func__);
+ BUG_ON(!chan);
+ if (!len)
+ return NULL;
+
+ midc = to_intel_mid_dma_chan(chan);
+ BUG_ON(!midc);
+
+ mids = midc->mid_slave;
+ BUG_ON(!mids);
+
+ if (unlikely(!midc->in_use)) {
+ pr_err("ERR_MDMA: %s: channel not in use", __func__);
+ return NULL;
+ }
+
+ pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
+ midc->dma->pci_id, midc->ch_id, len);
+ pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
+ mids->cfg_mode, mids->dma_slave.direction,
+ mids->hs_mode, mids->dma_slave.src_addr_width);
+
+ /*calculate CFG_LO*/
+ cfg_lo.cfgx_v2.dst_burst_align = 1;
+ cfg_lo.cfgx_v2.src_burst_align = 1;
+
+ /*calculate CFG_HI*/
+ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+ /*SW HS only*/
+ cfg_hi.cfg_hi = 0;
+ } else {
+ cfg_hi.cfg_hi = 0;
+ if (midc->dma->pimr_mask) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+ cfg_hi.cfgx_v2.src_per = 0;
+ if (mids->device_instance == 0)
+ cfg_hi.cfgx_v2.dst_per = 1;
+ if (mids->device_instance == 1)
+ cfg_hi.cfgx_v2.dst_per = 3;
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+ if (mids->device_instance == 0)
+ cfg_hi.cfgx_v2.src_per = 0;
+ if (mids->device_instance == 1)
+ cfg_hi.cfgx_v2.src_per = 2;
+ cfg_hi.cfgx_v2.dst_per = 0;
+ }
+ } else {
+ cfg_hi.cfgx_v2.src_per = cfg_hi.cfgx_v2.dst_per =
+ midc->ch_id - midc->dma->chan_base;
+ }
+ }
+ /*calculate CTL_HI*/
+ width = mids->dma_slave.src_addr_width;
+ ctl_hi = get_block_ts(len, width, midc->dma->block_size, midc->dma->dword_trf);
+ pr_debug("MDMA:calc len %d for block size %d\n",
+ ctl_hi, midc->dma->block_size);
+ /*calculate CTL_LO*/
+ ctl_lo.ctl_lo = 0;
+ ctl_lo.ctlx_v2.int_en = 1;
+
+ dst_reg_width = get_reg_width(mids->dma_slave.dst_addr_width);
+ if (dst_reg_width < 0) {
+ pr_err("ERR_MDMA: Failed to get DST reg width\n");
+ return NULL;
+
+ }
+ ctl_lo.ctlx_v2.dst_tr_width = dst_reg_width;
+
+ src_reg_width = get_reg_width(mids->dma_slave.src_addr_width);
+ if (src_reg_width < 0) {
+ pr_err("ERR_MDMA: Failed to get SRC reg width\n");
+ return NULL;
+ }
+ ctl_lo.ctlx_v2.src_tr_width = src_reg_width;
+ ctl_lo.ctlx_v2.dst_msize = mids->dma_slave.src_maxburst;
+ ctl_lo.ctlx_v2.src_msize = mids->dma_slave.dst_maxburst;
+
+ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+ ctl_lo.ctlx_v2.tt_fc = 0;
+ ctl_lo.ctlx_v2.sinc = 0;
+ ctl_lo.ctlx_v2.dinc = 0;
+ } else {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+ ctl_lo.ctlx_v2.sinc = 0;
+ ctl_lo.ctlx_v2.dinc = 1;
+ ctl_lo.ctlx_v2.tt_fc = 1;
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+ ctl_lo.ctlx_v2.sinc = 1;
+ ctl_lo.ctlx_v2.dinc = 0;
+ ctl_lo.ctlx_v2.tt_fc = 2;
+ }
+ }
+
+ pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
+ ctl_lo.ctl_lo, ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+
+ enable_dma_interrupt(midc);
+
+ desc = midc_desc_get(midc);
+ if (desc == NULL)
+ goto err_desc_get;
+ desc->sar = src;
+ desc->dar = dest ;
+ desc->len = len;
+ desc->cfg_hi = cfg_hi.cfg_hi;
+ desc->cfg_lo = cfg_lo.cfg_lo;
+ desc->ctl_lo = ctl_lo.ctl_lo;
+ desc->ctl_hi = ctl_hi;
+ desc->width = width;
+ desc->dirn = mids->dma_slave.direction;
+ desc->lli_phys = 0;
+ desc->lli = NULL;
+ desc->lli_pool = NULL;
+ return &desc->txd;
+
+err_desc_get:
+ pr_err("ERR_MDMA: Failed to get desc\n");
+ midc_desc_put(midc, desc);
+ return NULL;
+}
+
+/**
+ * intel_mid_dma_chan_prep_desc
+ * @chan: chan for DMA transfer
+ * @src_sg: destination scatter gather list
+ * @dst_sg: source scatter gather list
+ * @flags: DMA flags
+ * @src_sg_len: length of src sg list
+ * @direction DMA transfer dirtn
*
* Prepares LLI based periphral transfer
*/
-static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *intel_mid_dma_chan_prep_desc(
+ struct dma_chan *chan, struct scatterlist *src_sg,
+ struct scatterlist *dst_sg, unsigned long flags,
+ unsigned long src_sg_len,
+ enum dma_transfer_direction direction)
{
+ struct middma_device *mid = NULL;
struct intel_mid_dma_chan *midc = NULL;
struct intel_mid_dma_slave *mids = NULL;
struct intel_mid_dma_desc *desc = NULL;
struct dma_async_tx_descriptor *txd = NULL;
union intel_mid_dma_ctl_lo ctl_lo;
+ pr_debug("MDMA:intel_mid_dma_chan_prep_desc\n");
- pr_debug("MDMA: Prep for slave SG\n");
-
- if (!sg_len) {
- pr_err("MDMA: Invalid SG length\n");
- return NULL;
- }
midc = to_intel_mid_dma_chan(chan);
BUG_ON(!midc);
+ mid = to_middma_device(midc->chan.device);
mids = midc->mid_slave;
BUG_ON(!mids);
if (!midc->dma->pimr_mask) {
- /* We can still handle sg list with only one item */
- if (sg_len == 1) {
- txd = intel_mid_dma_prep_memcpy(chan,
- mids->dma_slave.dst_addr,
- mids->dma_slave.src_addr,
- sg_dma_len(sgl),
- flags);
- return txd;
- } else {
- pr_warn("MDMA: SG list is not supported by this controller\n");
- return NULL;
- }
+ pr_err("MDMA: SG list is not supported by this controller\n");
+ return NULL;
}
- pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
- sg_len, direction, flags);
-
- txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
+ txd = midc->dma->dma_ops.device_prep_dma_memcpy(chan, 0, 0, src_sg->length, flags);
if (NULL == txd) {
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
desc = to_intel_mid_dma_desc(txd);
desc->dirn = direction;
ctl_lo.ctl_lo = desc->ctl_lo;
- ctl_lo.ctlx.llp_dst_en = 1;
- ctl_lo.ctlx.llp_src_en = 1;
+ ctl_lo.ctl_lo |= (1 << CTL_LO_BIT_LLP_DST_EN);
+ ctl_lo.ctl_lo |= (1 << CTL_LO_BIT_LLP_SRC_EN);
desc->ctl_lo = ctl_lo.ctl_lo;
- desc->lli_length = sg_len;
+ desc->lli_length = src_sg_len;
desc->current_lli = 0;
/* DMA coherent memory pool for LLI descriptors*/
- desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
- midc->dma->pdev,
- (sizeof(struct intel_mid_dma_lli)*sg_len),
+ desc->lli_pool = dma_pool_create("intel_mid_dma_lli_pool",
+ midc->dma->dev,
+ (sizeof(struct intel_mid_dma_lli)*src_sg_len),
32, 0);
if (NULL == desc->lli_pool) {
pr_err("MID_DMA:LLI pool create failed\n");
return NULL;
}
+ midc->lli_pool = desc->lli_pool;
- desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+ desc->lli = dma_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
if (!desc->lli) {
pr_err("MID_DMA: LLI alloc failed\n");
- pci_pool_destroy(desc->lli_pool);
+ dma_pool_destroy(desc->lli_pool);
return NULL;
}
-
- midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
+ midc_lli_fill_sg(midc, desc, src_sg, dst_sg, src_sg_len, flags);
if (flags & DMA_PREP_INTERRUPT) {
+ /* Enable Block intr, disable TFR intr.
+ * It's not required to enable TFR when Block intr is enabled;
+ * otherwise, for the last block we would end up invoking
+ * callback_txd two times */
+
+ iowrite32(MASK_INTR_REG(midc->ch_id),
+ midc->dma_base + MASK_TFR);
+ clear_bit(midc->ch_id, &mid->tfr_intr_mask);
iowrite32(UNMASK_INTR_REG(midc->ch_id),
- midc->dma_base + MASK_BLOCK);
- pr_debug("MDMA:Enabled Block interrupt\n");
+ midc->dma_base + MASK_BLOCK);
+ set_bit(midc->ch_id, &mid->block_intr_mask);
+ midc->block_intr_status = true;
+ pr_debug("MDMA: Enabled Block Interrupt\n");
}
return &desc->txd;
+
+}
+
+/**
+ * intel_mid_dma_prep_sg - Prep sg txn
+ * @chan: chan for DMA transfer
+ * @dst_sg: destination scatter gather list
+ * @dst_sg_len: length of dest sg list
+ * @src_sg: source scatter gather list
+ * @src_sg_len: length of src sg list
+ * @flags: DMA flags
+ *
+ * Prepares LLI based periphral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_sg(
+ struct dma_chan *chan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+
+ pr_debug("MDMA: Prep for memcpy SG\n");
+
+ if ((dst_sg_len != src_sg_len) || (dst_sg == NULL) ||
+ (src_sg == NULL)) {
+ pr_err("MDMA: Invalid SG length\n");
+ return NULL;
+ }
+
+ pr_debug("MDMA: SG Length = %d, Flags = %#lx, src_sg->length = %d\n",
+ src_sg_len, flags, src_sg->length);
+
+ return intel_mid_dma_chan_prep_desc(chan, src_sg, dst_sg, flags,
+ src_sg_len, DMA_MEM_TO_MEM);
+
+}
+
+/**
+ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+ * @chan: chan for DMA transfer
+ * @sgl: scatter gather list
+ * @sg_len: length of sg txn
+ * @direction: DMA transfer dirtn
+ * @flags: DMA flags
+ * @context: transfer context (ignored)
+ *
+ * Prepares LLI based periphral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sg,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+
+ pr_debug("MDMA: Prep for slave SG\n");
+
+ if (!sg_len || sg == NULL) {
+ pr_err("MDMA: Invalid SG length\n");
+ return NULL;
+ }
+ pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
+ sg_len, direction, flags);
+ if (direction != DMA_MEM_TO_MEM) {
+ return intel_mid_dma_chan_prep_desc(chan, sg, NULL, flags,
+ sg_len, direction);
+ } else {
+ pr_err("MDMA: Invalid Direction\n");
+ return NULL;
+ }
}
/**
struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc;
+ pr_debug("entry:%s\n", __func__);
+ if (false == midc->in_use) {
+ pr_err("ERR_MDMA: try to free chnl already freed\n");
+ return;
+ }
if (true == midc->busy) {
/*trying to free ch in use!!!!!*/
pr_err("ERR_MDMA: trying to free ch in use\n");
+ dump_dma_reg(chan);
}
+
+ /* Disable CH interrupts */
+ disable_dma_interrupt(midc);
+ clear_dma_channel_interrupt(midc);
+
+ midc->block_intr_status = false;
+ midc->in_use = false;
+ midc->busy = false;
+
+ tasklet_unlock_wait(&mid->tasklet);
+
spin_lock_bh(&midc->lock);
midc->descs_allocated = 0;
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
list_del(&desc->desc_node);
- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
}
list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
list_del(&desc->desc_node);
- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
}
list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
list_del(&desc->desc_node);
- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
}
+ midc->raw_tfr = 0;
spin_unlock_bh(&midc->lock);
- midc->in_use = false;
- midc->busy = false;
- /* Disable CH interrupts */
- iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
- iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
- pm_runtime_put(&mid->pdev->dev);
+
+ if (midc->lli_pool) {
+ dma_pool_destroy(midc->lli_pool);
+ midc->lli_pool = NULL;
+ }
+
+ /* Disable the channel */
+ iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+ pm_runtime_put(mid->dev);
}
/**
dma_addr_t phys;
int i = 0;
- pm_runtime_get_sync(&mid->pdev->dev);
+ pm_runtime_get_sync(mid->dev);
if (mid->state == SUSPENDED) {
- if (dma_resume(&mid->pdev->dev)) {
+ if (dma_resume(mid->dev)) {
pr_err("ERR_MDMA: resume failed");
return -EFAULT;
}
}
/* ASSERT: channel is idle */
- if (test_ch_en(mid->dma_base, midc->ch_id)) {
- /*ch is not idle*/
+ if (midc->in_use == true) {
pr_err("ERR_MDMA: ch not idle\n");
- pm_runtime_put(&mid->pdev->dev);
+ pm_runtime_put(mid->dev);
return -EIO;
}
dma_cookie_init(chan);
spin_lock_bh(&midc->lock);
while (midc->descs_allocated < DESCS_PER_CHANNEL) {
spin_unlock_bh(&midc->lock);
- desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
+ desc = dma_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
if (!desc) {
pr_err("ERR_MDMA: desc failed\n");
- pm_runtime_put(&mid->pdev->dev);
+ pm_runtime_put(mid->dev);
return -ENOMEM;
/*check*/
}
i = ++midc->descs_allocated;
list_add_tail(&desc->desc_node, &midc->free_list);
}
+ midc->busy = false;
spin_unlock_bh(&midc->lock);
midc->in_use = true;
- midc->busy = false;
+ midc->block_intr_status = false;
pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
return i;
}
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
-	midc_scan_descriptors(mid, midc);
+	/* the single scan pass is split into a collect + start pair here */
+	midc_collect_descriptors(mid, midc);
+	midc_start_descriptors(mid, midc);
}
/**
struct intel_mid_dma_chan *midc = NULL;
u32 status, raw_tfr, raw_block;
int i;
-
mid = (struct middma_device *)data;
if (mid == NULL) {
pr_err("ERR_MDMA: tasklet Null param\n");
return;
}
- pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
raw_tfr = ioread32(mid->dma_base + RAW_TFR);
- raw_block = ioread32(mid->dma_base + RAW_BLOCK);
- status = raw_tfr | raw_block;
- status &= mid->intr_mask;
+ status = raw_tfr & mid->tfr_intr_mask;
+ pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+ pr_debug("tfr_mask:%#lx, raw_tfr:%#x, status:%#x\n",
+ mid->tfr_intr_mask, raw_tfr, status);
while (status) {
/*txn interrupt*/
- i = get_ch_index(&status, mid->chan_base);
+ i = get_ch_index(status, mid->chan_base);
if (i < 0) {
pr_err("ERR_MDMA:Invalid ch index %x\n", i);
return;
}
+ /* clear the status bit */
+ status = status & ~(1 << (i + mid->chan_base));
midc = &mid->ch[i];
if (midc == NULL) {
pr_err("ERR_MDMA:Null param midc\n");
}
pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
status, midc->ch_id, i);
- midc->raw_tfr = raw_tfr;
- midc->raw_block = raw_block;
spin_lock_bh(&midc->lock);
+ midc->raw_tfr = raw_tfr;
/*clearing this interrupts first*/
iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
- if (raw_block) {
- iowrite32((1 << midc->ch_id),
- mid->dma_base + CLEAR_BLOCK);
+ if (likely(midc->in_use)) {
+ midc_collect_descriptors(mid, midc);
+ midc_start_descriptors(mid, midc);
}
- midc_scan_descriptors(mid, midc);
pr_debug("MDMA:Scan of desc... complete, unmasking\n");
iowrite32(UNMASK_INTR_REG(midc->ch_id),
- mid->dma_base + MASK_TFR);
- if (raw_block) {
- iowrite32(UNMASK_INTR_REG(midc->ch_id),
- mid->dma_base + MASK_BLOCK);
+ mid->dma_base + MASK_TFR);
+ spin_unlock_bh(&midc->lock);
+ }
+
+ raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+ status = raw_block & mid->block_intr_mask;
+ pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+ pr_debug("block_mask:%#lx, raw_block%#x, status:%#x\n",
+ mid->block_intr_mask, raw_block, status);
+ while (status) {
+ /*txn interrupt*/
+ i = get_ch_index(status, mid->chan_base);
+ if (i < 0) {
+ pr_err("ERR_MDMA:Invalid ch index %x\n", i);
+ return;
+ }
+ /* clear the status bit */
+ status = status & ~(1 << (i + mid->chan_base));
+ midc = &mid->ch[i];
+ if (midc == NULL) {
+ pr_err("ERR_MDMA:Null param midc\n");
+ return;
+ }
+ pr_debug("MDMA:Tx complete interrupt raw block %x, Ch No %d Index %d\n",
+ status, midc->ch_id, i);
+ spin_lock_bh(&midc->lock);
+ /*clearing this interrupts first*/
+
+ midc->raw_block = raw_block;
+ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
+ if (midc->block_intr_status) {
+ midc_collect_descriptors(mid, midc);
+ midc_start_descriptors(mid, midc);
}
+
+ iowrite32(UNMASK_INTR_REG(midc->ch_id),
+ mid->dma_base + MASK_BLOCK);
spin_unlock_bh(&midc->lock);
}
status = ioread32(mid->dma_base + RAW_ERR);
- status &= mid->intr_mask;
+ pr_debug("MDMA:raw error status:%#x\n", status);
while (status) {
/*err interrupt*/
- i = get_ch_index(&status, mid->chan_base);
+ i = get_ch_index(status, mid->chan_base);
if (i < 0) {
- pr_err("ERR_MDMA:Invalid ch index %x\n", i);
+ pr_err("ERR_MDMA:Invalid ch index %x (raw err)\n", i);
return;
}
+ status = status & ~(1 << (i + mid->chan_base));
midc = &mid->ch[i];
if (midc == NULL) {
- pr_err("ERR_MDMA:Null param midc\n");
+ pr_err("ERR_MDMA:Null param midc (raw err)\n");
return;
}
pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
-	u32 tfr_status, err_status;
-	int call_tasklet = 0;
+	u32 tfr_status, err_status, block_status;
+	u32 isr;
-	tfr_status = ioread32(mid->dma_base + RAW_TFR);
-	err_status = ioread32(mid->dma_base + RAW_ERR);
-	if (!tfr_status && !err_status)
+	/* Validate mid BEFORE any dereference; an irqreturn_t handler must
+	 * return IRQ_NONE (not -EINVAL) when the interrupt is not ours.
+	 */
+	if (!mid)
		return IRQ_NONE;
	/*DMA Interrupt*/
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
-	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
-	tfr_status &= mid->intr_mask;
+	/* On Baytrail, the DMAC is sharing IRQ with other devices */
+	if (is_byt_lpio_dmac(mid) && mid->state == SUSPENDED)
+		return IRQ_NONE;
+
+	/* Read the interrupt status registers */
+	tfr_status = ioread32(mid->dma_base + STATUS_TFR);
+	err_status = ioread32(mid->dma_base + STATUS_ERR);
+	block_status = ioread32(mid->dma_base + STATUS_BLOCK);
+
+	/* Common case if the IRQ is shared with other devices */
+	if (!tfr_status && !err_status && !block_status)
+		return IRQ_NONE;
+
+	pr_debug("MDMA: trf_Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	if (tfr_status) {
		/*need to disable intr*/
-		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
-		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
-		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
-		call_tasklet = 1;
+		iowrite32((tfr_status << INT_MASK_WE),
+					mid->dma_base + MASK_TFR);
+	}
+	if (block_status) {
+		/*need to disable intr*/
+		iowrite32((block_status << INT_MASK_WE),
+					mid->dma_base + MASK_BLOCK);
	}
-	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32((err_status << INT_MASK_WE),
			mid->dma_base + MASK_ERR);
-		call_tasklet = 1;
	}
-	if (call_tasklet)
-		tasklet_schedule(&mid->tasklet);
+	/* in mrlfd we need to clear the pisr bits to stop intr as well
+	 * so read the PISR register, see if we have pisr bits status and clear
+	 * them
+	 */
+	if (mid->pimr_mask && !mid->dword_trf) {
+		isr = readl(mid->mask_reg);
+		pr_debug("isr says: %x", isr);
+		if (isr) {
+			isr &= mid->pimr_mask;
+			pr_debug("writing isr: %x", isr);
+			writel(isr, mid->mask_reg);
+		}
+	}
+
+	tasklet_schedule(&mid->tasklet);
	return IRQ_HANDLED;
}
return intel_mid_dma_interrupt(irq, data);
}
+/* Program the DMAC FIFO partition registers so each channel gets an
+ * equal slice (DMA_FIFO_SIZE = 0x100080 per partition word).
+ */
+static void config_dma_fifo_partition(struct middma_device *dma)
+{
+	/* program FIFO Partition registers - 128 bytes for each ch */
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION0_HI);
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION1_LO);
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION1_HI);
+	/* NOTE(review): BIT(26) here presumably latches/commits the partition
+	 * programming when PARTITION0_LO is written last — confirm with the
+	 * DMAC register spec.
+	 */
+	iowrite32(DMA_FIFO_SIZE | BIT(26), dma->dma_base + FIFO_PARTITION0_LO);
+}
+
+/* v1 ops will be used for Medfield & CTP platforms */
+static struct intel_mid_dma_ops v1_dma_ops = {
+ .device_alloc_chan_resources = intel_mid_dma_alloc_chan_resources,
+ .device_free_chan_resources = intel_mid_dma_free_chan_resources,
+ .device_prep_dma_memcpy = intel_mid_dma_prep_memcpy,
+ .device_prep_dma_sg = intel_mid_dma_prep_sg,
+ .device_prep_slave_sg = intel_mid_dma_prep_slave_sg,
+ .device_control = intel_mid_dma_device_control,
+ .device_tx_status = intel_mid_dma_tx_status,
+ .device_issue_pending = intel_mid_dma_issue_pending,
+ .dma_chan_suspend = intel_mid_dma_chan_suspend_v1,
+};
+
+/* v2 ops will be used in Merrifield and beyond plantforms */
+static struct intel_mid_dma_ops v2_dma_ops = {
+ .device_alloc_chan_resources = intel_mid_dma_alloc_chan_resources,
+ .device_free_chan_resources = intel_mid_dma_free_chan_resources,
+ .device_prep_dma_memcpy = intel_mid_dma_prep_memcpy_v2,
+ .device_prep_dma_sg = intel_mid_dma_prep_sg,
+ .device_prep_slave_sg = intel_mid_dma_prep_slave_sg,
+ .device_control = intel_mid_dma_device_control,
+ .device_tx_status = intel_mid_dma_tx_status,
+ .device_issue_pending = intel_mid_dma_issue_pending,
+ .dma_chan_suspend = intel_mid_dma_chan_suspend_v2,
+};
+
/**
* mid_setup_dma - Setup the DMA controller
* @pdev: Controller PCI device structure
* Initialize the DMA controller, channels, registers with DMA engine,
* ISR. Initialize DMA controller channels.
*/
-static int mid_setup_dma(struct pci_dev *pdev)
+int mid_setup_dma(struct device *dev)
{
-	struct middma_device *dma = pci_get_drvdata(pdev);
+	struct middma_device *dma = dev_get_drvdata(dev);
	int err, i;
	/* DMA coherent memory pool for DMA descriptor allocations */
-	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
+	dma->dma_pool = dma_pool_create("intel_mid_dma_desc_pool", dev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
-		pr_err("ERR_MDMA:pci_pool_create failed\n");
+		pr_err("ERR_MDMA:dma_pool_create failed\n");
		err = -ENOMEM;
+		/* dma is devm-allocated (mid_dma_setup_context): freeing it
+		 * here would cause a double free on driver detach */
		goto err_dma_pool;
	}
	INIT_LIST_HEAD(&dma->common.channels);
-	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
-		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
-					LNW_PERIPHRAL_MASK_SIZE);
+		dma->mask_reg = devm_ioremap(dma->dev, dma->pimr_base, LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
			err = -ENOMEM;
-			goto err_ioremap;
+			goto err_setup;
		}
-	} else
+	} else {
		dma->mask_reg = NULL;
+	}
	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
	/*init CH structures*/
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
-	dma->common.dev = &pdev->dev;
+	dma->common.dev = dev;
-	dma->common.device_alloc_chan_resources =
-					intel_mid_dma_alloc_chan_resources;
-	dma->common.device_free_chan_resources =
-					intel_mid_dma_free_chan_resources;
+	dma->common.device_alloc_chan_resources = dma->dma_ops.device_alloc_chan_resources;
+	dma->common.device_free_chan_resources = dma->dma_ops.device_free_chan_resources;
-	dma->common.device_tx_status = intel_mid_dma_tx_status;
-	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
-	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
-	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
-	dma->common.device_control = intel_mid_dma_device_control;
+	dma->common.device_tx_status = dma->dma_ops.device_tx_status;
+	dma->common.device_prep_dma_memcpy = dma->dma_ops.device_prep_dma_memcpy;
+	dma->common.device_prep_dma_sg = dma->dma_ops.device_prep_dma_sg;
+	dma->common.device_issue_pending = dma->dma_ops.device_issue_pending;
+	dma->common.device_prep_slave_sg = dma->dma_ops.device_prep_slave_sg;
+	dma->common.device_control = dma->dma_ops.device_control;
	/*enable dma cntrl*/
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
	/*register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
-		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
+		err = devm_request_irq(dma->dev, dma->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
-			goto err_irq;
+			goto err_setup;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
-		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
+		err = devm_request_irq(dma->dev, dma->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
-			goto err_irq;
+			goto err_setup;
	}
	/*register device w/ engine*/
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
-		goto err_engine;
+		/* err_setup (not err_dma_pool): the pool must be destroyed */
+		goto err_setup;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
	pr_debug("setting up tasklet2 for DMAC2\n");
	tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
+	if (!dma->dword_trf) {
+		config_dma_fifo_partition(dma);
+		/* Mask all interrupts from DMA controller to IA by default */
+		dmac1_mask_periphral_intr(dma);
+	}
	return 0;
+err_setup:
+	dma_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;
/**
 * middma_shutdown - Shutdown the DMA controller
- * @pdev: Controller PCI device structure
+ * @dev: Controller device structure
 *
 * Called by remove
 * Unregister DMa controller, clear all structures and free interrupt
 */
-static void middma_shutdown(struct pci_dev *pdev)
+void middma_shutdown(struct device *dev)
{
-	struct middma_device *device = pci_get_drvdata(pdev);
+	struct middma_device *device = dev_get_drvdata(dev);
	dma_async_device_unregister(&device->common);
-	pci_pool_destroy(device->dma_pool);
-	if (device->mask_reg)
-		iounmap(device->mask_reg);
-	if (device->dma_base)
-		iounmap(device->dma_base);
-	free_irq(pdev->irq, device);
+	dma_pool_destroy(device->dma_pool);	/* irq & iomem are devm-managed now */
	return;
}
+/**
+ * mid_dma_setup_context - allocate and populate a middma_device context
+ * @dev: owning device; the context is devm-allocated against it
+ * @info: per-SoC probe parameters copied into the new context
+ *
+ * Returns the new context or NULL on allocation failure.  The memory is
+ * devm-managed, so callers must not kfree() it.
+ */
+struct middma_device *mid_dma_setup_context(struct device *dev,
+					struct intel_mid_dma_probe_info *info)
+{
+	struct middma_device *mid_device;
+	mid_device = devm_kzalloc(dev, sizeof(*mid_device), GFP_KERNEL);
+	if (!mid_device) {
+		pr_err("ERR_MDMA:kzalloc failed probe\n");
+		return NULL;
+	}
+	mid_device->dev = dev;
+	mid_device->max_chan = info->max_chan;
+	mid_device->chan_base = info->ch_base;
+	mid_device->block_size = info->block_size;
+	mid_device->pimr_mask = info->pimr_mask;
+	mid_device->pimr_base = info->pimr_base;
+	mid_device->dword_trf = info->dword_trf;
+	mid_device->pimr_offset = info->pimr_offset;
+	mid_device->pci_id = info->pci_id;
+	/* copy the ops table so the device owns an independent snapshot */
+	memcpy(&mid_device->dma_ops, info->pdma_ops, sizeof(struct intel_mid_dma_ops));
+	return mid_device;
+}
+
/**
* intel_mid_dma_probe - PCI Probe
* @pdev: Controller PCI device structure
if (err)
goto err_set_dma_mask;
- device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- pr_err("ERR_MDMA:kzalloc failed probe\n");
- err = -ENOMEM;
+ pci_dev_get(pdev);
+ device = mid_dma_setup_context(&pdev->dev, info);
+ if (!device)
goto err_kzalloc;
- }
- device->pdev = pci_dev_get(pdev);
+
+ device->pci_id = pdev->device;
base_addr = pci_resource_start(pdev, 0);
bar_size = pci_resource_len(pdev, 0);
- device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
+ device->dma_base = devm_ioremap_nocache(&pdev->dev, base_addr, DMA_REG_SIZE);
if (!device->dma_base) {
pr_err("ERR_MDMA:ioremap failed\n");
err = -ENOMEM;
goto err_ioremap;
}
+ device->irq = pdev->irq;
pci_set_drvdata(pdev, device);
pci_set_master(pdev);
- device->max_chan = info->max_chan;
- device->chan_base = info->ch_base;
- device->block_size = info->block_size;
- device->pimr_mask = info->pimr_mask;
- err = mid_setup_dma(pdev);
+#ifdef CONFIG_PRH_TEMP_WA_FOR_SPID
+ /* PRH uses, ch 4,5,6,7 override the info table data */
+ pr_info("Device is Bodegabay\n");
+ device->max_chan = 4;
+ device->chan_base = 4;
+#endif
+ err = mid_setup_dma(&pdev->dev);
if (err)
- goto err_dma;
+ goto err_ioremap;
pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
return 0;
-err_dma:
- iounmap(device->dma_base);
err_ioremap:
pci_dev_put(pdev);
- kfree(device);
err_kzalloc:
err_set_dma_mask:
pci_release_regions(pdev);
*/
static void intel_mid_dma_remove(struct pci_dev *pdev)
{
- struct middma_device *device = pci_get_drvdata(pdev);
-
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_forbid(&pdev->dev);
- middma_shutdown(pdev);
+ middma_shutdown(&pdev->dev);
pci_dev_put(pdev);
- kfree(device);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
/* Power Management */
/*
-* dma_suspend - PCI suspend function
+* dma_suspend - suspend function
*
-* @pci: PCI device structure
-* @state: PM message
+* @dev: device structure
*
* This function is called by OS when a power event occurs
*/
-static int dma_suspend(struct device *dev)
+int dma_suspend(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
int i;
- struct middma_device *device = pci_get_drvdata(pci);
+ struct middma_device *device = dev_get_drvdata(dev);
pr_debug("MDMA: dma_suspend called\n");
for (i = 0; i < device->max_chan; i++) {
}
dmac1_mask_periphral_intr(device);
device->state = SUSPENDED;
- pci_save_state(pci);
- pci_disable_device(pci);
- pci_set_power_state(pci, PCI_D3hot);
+
return 0;
}
/**
-* dma_resume - PCI resume function
+* dma_resume - resume function
*
-* @pci: PCI device structure
+* @dev: device structure
*
* This function is called by OS when a power event occurs
*/
int dma_resume(struct device *dev)
{
-	struct pci_dev *pci = to_pci_dev(dev);
-	int ret;
-	struct middma_device *device = pci_get_drvdata(pci);
+	struct middma_device *device = dev_get_drvdata(dev);
	pr_debug("MDMA: dma_resume called\n");
-	pci_set_power_state(pci, PCI_D0);
-	pci_restore_state(pci);
-	ret = pci_enable_device(pci);
-	if (ret) {
-		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
-		return ret;
-	}
	device->state = RUNNING;
+	/* re-enable the DMA controller */
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+
+	/* non-dword-transfer parts need their FIFO partitions reprogrammed */
+	if (!device->dword_trf)
+		config_dma_fifo_partition(device);
+
	return 0;
}
static int dma_runtime_suspend(struct device *dev)
{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pci_dev);
-
-	device->state = SUSPENDED;
-	return 0;
+	/* runtime PM now reuses the full system suspend path */
+	return dma_suspend(dev);
}
static int dma_runtime_resume(struct device *dev)
{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pci_dev);
-
-	device->state = RUNNING;
-	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
-	return 0;
+	/* runtime PM now reuses the full system resume path */
+	return dma_resume(dev);
}
static int dma_runtime_idle(struct device *dev)
{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pdev);
+	struct middma_device *device = dev_get_drvdata(dev);
	int i;
	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
-
-	return pm_schedule_suspend(dev, 0);
+	/* no channel busy: schedule an immediate runtime suspend */
+	return pm_schedule_suspend(dev, 0);
}
/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
- { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)},
- { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)},
- { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)},
- { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)},
+ { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),
+ INFO(2, 6, SST_MAX_DMA_LEN, 0x200020, 0xFFAE8008, 1, 0x8, INTEL_MID_DMAC1_ID, &v1_dma_ops)},
+ { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),
+ INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_MID_DMAC2_ID, &v1_dma_ops)},
+ { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),
+ INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_MID_GP_DMAC2_ID, &v1_dma_ops)},
+ { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),
+ INFO(4, 0, SST_MAX_DMA_LEN, 0x400040, 0xFFAE8008, 1, 0x8, INTEL_MFLD_DMAC1_ID, &v1_dma_ops)},
+ /* Cloverview support */
+ { PCI_VDEVICE(INTEL, INTEL_CLV_GP_DMAC2_ID),
+ INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_CLV_GP_DMAC2_ID, &v1_dma_ops)},
+ { PCI_VDEVICE(INTEL, INTEL_CLV_DMAC1_ID),
+ INFO(4, 0, SST_MAX_DMA_LEN, 0x400040, 0xFFAE8008, 1, 0x8, INTEL_CLV_DMAC1_ID, &v1_dma_ops)},
+ /* Mrfld */
+ { PCI_VDEVICE(INTEL, INTEL_MRFLD_GP_DMAC2_ID),
+ INFO(4, 0, SST_MAX_DMA_LEN_MRFLD, 0, 0, 0, 0, INTEL_MRFLD_GP_DMAC2_ID, &v2_dma_ops)},
+ { PCI_VDEVICE(INTEL, INTEL_MRFLD_DMAC0_ID),
+ INFO(2, 6, SST_MAX_DMA_LEN_MRFLD, 0xFF0000, 0xFF340018, 0, 0x10, INTEL_MRFLD_DMAC0_ID, &v2_dma_ops)},
+ /* Baytrail Low Speed Peripheral DMA */
+ { PCI_VDEVICE(INTEL, INTEL_BYT_LPIO1_DMAC_ID),
+ INFO(6, 0, 2047, 0, 0, 1, 0, INTEL_BYT_LPIO1_DMAC_ID, &v1_dma_ops)},
+ { PCI_VDEVICE(INTEL, INTEL_BYT_LPIO2_DMAC_ID),
+ INFO(6, 0, 2047, 0, 0, 1, 0, INTEL_BYT_LPIO2_DMAC_ID, &v1_dma_ops)},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
+struct intel_mid_dma_probe_info dma_byt_info = {
+ .max_chan = 4,
+ .ch_base = 4,
+ .block_size = 131071,
+ .pimr_mask = 0x00FF0000,
+ .pimr_base = 0xDF540018,
+ .dword_trf = 0,
+ .pimr_offset = 0x10,
+ .pci_id = INTEL_BYT_DMAC0_ID,
+ .pdma_ops = &v2_dma_ops,
+};
+
static const struct dev_pm_ops intel_mid_dma_pm = {
- .runtime_suspend = dma_runtime_suspend,
- .runtime_resume = dma_runtime_resume,
- .runtime_idle = dma_runtime_idle,
- .suspend = dma_suspend,
- .resume = dma_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(dma_suspend,
+ dma_resume)
+ SET_RUNTIME_PM_OPS(dma_runtime_suspend,
+ dma_runtime_resume,
+ dma_runtime_idle)
};
static struct pci_driver intel_mid_dma_pci_driver = {
- .name = "Intel MID DMA",
+ .name = "intel_mid_dma",
.id_table = intel_mid_dma_ids,
.probe = intel_mid_dma_probe,
.remove = intel_mid_dma_remove,
#endif
};
+static const struct acpi_device_id dma_acpi_ids[];
+
+/* Look up the per-SoC probe info attached to an ACPI hardware ID;
+ * returns NULL when the HID is not in dma_acpi_ids.
+ */
+struct intel_mid_dma_probe_info *mid_get_acpi_driver_data(const char *hid)
+{
+	const struct acpi_device_id *id;
+
+	pr_debug("%s", __func__);
+	for (id = dma_acpi_ids; id->id[0]; id++)
+		/* NOTE(review): the bound of 16 assumes acpi_device_id::id is
+		 * at least that large — confirm against ACPI_ID_LEN */
+		if (!strncmp(id->id, hid, 16))
+			return (struct intel_mid_dma_probe_info *)id->driver_data;
+	return NULL;
+}
+static const struct acpi_device_id dma_acpi_ids[] = {
+	{ "DMA0F28", (kernel_ulong_t)&dma_byt_info },
+	{ },
+};
+
+static struct platform_driver intel_dma_acpi_driver = {
+ .driver = {
+ .name = "intel_dma_acpi",
+ .owner = THIS_MODULE,
+ .acpi_match_table = dma_acpi_ids,
+ .pm = &intel_mid_dma_pm,
+ },
+ .probe = dma_acpi_probe,
+ .remove = dma_acpi_remove,
+};
+
static int __init intel_mid_dma_init(void)
{
+	int ret;
+
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
-	return pci_register_driver(&intel_mid_dma_pci_driver);
+	ret = pci_register_driver(&intel_mid_dma_pci_driver);
+	if (ret) {
+		pr_err("PCI dev registration failed");
+		/* do not mask this failure with the platform result below */
+		return ret;
+	}
+
+	ret = platform_driver_register(&intel_dma_acpi_driver);
+	if (ret) {
+		pr_err("Platform dev registration failed");
+		/* keep state consistent: undo the PCI registration */
+		pci_unregister_driver(&intel_mid_dma_pci_driver);
+	}
+	return ret;
}
-fs_initcall(intel_mid_dma_init);
+module_init(intel_mid_dma_init);
static void __exit intel_mid_dma_exit(void)
{
pci_unregister_driver(&intel_mid_dma_pci_driver);
+ platform_driver_unregister(&intel_dma_acpi_driver);
}
module_exit(intel_mid_dma_exit);
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
+MODULE_ALIAS("pci:intel_mid_dma");
+MODULE_ALIAS("acpi:intel_dma_acpi");
--- /dev/null
+
+/* intel_mid_dma_acpi.c - Intel MID DMA driver init file for ACPI enumaration.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * Authors: Ramesh Babu K V <Ramesh.Babu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_runtime.h>
+#include <acpi/acpi_bus.h>
+
+#include "intel_mid_dma_regs.h"
+
+#define HID_MAX_SIZE 8
+
+/* Registry mapping ACPI HIDs to their enumerated DMA controller devices.
+ * (A redundant tentative definition of dma_dev_list was dropped; LIST_HEAD
+ * both declares and initializes it.)
+ */
+LIST_HEAD(dma_dev_list);
+
+struct acpi_dma_dev_list {
+	struct list_head dmadev_list;
+	char dma_hid[HID_MAX_SIZE];
+	struct device *acpi_dma_dev;
+};
+
+/**
+ * intel_mid_get_acpi_dma - find the DMA controller device for an ACPI HID
+ * @hid: ACPI hardware ID to look up
+ *
+ * Returns the registered device, or NULL if no controller with that HID
+ * has been enumerated.  Iterating an empty list naturally yields NULL, so
+ * no separate list_empty() pre-check is needed.
+ * NOTE(review): the list is traversed without locking — confirm all
+ * registrations complete before concurrent lookups can occur.
+ */
+struct device *intel_mid_get_acpi_dma(const char *hid)
+{
+	struct acpi_dma_dev_list *listnode;
+
+	list_for_each_entry(listnode, &dma_dev_list, dmadev_list) {
+		if (!(strncmp(listnode->dma_hid, hid, HID_MAX_SIZE)))
+			return listnode->acpi_dma_dev;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(intel_mid_get_acpi_dma);
+
+#if IS_ENABLED(CONFIG_ACPI)
+/* Fetch platform MEM resource @num and devm-ioremap it into *@dest.
+ * Returns 0 on success, -EIO when the resource is absent or unmappable.
+ */
+static int mid_get_and_map_rsrc(void **dest, struct platform_device *pdev,
+				unsigned int num)
+{
+	struct resource *rsrc;
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, num);
+	if (!rsrc) {
+		pr_err("%s: Invalid resource - %d", __func__, num);
+		return -EIO;
+	}
+	/* resource_size_t can be 64-bit: print with %pa, not %#x */
+	pr_debug("rsrc #%d = %pa", num, &rsrc->start);
+	*dest = devm_ioremap_nocache(&pdev->dev, rsrc->start, resource_size(rsrc));
+	if (!*dest) {
+		pr_err("%s: unable to map resource: %pa", __func__, &rsrc->start);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* Gather the ACPI-enumerated resources (DMA registers, PIMR space, IRQ)
+ * into @mid_device.  Returns 0 on success or a negative errno.
+ */
+static int mid_platform_get_resources(struct middma_device *mid_device,
+			struct platform_device *pdev)
+{
+	int ret;
+	pr_debug("%s", __func__);
+
+	/* All ACPI resource request here */
+	/* Get DDR addr from platform resource table */
+	ret = mid_get_and_map_rsrc(&mid_device->dma_base, pdev, 0);
+	if (ret)
+		return ret;
+	pr_debug("dma_base:%p", mid_device->dma_base);
+
+	ret = mid_get_and_map_rsrc(&mid_device->mask_reg, pdev, 1);
+	if (ret)
+		return ret;
+	/* mask_reg should point to ISRX register */
+	mid_device->mask_reg += 0x18;
+	pr_debug("pimr_base:%p", mid_device->mask_reg);
+
+	/* NOTE(review): the '< 0' test assumes middma_device::irq is a signed
+	 * type — verify, otherwise this error path is unreachable */
+	mid_device->irq = platform_get_irq(pdev, 0);
+	if (mid_device->irq < 0) {
+		pr_err("invalid irq:%d", mid_device->irq);
+		return mid_device->irq;
+	}
+	pr_debug("irq from pdev is:%d", mid_device->irq);
+
+	return 0;
+}
+
+/* ACPI probe: validate the ACPI device, set DMA masks, look up per-SoC
+ * probe info by HID, map resources and bring up the controller.
+ */
+int dma_acpi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	acpi_handle handle = ACPI_HANDLE(dev);
+	struct acpi_device *device;
+	struct middma_device *mid_device;
+	struct intel_mid_dma_probe_info *info;
+	const char *hid;
+	int ret;
+
+	ret = acpi_bus_get_device(handle, &device);
+	if (ret) {
+		pr_err("%s: could not get acpi device - %d\n", __func__, ret);
+		return -ENODEV;
+	}
+
+	if (acpi_bus_get_status(device) || !device->status.present) {
+		pr_err("%s: device has invalid status", __func__);
+		return -ENODEV;
+	}
+
+	hid = acpi_device_hid(device);
+	pr_info("%s for %s", __func__, hid);
+
+	/* Apply default dma_mask if needed */
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		pr_err("dma_set_mask failed with err:%d", ret);
+		return ret;
+	}
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		pr_err("_coherent_mask failed with err:%d", ret);
+		return ret;
+	}
+	info = mid_get_acpi_driver_data(hid);
+	if (!info) {
+		pr_err("acpi driver data is null");
+		ret = -ENODEV;	/* ret was 0 here: jumping without setting it
+				 * made the probe "fail" with success */
+		goto err_dma;
+	}
+
+	mid_device = mid_dma_setup_context(&pdev->dev, info);
+	if (!mid_device) {
+		ret = -ENOMEM;	/* same latent return-0-on-failure bug */
+		goto err_dma;
+	}
+
+	ret = mid_platform_get_resources(mid_device, pdev);
+	if (ret) {
+		pr_err("Error while get resources:%d", ret);
+		goto err_dma;
+	}
+	platform_set_drvdata(pdev, mid_device);
+	ret = mid_setup_dma(&pdev->dev);
+	if (ret)
+		goto err_dma;
+	pm_runtime_enable(&pdev->dev);
+	/* NOTE(review): acpi_dma_dev has no visible declaration in this file;
+	 * registration on dma_dev_list may have been intended — verify. */
+	acpi_dma_dev = &pdev->dev;
+	pr_debug("%s:completed", __func__);
+	return 0;
+err_dma:
+	pr_err("ERR_MDMA:Probe failed %d\n", ret);
+	return ret;
+}
+#else
+int dma_acpi_probe(struct platform_device *pdev)
+{
+ return -EIO;
+}
+#endif
+
+/* ACPI remove: tear down the controller and release runtime-PM state. */
+int dma_acpi_remove(struct platform_device *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	/* balance the pm_runtime_enable() done in dma_acpi_probe() */
+	pm_runtime_disable(&pdev->dev);
+	middma_shutdown(&pdev->dev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
+#define MID_MAX_CHAN 8 /*max ch across controllers*/
+
#define REG_BIT0 0x00000001
#define REG_BIT8 0x00000100
#define INT_MASK_WE 0x8
#define DISABLE_CHANNEL(chan_num) \
(REG_BIT8 << chan_num)
-#define DESCS_PER_CHANNEL 16
+#define DESCS_PER_CHANNEL 128
/*DMA Registers*/
/*registers associated with channel programming*/
#define DMA_REG_SIZE 0x400
#define DMA_CH_SIZE 0x58
+#define DMA_FIFO_SIZE 0x100080
+
/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
#define SAR 0x00 /* Source Address Register*/
#define INTR_STATUS 0x360
#define DMA_CFG 0x398
#define DMA_CHAN_EN 0x3A0
+#define FIFO_PARTITION0_LO 0x400
+#define FIFO_PARTITION0_HI 0x404
+#define FIFO_PARTITION1_LO 0x408
+#define FIFO_PARTITION1_HI 0x40C
+#define CH_SAI_ERR 0x410
+
+#define CTL_LO_BIT_LLP_DST_EN 27
+#define CTL_LO_BIT_LLP_SRC_EN 28
+
+#define CH_SUSPEND (BIT(8))
+#define CH_DRAIN (BIT(10))
/*DMA channel control registers*/
union intel_mid_dma_ctl_lo {
u32 llp_src_en:1; /*enable/disable source LLP = 0*/
u32 reser2:3;
} ctlx;
+ struct {
+ u32 int_en:1; /*enable or disable interrupts*/
+ /*should be 0*/
+ u32 dst_tr_width:3; /*destination transfer width*/
+ /*usually 32 bits = 010*/
+ u32 src_tr_width:3; /*source transfer width*/
+ /*usually 32 bits = 010*/
+ u32 rsvd4:1;
+ u32 dinc:1; /*destination address inc/dec*/
+ u32 rsvd3:1;
+ /*For mem: INC=00, Peripheral NoINC=11*/
+ u32 sinc:1; /*source address inc or dec, as above*/
+ u32 dst_msize:3; /*destination burst transaction length*/
+ /*always = 16 ie 011*/
+ u32 src_msize:3; /*source burst transaction length*/
+ /*always = 16 ie 011*/
+ u32 src_gather_en:1;
+ u32 dst_scatter_en:1;
+ u32 rsvd2:1;
+ u32 tt_fc:2; /*transfer type and flow controller*/
+ /*M-M = 000
+ P-M = 010
+ M-P = 001*/
+ u32 rsvd1:5;
+ u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/
+ u32 llp_src_en:1; /*enable/disable source LLP = 0*/
+ u32 reser:3;
+ } ctlx_v2;
u32 ctl_lo;
};
u32 done:1; /*Done - updated by DMAC*/
u32 reser:19; /*configured by DMAC*/
} ctlx;
+ struct {
+ u32 block_ts:12; /*block transfer size*/
+ u32 done:1; /*Done - updated by DMAC*/
+ u32 ch_weight:11;
+ u32 ch_class:2;
+ } ctlx_v2;
u32 ctl_hi;
-
};
/*DMA channel configuration registers*/
u32 reload_src:1; /*auto reload src addr =1 if src is P*/
u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/
} cfgx;
+ struct {
+ u32 dst_burst_align:1;
+ u32 src_burst_align:1;
+ u32 all_np_wr:1;
+ u32 hshake_np_wr:1;
+ u32 rsvd4:1;
+ u32 ctl_hi_upd_en:1;
+ u32 ds_upd_en:1;
+ u32 ss_upd_en:1;
+ u32 ch_susp:1;
+ u32 fifo_empty:1;
+ u32 ch_drain:1;
+ u32 rsvd11:1;
+ u32 rd_snp:1;
+ u32 wr_snp:1;
+ u32 rd_llp_snp:1;
+ u32 rd_stat_snp:1;
+ u32 wr_stat_snp:1;
+ u32 wr_ctlhi_snp:1;
+ u32 dst_hs_pol:1;
+ u32 src_hs_pol:1;
+ u32 dst_opt_bl:1;
+ u32 src_opt_bl:1;
+ u32 rsvd_22_29:8;
+ u32 reload_src:1;
+ u32 reload_dst:1;
+ } cfgx_v2;
u32 cfg_lo;
};
u32 dst_per:4; /*dstn hw HS interface*/
u32 reser2:17;
} cfgx;
+ struct {
+ u32 src_per:4; /*src hw HS interface*/
+ u32 dst_per:4; /*dstn hw HS interface*/
+ u32 rd_issue_thd:10;
+ u32 wr_issue_thd:10;
+ u32 src_per_ext:2;
+ u32 dst_per_ext:2;
+ } cfgx_v2;
u32 cfg_hi;
};
+/*
+ * struct intel_mid_dma_ops - per-controller dmaengine callback table.
+ * Referenced from intel_mid_dma_probe_info::pdma_ops and embedded in
+ * struct middma_device so different controller variants can supply
+ * their own implementations of the common dmaengine entry points.
+ */
+struct intel_mid_dma_ops {
+ int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ void (*device_free_chan_resources)(struct dma_chan *chan);
+
+ struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
+
+ struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+ int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg);
+
+ enum dma_status (*device_tx_status)(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+ void (*device_issue_pending)(struct dma_chan *chan);
+ /* driver-private hook; has no struct dma_device counterpart */
+ void (*dma_chan_suspend)(struct dma_chan *chan);
+};
/**
* struct intel_mid_dma_chan - internal mid representation of a DMA channel
* @active_list: current active descriptors
* @queue: current queued up descriptors
* @free_list: current free descriptors
- * @slave: dma slave structure
- * @descs_allocated: total number of descriptors allocated
- * @dma: dma device structure pointer
+ * @slave: dma slave structure
+ * @descs_allocated: total number of descriptors allocated
+ * @dma: dma device structure pointer
* @busy: bool representing if ch is busy (active txn) or not
* @in_use: bool representing if ch is in use or not
* @raw_tfr: raw trf interrupt received
* @raw_block: raw block interrupt received
+ * @block_intr_status: bool representing if block intr is enabled or not
*/
struct intel_mid_dma_chan {
struct dma_chan chan;
u32 raw_tfr;
u32 raw_block;
struct intel_mid_dma_slave *mid_slave;
+ struct dma_pool *lli_pool;
+ bool block_intr_status;
};
static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
/**
* struct middma_device - internal representation of a DMA device
* @pdev: PCI device
+ * @dev : pointer to current device struct
+ * @irq : holds irq for the device
* @dma_base: MMIO register space pointer of DMA
* @dma_pool: for allocating DMA descriptors
* @common: embedded struct dma_device
* @block_size: Block size of DMA transfer supported (from drv_data)
* @pimr_mask: MMIO register addr for periphral interrupt (from drv_data)
* @state: dma PM device state
+ * @tfr_intr_mask: hold the status of tfr intr mask register
+ * @block_intr_mask: hold the status of block intr mask register
*/
struct middma_device {
- struct pci_dev *pdev;
+ struct device *dev;
+ unsigned int irq;
void __iomem *dma_base;
struct pci_pool *dma_pool;
struct dma_device common;
struct tasklet_struct tasklet;
- struct intel_mid_dma_chan ch[MAX_CHAN];
+ struct intel_mid_dma_chan ch[MID_MAX_CHAN];
unsigned int pci_id;
unsigned int intr_mask;
void __iomem *mask_reg;
int max_chan;
int block_size;
unsigned int pimr_mask;
+ unsigned int pimr_base;
+ unsigned int dword_trf;
+ unsigned int pimr_offset;
+ unsigned long tfr_intr_mask;
+ unsigned long block_intr_mask;
enum intel_mid_dma_state state;
+ struct intel_mid_dma_ops dma_ops;
};
static inline struct middma_device *to_middma_device(struct dma_device *common)
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
};
-
+/*
+ * struct intel_mid_dma_lli provides the DMA IP with SAR, DAR, LLP etc.
+ * Use u32 for the elements of this structure irrespective of whether
+ * dma_addr_t is u32 or u64. This is necessary because the DMA IP
+ * expects these elements to be 32 bits wide.
+ */
struct intel_mid_dma_lli {
- dma_addr_t sar;
- dma_addr_t dar;
- dma_addr_t llp;
+ u32 sar;
+ u32 dar;
+ u32 llp;
u32 ctl_lo;
u32 ctl_hi;
} __attribute__ ((packed));
+/*
+ * struct intel_mid_dma_probe_info - per-device match data.
+ * Describes one controller variant: channel count and base, DMA block
+ * size, PIMR register geometry, and the callback table (pdma_ops) used
+ * to populate middma_device::dma_ops at probe time.
+ */
+struct intel_mid_dma_probe_info {
+ u8 max_chan;
+ u8 ch_base;
+ u32 block_size;
+ u32 pimr_mask;
+ u32 pimr_base;
+ u8 dword_trf;
+ u32 pimr_offset;
+ unsigned int pci_id;
+ struct intel_mid_dma_ops *pdma_ops;
+};
+
static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{
u32 en_reg = ioread32(dma + DMA_CHAN_EN);
}
+struct middma_device *mid_dma_setup_context(struct device *dev,
+ struct intel_mid_dma_probe_info *info);
int dma_resume(struct device *dev);
-
+int dma_acpi_probe(struct platform_device *pdev);
+int dma_acpi_remove(struct platform_device *pdev);
+struct intel_mid_dma_probe_info *mid_get_acpi_driver_data(const char *hid);
+int mid_setup_dma(struct device *dev);
+void middma_shutdown(struct device *dev);
#endif /*__INTEL_MID_DMAC_REGS_H__*/
Kernel drivers may also request that a particular GPIO be
exported to userspace; this can be useful when debugging.
+config GPIODEBUG
+ tristate "GPIO Setting DEBUG"
+ depends on DEBUG_FS
+ help
+ Say yes here to support GPIO/FLIS Setting Debug.
+
+ This is mostly useful to dump and set gpio/flis configuration.
+
+ Kernel drivers may also request that a particular GPIO be
+ exported to userspace; this can be useful when debugging.
+
config GPIO_GENERIC
tristate
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
+obj-$(CONFIG_GPIODEBUG) += gpiodebug.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
-/*
- * Moorestown platform Langwell chip GPIO driver
- *
- * Copyright (c) 2008 - 2009, Intel Corporation.
+/* gpio-langwell.c Moorestown platform Langwell chip GPIO driver
+ * Copyright (c) 2008 - 2013, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/lnw_gpio.h>
#include <linux/pm_runtime.h>
+#include <asm/intel-mid.h>
#include <linux/irqdomain.h>
+#include <asm/intel_scu_flis.h>
+#include "gpiodebug.h"
+
+#define IRQ_TYPE_EDGE (1 << 0)
+#define IRQ_TYPE_LEVEL (1 << 1)
+
+#define TANGIER_I2C_FLIS_START 0x1D00
+#define TANGIER_I2C_FLIS_END 0x1D34
/*
* Langwell chip has 64 pins and thus there are 2 32bit registers to control
GFER, /* falling edge detect */
GEDR, /* edge detect result */
GAFR, /* alt function */
+ GFBR = 9, /* glitch filter bypass */
+ GPIT, /* interrupt type */
+ GPIP = GFER, /* level interrupt polarity */
+ GPIM = GRER, /* level interrupt mask */
+
+ /* the following registers only exist on MRFLD */
+ GFBR_TNG = 6,
+ GIMR, /* interrupt mask */
+ GISR, /* interrupt source */
+ GITR = 32, /* interrupt type */
+ GLPR = 33, /* level-input polarity */
+};
+
+enum GPIO_CONTROLLERS {
+ LINCROFT_GPIO,
+ PENWELL_GPIO_AON,
+ PENWELL_GPIO_CORE,
+ CLOVERVIEW_GPIO_AON,
+ CLOVERVIEW_GPIO_CORE,
+ TANGIER_GPIO,
+};
+
+/* langwell gpio driver data */
+struct lnw_gpio_ddata_t {
+ u16 ngpio; /* number of gpio pins */
+ u32 gplr_offset; /* offset of first GPLR register from base */
+ u32 (*get_flis_offset)(int gpio);
+ u32 chip_irq_type; /* chip interrupt type */
+};
+
+struct gpio_flis_pair {
+ int gpio; /* gpio number */
+ int offset; /* register offset from FLIS base */
+};
+
+/*
+ * The following mapping table lists the pin and flis offset pair
+ * of some key gpio pins, the offset of other gpios can be calculated
+ * from the table.
+ * NOTE: entries must stay sorted by ascending .gpio — the range scan
+ * in get_flis_offset_by_gpio() relies on that ordering. An offset of
+ * -EINVAL marks a gpio range with no pin-list entry.
+ */
+static struct gpio_flis_pair gpio_flis_mapping_table[] = {
+ { 0, 0x2900 },
+ { 12, 0x2544 },
+ { 14, 0x0958 },
+ { 16, 0x2D18 },
+ { 17, 0x1D10 },
+ { 19, 0x1D00 },
+ { 23, 0x1D18 },
+ { 31, -EINVAL }, /* No GPIO 31 in pin list */
+ { 32, 0x1508 },
+ { 44, 0x3500 },
+ { 64, 0x2534 },
+ { 68, 0x2D1C },
+ { 70, 0x1500 },
+ { 72, 0x3D00 },
+ { 77, 0x0D00 },
+ { 97, 0x0954 },
+ { 98, -EINVAL }, /* No GPIO 98-101 in pin list */
+ { 102, 0x1910 },
+ { 120, 0x1900 },
+ { 124, 0x2100 },
+ { 136, -EINVAL }, /* No GPIO 136 in pin list */
+ { 137, 0x2D00 },
+ { 143, -EINVAL }, /* No GPIO 143-153 in pin list */
+ { 154, 0x092C },
+ { 164, 0x3900 },
+ { 177, 0x2500 },
+ { 190, 0x2D50 },
+};
+
+/*
+ * In the new version of the Merrifield firmware, the I2C FLIS registers
+ * cannot be written directly but must go through an IPC call, which may
+ * sleep, so we must not use spin_lock_irq to protect the access when
+ * is_merr_i2c_flis() returns true.
+ */
+/* True when @offset lies in the Merrifield I2C FLIS window (see note above). */
+static inline bool is_merr_i2c_flis(u32 offset)
+{
+ return ((offset >= TANGIER_I2C_FLIS_START)
+ && (offset <= TANGIER_I2C_FLIS_END));
+}
+
+/*
+ * get_flis_offset_by_gpio - translate a gpio number to its FLIS offset.
+ * Scans the (ascending) mapping table for the range containing @gpio and
+ * extrapolates at 4 bytes per pin from the range's anchor entry.
+ * Returns -EINVAL (stored in a u32 — callers compare against -EINVAL)
+ * for ranges with no pin-list entry.
+ */
+static u32 get_flis_offset_by_gpio(int gpio)
+{
+ int i;
+ int start;
+ u32 offset = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_flis_mapping_table) - 1; i++) {
+ if (gpio >= gpio_flis_mapping_table[i].gpio
+ && gpio < gpio_flis_mapping_table[i + 1].gpio)
+ break;
+ }
+
+ start = gpio_flis_mapping_table[i].gpio;
+
+ if (gpio_flis_mapping_table[i].offset != -EINVAL) {
+ offset = gpio_flis_mapping_table[i].offset
+ + (gpio - start) * 4;
+ }
+
+ return offset;
+}
+
+/*
+ * Per-controller driver data, indexed by enum GPIO_CONTROLLERS (the PCI
+ * table's driver_data). Only Tangier has a non-zero GPLR offset and a
+ * FLIS-based pinmux lookup; Lincroft supports no irq types at all.
+ */
+static struct lnw_gpio_ddata_t lnw_gpio_ddata[] = {
+ [LINCROFT_GPIO] = {
+ .ngpio = 64,
+ },
+ [PENWELL_GPIO_AON] = {
+ .ngpio = 96,
+ .chip_irq_type = IRQ_TYPE_EDGE,
+ },
+ [PENWELL_GPIO_CORE] = {
+ .ngpio = 96,
+ .chip_irq_type = IRQ_TYPE_EDGE,
+ },
+ [CLOVERVIEW_GPIO_AON] = {
+ .ngpio = 96,
+ .chip_irq_type = IRQ_TYPE_EDGE | IRQ_TYPE_LEVEL,
+ },
+ [CLOVERVIEW_GPIO_CORE] = {
+ .ngpio = 96,
+ .chip_irq_type = IRQ_TYPE_EDGE,
+ },
+ [TANGIER_GPIO] = {
+ .ngpio = 192,
+ .gplr_offset = 4,
+ .get_flis_offset = get_flis_offset_by_gpio,
+ .chip_irq_type = IRQ_TYPE_EDGE | IRQ_TYPE_LEVEL,
+ },
+};
struct lnw_gpio {
- struct gpio_chip chip;
- void *reg_base;
- spinlock_t lock;
- struct pci_dev *pdev;
- struct irq_domain *domain;
+ struct gpio_chip chip;
+ void *reg_base;
+ void *reg_gplr;
+ spinlock_t lock;
+ struct pci_dev *pdev;
+ struct irq_domain *domain;
+ u32 (*get_flis_offset)(int gpio);
+ u32 chip_irq_type;
+ int type;
+ struct gpio_debug *debug;
};
#define to_lnw_priv(chip) container_of(chip, struct lnw_gpio, chip)
unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 32;
void __iomem *ptr;
+ void *base;
- ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+ /**
+ * On TNG B0, GITR[0]'s address is 0xFF008300, while GPLR[0]'s address
+ * is 0xFF008004. To count GITR[0]'s address, it's easier to count
+ * from 0xFF008000. So for GITR,GLPR... we switch the base to reg_base.
+ * This does not affect PNW/CLV, since the reg_gplr is the reg_base,
+ * while on TNG, the reg_gplr has an offset of 0x4.
+ */
+ base = reg_type < GITR ? lnw->reg_gplr : lnw->reg_base;
+ ptr = (void __iomem *)(base + reg_type * nreg * 4 + reg * 4);
return ptr;
}
+/**
+ * lnw_gpio_set_alt() - select the alternate function (mux mode) of a pin
+ * @gpio: global GPIO number
+ * @alt: mux mode; 2 bits per pin in GAFR on pre-Tangier, low 3 bits of
+ *       the pin's FLIS register on Tangier
+ *
+ * Resolves the owning controller through the pin's irq chip data
+ * ("trick" below). The spinlock is deliberately skipped for Merrifield
+ * I2C FLIS offsets because those writes go through sleepable IPC.
+ */
+void lnw_gpio_set_alt(int gpio, int alt)
+{
+ struct lnw_gpio *lnw;
+ u32 __iomem *mem;
+ int reg;
+ int bit;
+ u32 offset;
+ u32 value;
+ unsigned long flags;
+
+ /* use this trick to get memio */
+ lnw = irq_get_chip_data(gpio_to_irq(gpio));
+ if (!lnw) {
+ pr_err("langwell_gpio: can not find pin %d\n", gpio);
+ return;
+ }
+ if (gpio < lnw->chip.base || gpio >= lnw->chip.base + lnw->chip.ngpio) {
+ dev_err(lnw->chip.dev, "langwell_gpio: wrong pin %d to config alt\n", gpio);
+ return;
+ }
+/* NOTE(review): dead code — this #if 0 block should be deleted */
+#if 0
+ if (lnw->irq_base + gpio - lnw->chip.base != gpio_to_irq(gpio)) {
+ dev_err(lnw->chip.dev, "langwell_gpio: wrong chip data for pin %d\n", gpio);
+ return;
+ }
+#endif
+ gpio -= lnw->chip.base;
+
+ if (lnw->type != TANGIER_GPIO) {
+ reg = gpio / 16;
+ bit = gpio % 16;
+
+ mem = gpio_reg(&lnw->chip, 0, GAFR);
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = readl(mem + reg);
+ value &= ~(3 << (bit * 2));
+ value |= (alt & 3) << (bit * 2);
+ writel(value, mem + reg);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+ dev_dbg(lnw->chip.dev, "ALT: writing 0x%x to %p\n",
+ value, mem + reg);
+ } else {
+ offset = lnw->get_flis_offset(gpio);
+ if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+ return;
+
+ if (!is_merr_i2c_flis(offset))
+ spin_lock_irqsave(&lnw->lock, flags);
+
+ value = get_flis_value(offset);
+ value &= ~7;
+ value |= (alt & 7);
+ set_flis_value(value, offset);
+
+ if (!is_merr_i2c_flis(offset))
+ spin_unlock_irqrestore(&lnw->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(lnw_gpio_set_alt);
+
+/**
+ * gpio_get_alt() - read back a pin's current alternate-function mode
+ * @gpio: global GPIO number
+ *
+ * Counterpart of lnw_gpio_set_alt(). Returns the 2-bit GAFR field on
+ * pre-Tangier, or the low 3 FLIS bits on Tangier.
+ * NOTE(review): error returns are inconsistent — -1 for bad pin/chip
+ * but -EINVAL for an unmapped Tangier pin; callers must treat any
+ * negative value as failure.
+ */
+int gpio_get_alt(int gpio)
+{
+ struct lnw_gpio *lnw;
+ u32 __iomem *mem;
+ int reg;
+ int bit;
+ u32 value;
+ u32 offset;
+
+ /* use this trick to get memio */
+ lnw = irq_get_chip_data(gpio_to_irq(gpio));
+ if (!lnw) {
+ pr_err("langwell_gpio: can not find pin %d\n", gpio);
+ return -1;
+ }
+ if (gpio < lnw->chip.base || gpio >= lnw->chip.base + lnw->chip.ngpio) {
+ dev_err(lnw->chip.dev,
+ "langwell_gpio: wrong pin %d to config alt\n", gpio);
+ return -1;
+ }
+/* NOTE(review): dead code — this #if 0 block should be deleted */
+#if 0
+ if (lnw->irq_base + gpio - lnw->chip.base != gpio_to_irq(gpio)) {
+ dev_err(lnw->chip.dev,
+ "langwell_gpio: wrong chip data for pin %d\n", gpio);
+ return -1;
+ }
+#endif
+ gpio -= lnw->chip.base;
+
+ if (lnw->type != TANGIER_GPIO) {
+ reg = gpio / 16;
+ bit = gpio % 16;
+
+ mem = gpio_reg(&lnw->chip, 0, GAFR);
+ value = readl(mem + reg);
+ value &= (3 << (bit * 2));
+ value >>= (bit * 2);
+ } else {
+ offset = lnw->get_flis_offset(gpio);
+ if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+ return -EINVAL;
+
+ value = get_flis_value(offset) & 7;
+ }
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(gpio_get_alt);
+
+/*
+ * lnw_gpio_set_debounce - enable/disable the glitch filter for a pin.
+ * The hardware bit is a debounce *bypass*: clearing it (debounce != 0)
+ * enables filtering, setting it disables filtering. Tangier uses the
+ * GFBR_TNG register, earlier parts use GFBR. The @debounce value is
+ * only tested for zero/non-zero; no debounce time is programmed here.
+ */
+static int lnw_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
+{
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
+ void __iomem *gfbr;
+ unsigned long flags;
+ u32 value;
+ enum GPIO_REG reg_type;
+
+ reg_type = (lnw->type == TANGIER_GPIO) ? GFBR_TNG : GFBR;
+ gfbr = gpio_reg(chip, offset, reg_type);
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = readl(gfbr);
+ if (debounce) {
+ /* debounce bypass disable */
+ value &= ~BIT(offset % 32);
+ } else {
+ /* debounce bypass enable */
+ value |= BIT(offset % 32);
+ }
+ writel(value, gfbr);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
+ return 0;
+}
+
static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
enum GPIO_REG reg_type)
{
static int lnw_gpio_request(struct gpio_chip *chip, unsigned offset)
{
- void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
- u32 value = readl(gafr);
- int shift = (offset % 16) << 1, af = (value >> shift) & 3;
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
+ u32 value;
+ void __iomem *gafr;
+ int shift, af;
+
+ if (lnw->type > CLOVERVIEW_GPIO_CORE)
+ return 0;
+
+ gafr = gpio_reg_2bit(chip, offset, GAFR);
+ value = readl(gafr);
+ shift = (offset % 16) << 1;
+ af = (value >> shift) & 3;
if (af) {
value &= ~(3 << shift);
return readl(gplr) & BIT(offset % 32);
}
+/* FLIS pull-control bits: enable flags plus 2-bit strength field */
+#define PULLUP_ENABLE (1 << 8)
+#define PULLDOWN_ENABLE (1 << 9)
+#define PUPD_VAL_2K (0 << 4)
+#define PUPD_VAL_20K (1 << 4)
+#define PUPD_VAL_50K (2 << 4)
+#define PUPD_VAL_910 (3 << 4)
+
+/*
+ * lnw_gpio_set_pull - mirror an output value into the pin's pull config.
+ * Tangier only (no-op elsewhere and for I2C FLIS pins). Selects pull-up
+ * for value=1, pull-down for value=0, always at 50k strength.
+ * Called from lnw_gpio_set(), so every GPIO output write updates pulls.
+ */
+static int lnw_gpio_set_pull(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ u32 flis_offset, flis_value;
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
+ unsigned long flags;
+
+ if (lnw->type != TANGIER_GPIO)
+ return 0;
+
+ flis_offset = lnw->get_flis_offset(gpio);
+ if (WARN(flis_offset == -EINVAL, "invalid pin %d\n", gpio))
+ return -EINVAL;
+ if (is_merr_i2c_flis(flis_offset))
+ return 0;
+
+ spin_lock_irqsave(&lnw->lock, flags);
+ flis_value = get_flis_value(flis_offset);
+ if (value) {
+ flis_value |= PULLUP_ENABLE;
+ flis_value &= ~PULLDOWN_ENABLE;
+ } else {
+ flis_value |= PULLDOWN_ENABLE;
+ flis_value &= ~PULLUP_ENABLE;
+ }
+ flis_value |= PUPD_VAL_50K;
+ set_flis_value(flis_value, flis_offset);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+
+ return 0;
+}
+
static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
void __iomem *gpsr, *gpcr;
+ lnw_gpio_set_pull(chip, offset, value);
+
if (value) {
gpsr = gpio_reg(chip, offset, GPSR);
writel(BIT(offset % 32), gpsr);
u32 gpio = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
+ int ret = 0;
void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+ void __iomem *gpit, *gpip;
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
if (lnw->pdev)
pm_runtime_get(&lnw->pdev->dev);
- spin_lock_irqsave(&lnw->lock, flags);
- if (type & IRQ_TYPE_EDGE_RISING)
- value = readl(grer) | BIT(gpio % 32);
- else
- value = readl(grer) & (~BIT(gpio % 32));
- writel(value, grer);
+ /* Chip that supports level interrupt has extra GPIT registers */
+ if (lnw->chip_irq_type & IRQ_TYPE_LEVEL) {
+ switch (lnw->type) {
+ case CLOVERVIEW_GPIO_AON:
+ gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+ gpip = gpio_reg(&lnw->chip, gpio, GPIP);
+ break;
+ case TANGIER_GPIO:
+ gpit = gpio_reg(&lnw->chip, gpio, GITR);
+ gpip = gpio_reg(&lnw->chip, gpio, GLPR);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = readl(gfer) | BIT(gpio % 32);
- else
- value = readl(gfer) & (~BIT(gpio % 32));
- writel(value, gfer);
- spin_unlock_irqrestore(&lnw->lock, flags);
+ spin_lock_irqsave(&lnw->lock, flags);
+ if (type & IRQ_TYPE_LEVEL_MASK) {
+ /* To prevent glitches from triggering an unintended
+ * level interrupt, configure GLPR register first
+ * and then configure GITR.
+ */
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value = readl(gpip) | BIT(gpio % 32);
+ else
+ value = readl(gpip) & (~BIT(gpio % 32));
+ writel(value, gpip);
+
+ value = readl(gpit) | BIT(gpio % 32);
+ writel(value, gpit);
+
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ } else if (type & IRQ_TYPE_EDGE_BOTH) {
+ value = readl(gpit) & (~BIT(gpio % 32));
+ writel(value, gpit);
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value = readl(grer) | BIT(gpio % 32);
+ else
+ value = readl(grer) & (~BIT(gpio % 32));
+ writel(value, grer);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = readl(gfer) | BIT(gpio % 32);
+ else
+ value = readl(gfer) & (~BIT(gpio % 32));
+ writel(value, gfer);
+
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ }
+ spin_unlock_irqrestore(&lnw->lock, flags);
+ } else {
+ if (type & IRQ_TYPE_LEVEL_MASK) {
+ ret = -EINVAL;
+ } else if (type & IRQ_TYPE_EDGE_BOTH) {
+ spin_lock_irqsave(&lnw->lock, flags);
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value = readl(grer) | BIT(gpio % 32);
+ else
+ value = readl(grer) & (~BIT(gpio % 32));
+ writel(value, grer);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = readl(gfer) | BIT(gpio % 32);
+ else
+ value = readl(gfer) & (~BIT(gpio % 32));
+ writel(value, gfer);
+
+ spin_unlock_irqrestore(&lnw->lock, flags);
+ }
+ }
+out:
if (lnw->pdev)
pm_runtime_put(&lnw->pdev->dev);
+ return ret;
+}
+
+/*
+ * lnw_set_maskunmask - set or clear one pin's bit in an irq mask register.
+ * @reg_type: which mask register (GPIM on Cloverview AON, GIMR on Tangier)
+ * @unmask: non-zero to enable the interrupt, zero to disable it
+ * Shared helper for lnw_irq_mask()/lnw_irq_unmask(). Always returns 0.
+ */
+static int lnw_set_maskunmask(struct irq_data *d, enum GPIO_REG reg_type,
+ unsigned unmask)
+{
+ struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 value;
+ void __iomem *gp_reg;
+
+ gp_reg = gpio_reg(&lnw->chip, gpio, reg_type);
+
+ spin_lock_irqsave(&lnw->lock, flags);
+
+ if (unmask) {
+ /* enable interrupt from GPIO input pin */
+ value = readl(gp_reg) | BIT(gpio % 32);
+ } else {
+ /* disable interrupt from GPIO input pin */
+ value = readl(gp_reg) & (~BIT(gpio % 32));
+ }
+
+ writel(value, gp_reg);
+
+ spin_unlock_irqrestore(&lnw->lock, flags);
+
return 0;
}
static void lnw_irq_unmask(struct irq_data *d)
{
+ struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+ void __iomem *gpit;
+
+ if (gpio >= lnw->chip.ngpio)
+ return;
+
+ /*
+ * Only controllers with a dedicated mask register need work here:
+ * Cloverview AON masks level irqs via GPIM, Tangier masks all irqs
+ * via GIMR. Edge irqs on other parts stay controlled by GRER/GFER.
+ */
+ switch (lnw->type) {
+ case CLOVERVIEW_GPIO_AON:
+ gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+
+ /* if it's level trigger, unmask GPIM */
+ if (readl(gpit) & BIT(gpio % 32))
+ lnw_set_maskunmask(d, GPIM, 1);
+
+ break;
+ case TANGIER_GPIO:
+ lnw_set_maskunmask(d, GIMR, 1);
+ break;
+ default:
+ break;
+ }
}
static void lnw_irq_mask(struct irq_data *d)
{
+ struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+ void __iomem *gpit;
+
+ if (gpio >= lnw->chip.ngpio)
+ return;
+
+ /* Mirror image of lnw_irq_unmask(): clear GPIM/GIMR where present. */
+ switch (lnw->type) {
+ case CLOVERVIEW_GPIO_AON:
+ gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+
+ /* if it's level trigger, mask GPIM */
+ if (readl(gpit) & BIT(gpio % 32))
+ lnw_set_maskunmask(d, GPIM, 0);
+
+ break;
+ case TANGIER_GPIO:
+ lnw_set_maskunmask(d, GIMR, 0);
+ break;
+ default:
+ break;
+ }
}
+/*
+ * No-op set_wake/ack callbacks so the irq core's calls succeed.
+ * NOTE(review): "lwn_" is a typo for the driver's "lnw_" prefix; the
+ * name is referenced in lnw_irqchip, so renaming must touch both.
+ */
+static int lwn_irq_set_wake(struct irq_data *d, unsigned on)
+{
+ return 0;
+}
+
+static void lnw_irq_ack(struct irq_data *d)
+{
+}
+
+/*
+ * lnw_irq_shutdown - disable an irq when it is freed.
+ * Clears both the rising (GRER) and falling (GFER) edge-detect enables
+ * for the pin, under the chip lock, so no further edges are latched.
+ */
+static void lnw_irq_shutdown(struct irq_data *d)
+{
+ struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 value;
+ void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = readl(grer) & (~BIT(gpio % 32));
+ writel(value, grer);
+ value = readl(gfer) & (~BIT(gpio % 32));
+ writel(value, gfer);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+}
+
+
+/*
+ * irq_chip for all langwell-family GPIO controllers.
+ * IRQCHIP_SET_TYPE_MASKED keeps the irq masked while lnw_irq_type()
+ * reprograms trigger registers; ack is a no-op because the handler
+ * clears GEDR/GISR itself before dispatching.
+ */
static struct irq_chip lnw_irqchip = {
.name = "LNW-GPIO",
+ .flags = IRQCHIP_SET_TYPE_MASKED,
.irq_mask = lnw_irq_mask,
.irq_unmask = lnw_irq_unmask,
.irq_set_type = lnw_irq_type,
+ .irq_set_wake = lwn_irq_set_wake,
+ .irq_ack = lnw_irq_ack,
+ .irq_shutdown = lnw_irq_shutdown,
};
static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
+ /*
+ * driver_data now carries an enum GPIO_CONTROLLERS index into
+ * lnw_gpio_ddata[] (ngpio comes from there) instead of a raw pin
+ * count as before.
+ */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb), .driver_data = 96 },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
+ .driver_data = LINCROFT_GPIO },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f),
+ .driver_data = PENWELL_GPIO_AON },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a),
+ .driver_data = PENWELL_GPIO_CORE },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb),
+ .driver_data = CLOVERVIEW_GPIO_AON },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+ .driver_data = CLOVERVIEW_GPIO_CORE },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+ .driver_data = TANGIER_GPIO },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
+/*
+ * Chained handler for the controller's parent interrupt. Scans each
+ * 32-pin bank's status register (GISR on Tangier — qualified by GIMR —
+ * GEDR elsewhere), clears each pending bit before dispatch so edges are
+ * not lost, and masks any pin that fires without a requested handler.
+ * NOTE(review): "CONUNT" in the debug macro name is a typo inherited
+ * from gpiodebug.h — fix it there, not here.
+ */
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct lnw_gpio *lnw = irq_data_get_irq_handler_data(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
+ struct lnw_gpio *lnw;
+ struct gpio_debug *debug;
u32 base, gpio, mask;
unsigned long pending;
- void __iomem *gedr;
+ void __iomem *gp_reg;
+ enum GPIO_REG reg_type;
+ struct irq_desc *lnw_irq_desc;
+ unsigned int lnw_irq;
+
+ lnw = irq_data_get_irq_handler_data(data);
+
+ debug = lnw->debug;
+
+ reg_type = (lnw->type == TANGIER_GPIO) ? GISR : GEDR;
/* check GPIO controller to check which pin triggered the interrupt */
for (base = 0; base < lnw->chip.ngpio; base += 32) {
- gedr = gpio_reg(&lnw->chip, base, GEDR);
- while ((pending = readl(gedr))) {
+ gp_reg = gpio_reg(&lnw->chip, base, reg_type);
+ while ((pending = (lnw->type != TANGIER_GPIO) ?
+ readl(gp_reg) :
+ (readl(gp_reg) &
+ readl(gpio_reg(&lnw->chip, base, GIMR))))) {
gpio = __ffs(pending);
+ DEFINE_DEBUG_IRQ_CONUNT_INCREASE(lnw->chip.base +
+ base + gpio);
+ /* Mask irq if not requested in kernel */
+ lnw_irq = irq_find_mapping(lnw->domain, base + gpio);
+ lnw_irq_desc = irq_to_desc(lnw_irq);
+ if (lnw_irq_desc && unlikely(!lnw_irq_desc->action)) {
+ lnw_irq_mask(&lnw_irq_desc->irq_data);
+ continue;
+ }
+
mask = BIT(gpio);
/* Clear before handling so we can't lose an edge */
- writel(mask, gedr);
- generic_handle_irq(irq_find_mapping(lnw->domain,
- base + gpio));
+ writel(mask, gp_reg);
+ generic_handle_irq(lnw_irq);
}
}
chip->irq_eoi(data);
}
+/*
+ * Help text and value-name tables for the gpiodebug debugfs interface.
+ * Each array backs one control type in lnw_gpio_controls[]; array
+ * lengths must match the corresponding control's .num field.
+ */
+static char conf_reg_msg[] =
+ "\nGPIO configuration register:\n"
+ "\t[ 2: 0]\tpinmux\n"
+ "\t[ 6: 4]\tpull strength\n"
+ "\t[ 8: 8]\tpullup enable\n"
+ "\t[ 9: 9]\tpulldown enable\n"
+ "\t[10:10]\tslew A, B setting\n"
+ "\t[12:12]\toverride input enable\n"
+ "\t[13:13]\toverride input enable enable\n"
+ "\t[14:14]\toverride output enable\n"
+ "\t[15:15]\toverride output enable enable\n"
+ "\t[16:16]\toverride input value\n"
+ "\t[17:17]\tenable input data override\n"
+ "\t[18:18]\toverride output value\n"
+ "\t[19:19]\tenable output data override\n"
+ "\t[21:21]\topen drain enable\n"
+ "\t[22:22]\tenable OVR_IOSTBY_VAL\n"
+ "\t[23:23]\tOVR_IOSTBY_VAL\n"
+ "\t[24:24]\tSBY_OUTDATAOV_EN\n"
+ "\t[25:25]\tSBY_INDATAOV_EN\n"
+ "\t[26:26]\tSBY_OVOUTEN_EN\n"
+ "\t[27:27]\tSBY_OVINEN_EN\n"
+ "\t[29:28]\tstandby pullmode\n"
+ "\t[30:30]\tstandby open drain mode\n";
+
+static char *pinvalue[] = {"low", "high"};
+static char *pindirection[] = {"in", "out"};
+static char *irqtype[] = {"irq_none", "edge_rising", "edge_falling",
+ "edge_both"};
+static char *pinmux[] = {"mode0", "mode1", "mode2", "mode3", "mode4", "mode5",
+ "mode6", "mode7"};
+static char *pullmode[] = {"nopull", "pullup", "pulldown"};
+static char *pullstrength[] = {"2k", "20k", "50k", "910ohms"};
+static char *enable[] = {"disable", "enable"};
+static char *override_direction[] = {"no-override", "override-enable",
+ "override-disable"};
+static char *override_value[] = {"no-override", "override-high",
+ "override-low"};
+static char *standby_trigger[] = {"no-override", "override-trigger",
+ "override-notrigger"};
+static char *standby_pupd_state[] = {"keep", "pulldown", "pullup", "nopull"};
+
+/* gpiodebug accessor: read a pin's level via lnw_gpio_get(); 0 or 1. */
+static int gpio_get_pinvalue(struct gpio_control *control, void *private_data,
+ unsigned gpio)
+{
+ struct lnw_gpio *lnw = private_data;
+ u32 value;
+
+ value = lnw_gpio_get(&lnw->chip, gpio);
+
+ return value ? 1 : 0;
+}
+
+/* gpiodebug accessor: drive a pin's level via lnw_gpio_set(). */
+static int gpio_set_pinvalue(struct gpio_control *control, void *private_data,
+ unsigned gpio, unsigned int num)
+{
+ struct lnw_gpio *lnw = private_data;
+
+ lnw_gpio_set(&lnw->chip, gpio, num);
+ return 0;
+}
+
+/*
+ * gpiodebug accessor: read one pin's bit from the register named by
+ * control->reg, optionally inverting the reported sense.
+ */
+static int gpio_get_normal(struct gpio_control *control, void *private_data,
+ unsigned gpio)
+{
+ struct lnw_gpio *lnw = private_data;
+ u32 __iomem *mem;
+ u32 value;
+
+ mem = gpio_reg(&lnw->chip, gpio, control->reg);
+
+ value = readl(mem);
+ value &= BIT(gpio % 32);
+
+ if (control->invert)
+ return value ? 0 : 1;
+ else
+ return value ? 1 : 0;
+}
+
+/*
+ * gpiodebug accessor: write one pin's bit in control->reg under the
+ * chip lock, honoring control->invert.
+ * NOTE(review): the unconditional clear before the if makes the
+ * clear branches below redundant — harmless but could be simplified.
+ */
+static int gpio_set_normal(struct gpio_control *control, void *private_data,
+ unsigned gpio, unsigned int num)
+{
+ struct lnw_gpio *lnw = private_data;
+ u32 __iomem *mem;
+ u32 value;
+ unsigned long flags;
+
+ mem = gpio_reg(&lnw->chip, gpio, control->reg);
+
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = readl(mem);
+ value &= ~BIT(gpio % 32);
+ if (control->invert) {
+ if (num)
+ value &= ~BIT(gpio % 32);
+ else
+ value |= BIT(gpio % 32);
+ } else {
+ if (num)
+ value |= BIT(gpio % 32);
+ else
+ value &= ~BIT(gpio % 32);
+ }
+ writel(value, mem);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+
+ return 0;
+}
+
+/*
+ * gpiodebug accessor: derive the irq trigger from GRER/GFER bits.
+ * Result indexes irqtype[]: 0=none, 1=rising, 2=falling, 3=both.
+ */
+static int gpio_get_irqtype(struct gpio_control *control, void *private_data,
+ unsigned gpio)
+{
+ struct lnw_gpio *lnw = private_data;
+ void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+ u32 value;
+ int num;
+
+ value = readl(grer) & BIT(gpio % 32);
+ num = value ? 1 : 0;
+ value = readl(gfer) & BIT(gpio % 32);
+ if (num)
+ num = value ? 3 : 1;
+ else
+ num = value ? 2 : 0;
+
+ return num;
+}
+
+/*
+ * gpiodebug accessor: read a shifted/masked field from the pin's FLIS
+ * register (Tangier only). Returns -1 for non-Tangier parts, unmapped
+ * pins, or an out-of-range field value.
+ */
+static int flis_get_normal(struct gpio_control *control, void *private_data,
+ unsigned gpio)
+{
+ struct lnw_gpio *lnw = private_data;
+ u32 offset, value;
+ int num;
+
+ if (lnw->type == TANGIER_GPIO) {
+ offset = lnw->get_flis_offset(gpio);
+ if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+ return -1;
+
+ value = get_flis_value(offset);
+ num = (value >> control->shift) & control->mask;
+ if (num < control->num)
+ return num;
+ }
+
+ return -1;
+}
+
+/*
+ * gpiodebug accessor: read-modify-write a shifted/masked FLIS field
+ * (Tangier only). The spinlock is skipped for Merrifield I2C FLIS
+ * offsets, whose writes go through sleepable IPC.
+ */
+static int flis_set_normal(struct gpio_control *control, void *private_data,
+ unsigned gpio, unsigned int num)
+{
+ struct lnw_gpio *lnw = private_data;
+ u32 shift = control->shift;
+ u32 mask = control->mask;
+ u32 offset, value;
+ unsigned long flags;
+
+ if (lnw->type == TANGIER_GPIO) {
+ offset = lnw->get_flis_offset(gpio);
+ if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+ return -1;
+
+ if (!is_merr_i2c_flis(offset))
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = get_flis_value(offset);
+ value &= ~(mask << shift);
+ value |= ((num & mask) << shift);
+ set_flis_value(value, offset);
+ if (!is_merr_i2c_flis(offset))
+ spin_unlock_irqrestore(&lnw->lock, flags);
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * gpiodebug accessor: decode an enable/value FLIS bit pair (Tangier
+ * only). Returns 0 = no override, 1 = override with value bit set,
+ * 2 = override with value bit clear, -1 on error.
+ */
+static int flis_get_override(struct gpio_control *control, void *private_data,
+ unsigned gpio)
+{
+ struct lnw_gpio *lnw = private_data;
+ u32 offset, value;
+ u32 val_bit, en_bit;
+ int num;
+
+ if (lnw->type == TANGIER_GPIO) {
+ offset = lnw->get_flis_offset(gpio);
+ if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+ return -1;
+
+ val_bit = 1 << control->shift;
+ en_bit = 1 << control->rshift;
+
+ value = get_flis_value(offset);
+
+ if (value & en_bit)
+ if (value & val_bit)
+ num = 1;
+ else
+ num = 2;
+ else
+ num = 0;
+
+ return num;
+ }
+
+ return -1;
+}
+
+/*
+ * gpiodebug accessor: program an enable/value FLIS bit pair (Tangier
+ * only); @num uses the same 0/1/2 encoding as flis_get_override().
+ * NOTE(review): an out-of-range @num falls through the default case
+ * and rewrites the register unchanged rather than returning an error.
+ */
+static int flis_set_override(struct gpio_control *control, void *private_data,
+ unsigned gpio, unsigned int num)
+{
+ struct lnw_gpio *lnw = private_data;
+ u32 offset, value;
+ u32 val_bit, en_bit;
+ unsigned long flags;
+
+ if (lnw->type == TANGIER_GPIO) {
+ offset = lnw->get_flis_offset(gpio);
+ if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+ return -1;
+
+ val_bit = 1 << control->shift;
+ en_bit = 1 << control->rshift;
+
+ if (!is_merr_i2c_flis(offset))
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = get_flis_value(offset);
+ switch (num) {
+ case 0:
+ value &= ~(en_bit | val_bit);
+ break;
+ case 1:
+ value |= (en_bit | val_bit);
+ break;
+ case 2:
+ value |= en_bit;
+ value &= ~val_bit;
+ break;
+ default:
+ break;
+ }
+ set_flis_value(value, offset);
+ if (!is_merr_i2c_flis(offset))
+ spin_unlock_irqrestore(&lnw->lock, flags);
+
+ return 0;
+ }
+
+ return -1;
+}
+
+#define GPIO_VALUE_CONTROL(xtype, xinfo, xnum) \
+{ .type = xtype, .pininfo = xinfo, .num = xnum, \
+ .get = gpio_get_pinvalue, .set = gpio_set_pinvalue}
+#define GPIO_NORMAL_CONTROL(xtype, xinfo, xnum, xreg, xinvert) \
+{ .type = xtype, .pininfo = xinfo, .num = xnum, .reg = xreg, \
+ .invert = xinvert, .get = gpio_get_normal, .set = gpio_set_normal}
+#define GPIO_IRQTYPE_CONTROL(xtype, xinfo, xnum) \
+{ .type = xtype, .pininfo = xinfo, .num = xnum, \
+ .get = gpio_get_irqtype, .set = NULL}
+#define FLIS_NORMAL_CONTROL(xtype, xinfo, xnum, xshift, xmask) \
+{ .type = xtype, .pininfo = xinfo, .num = xnum, .shift = xshift, \
+ .mask = xmask, .get = flis_get_normal, .set = flis_set_normal}
+#define FLIS_OVERRIDE_CONTROL(xtype, xinfo, xnum, xshift, xrshift) \
+{ .type = xtype, .pininfo = xinfo, .num = xnum, .shift = xshift, \
+ .rshift = xrshift, .get = flis_get_override, .set = flis_set_override}
+
+static struct gpio_control lnw_gpio_controls[] = {
+GPIO_VALUE_CONTROL(TYPE_PIN_VALUE, pinvalue, 2),
+GPIO_NORMAL_CONTROL(TYPE_DIRECTION, pindirection, 2, GPDR, 0),
+GPIO_IRQTYPE_CONTROL(TYPE_IRQ_TYPE, irqtype, 4),
+GPIO_NORMAL_CONTROL(TYPE_DEBOUNCE, enable, 2, GFBR_TNG, 1),
+FLIS_NORMAL_CONTROL(TYPE_PINMUX, pinmux, 8, 0, 0x7),
+FLIS_NORMAL_CONTROL(TYPE_PULLSTRENGTH, pullstrength, 4, 4, 0x7),
+FLIS_NORMAL_CONTROL(TYPE_PULLMODE, pullmode, 3, 8, 0x3),
+FLIS_NORMAL_CONTROL(TYPE_OPEN_DRAIN, enable, 2, 21, 0x1),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_INDIR, override_direction, 3, 12, 13),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_OUTDIR, override_direction, 3, 14, 15),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_INVAL, override_value, 3, 16, 17),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_OUTVAL, override_value, 3, 18, 19),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_IO, standby_trigger, 3, 23, 22),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_OUTVAL, override_value, 3, 18, 24),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_INVAL, override_value, 3, 16, 25),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_OUTDIR, override_direction, 3, 14, 26),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_INDIR, override_direction, 3, 12, 27),
+FLIS_NORMAL_CONTROL(TYPE_SBY_PUPD_STATE, standby_pupd_state, 4, 28, 0x3),
+FLIS_NORMAL_CONTROL(TYPE_SBY_OD_DIS, enable, 2, 30, 0x1),
+};
+
+/*
+ * lnw_get_conf_reg - gpio_debug op: read the raw FLIS config register.
+ * Returns the register value for Tangier pins, 0 for other chip types,
+ * or -EINVAL (as unsigned) for an invalid pin -- the debugfs reader
+ * checks for that sentinel explicitly.
+ */
+static unsigned int lnw_get_conf_reg(struct gpio_debug *debug, unsigned gpio)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	u32 offset, value = 0;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -EINVAL;
+
+		value = get_flis_value(offset);
+	}
+
+	return value;
+}
+
+/*
+ * lnw_set_conf_reg - gpio_debug op: write the raw FLIS config register.
+ * Silently does nothing on non-Tangier chips or for an invalid pin
+ * (after a WARN); no locking, matching the raw-register debug intent.
+ */
+static void lnw_set_conf_reg(struct gpio_debug *debug, unsigned gpio,
+		unsigned int value)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	u32 offset;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return;
+
+		set_flis_value(value, offset);
+	}
+
+	return;
+}
+
+/*
+ * lnw_get_avl_pininfo - gpio_debug op: list the valid states for a control.
+ * Looks up @type in lnw_gpio_controls[]; on success stores the entry count
+ * in *num and returns the pininfo string table, else returns NULL.
+ */
+static char **lnw_get_avl_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type, unsigned *num)
+{
+	struct gpio_control *control;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return NULL;
+
+	*num = control->num;
+
+	return control->pininfo;
+}
+
+/*
+ * lnw_get_cul_pininfo - gpio_debug op: name of a control's current state.
+ * Queries the control's get() hook and maps the returned index back to
+ * its pininfo string; NULL if the type is unknown or the read fails.
+ */
+static char *lnw_get_cul_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	struct gpio_control *control;
+	int num;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return NULL;
+
+	num = control->get(control, lnw, gpio);
+	if (num == -1)
+		return NULL;
+
+	return *(control->pininfo + num);
+}
+
+/*
+ * lnw_set_pininfo - gpio_debug op: set a control to a named state.
+ * Translates @info to its pininfo index and invokes the control's set()
+ * hook (if any). Unknown types or names are ignored silently -- debugfs
+ * writes have no error channel here.
+ */
+static void lnw_set_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type, const char *info)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	struct gpio_control *control;
+	int num;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return;
+
+	num = find_pininfo_num(control, info);
+	if (num == -1)
+		return;
+
+	if (control->set)
+		control->set(control, lnw, gpio, num);
+}
+
+static int lnw_get_register_msg(char **buf, unsigned long *size)
+{
+ *buf = conf_reg_msg;
+ *size = strlen(conf_reg_msg);
+
+ return 0;
+}
+
+static struct gpio_debug_ops lnw_gpio_debug_ops = {
+ .get_conf_reg = lnw_get_conf_reg,
+ .set_conf_reg = lnw_set_conf_reg,
+ .get_avl_pininfo = lnw_get_avl_pininfo,
+ .get_cul_pininfo = lnw_get_cul_pininfo,
+ .set_pininfo = lnw_set_pininfo,
+ .get_register_msg = lnw_get_register_msg,
+};
+
static void lnw_irq_init_hw(struct lnw_gpio *lnw)
{
void __iomem *reg;
.xlate = irq_domain_xlate_twocell,
};
+static int lnw_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
static int lnw_gpio_runtime_idle(struct device *dev)
{
int err = pm_schedule_suspend(dev, 500);
}
static const struct dev_pm_ops lnw_gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(NULL, NULL, lnw_gpio_runtime_idle)
+ SET_RUNTIME_PM_OPS(lnw_gpio_runtime_suspend,
+ lnw_gpio_runtime_resume,
+ lnw_gpio_runtime_idle)
};
static int lnw_gpio_probe(struct pci_dev *pdev,
void *base;
resource_size_t start, len;
struct lnw_gpio *lnw;
+ struct gpio_debug *debug;
u32 gpio_base;
u32 irq_base;
int retval;
- int ngpio = id->driver_data;
+ struct lnw_gpio_ddata_t *ddata;
+ int pid;
+
+ pid = id->driver_data;
+ ddata = &lnw_gpio_ddata[pid];
retval = pci_enable_device(pdev);
if (retval)
goto err_ioremap;
}
+ lnw->type = pid;
lnw->reg_base = base;
+ lnw->reg_gplr = lnw->reg_base + ddata->gplr_offset;
+ lnw->get_flis_offset = ddata->get_flis_offset;
+ lnw->chip_irq_type = ddata->chip_irq_type;
lnw->chip.label = dev_name(&pdev->dev);
lnw->chip.request = lnw_gpio_request;
lnw->chip.direction_input = lnw_gpio_direction_input;
lnw->chip.direction_output = lnw_gpio_direction_output;
+ lnw->chip.set_pinmux = lnw_gpio_set_alt;
+ lnw->chip.get_pinmux = gpio_get_alt;
lnw->chip.get = lnw_gpio_get;
lnw->chip.set = lnw_gpio_set;
lnw->chip.to_irq = lnw_gpio_to_irq;
lnw->chip.base = gpio_base;
- lnw->chip.ngpio = ngpio;
+ lnw->chip.ngpio = ddata->ngpio;
lnw->chip.can_sleep = 0;
+ lnw->chip.set_debounce = lnw_gpio_set_debounce;
+ lnw->chip.dev = &pdev->dev;
lnw->pdev = pdev;
-
- lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
+ spin_lock_init(&lnw->lock);
+ lnw->domain = irq_domain_add_simple(pdev->dev.of_node,
+ lnw->chip.ngpio, irq_base,
&lnw_gpio_irq_ops, lnw);
if (!lnw->domain) {
retval = -ENOMEM;
}
lnw_irq_init_hw(lnw);
-
irq_set_handler_data(pdev->irq, lnw);
irq_set_chained_handler(pdev->irq, lnw_irq_handler);
- spin_lock_init(&lnw->lock);
-
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
+ /* add for gpiodebug */
+ debug = gpio_debug_alloc();
+ if (debug) {
+ __set_bit(TYPE_OVERRIDE_OUTDIR, debug->typebit);
+ __set_bit(TYPE_OVERRIDE_OUTVAL, debug->typebit);
+ __set_bit(TYPE_OVERRIDE_INDIR, debug->typebit);
+ __set_bit(TYPE_OVERRIDE_INVAL, debug->typebit);
+ __set_bit(TYPE_SBY_OVR_IO, debug->typebit);
+ __set_bit(TYPE_SBY_OVR_OUTVAL, debug->typebit);
+ __set_bit(TYPE_SBY_OVR_INVAL, debug->typebit);
+ __set_bit(TYPE_SBY_OVR_OUTDIR, debug->typebit);
+ __set_bit(TYPE_SBY_OVR_INDIR, debug->typebit);
+ __set_bit(TYPE_SBY_PUPD_STATE, debug->typebit);
+ __set_bit(TYPE_SBY_OD_DIS, debug->typebit);
+
+ debug->chip = &lnw->chip;
+ debug->ops = &lnw_gpio_debug_ops;
+ debug->private_data = lnw;
+ lnw->debug = debug;
+
+ retval = gpio_debug_register(debug);
+ if (retval) {
+ dev_err(&pdev->dev, "langwell gpio_debug_register failed %d\n",
+ retval);
+ gpio_debug_remove(debug);
+ }
+ }
+
return 0;
err_ioremap:
return ret;
}
-device_initcall(lnw_gpio_init);
+fs_initcall(lnw_gpio_init);
#define PCA953X_INVERT 2
#define PCA953X_DIRECTION 3
+#define PCAL953X_IN_LATCH 34
+#define PCAL953X_INT_MASK 37
+#define PCAL953X_INT_STAT 38
+
#define REG_ADDR_AI 0x80
#define PCA957X_IN 0
#define PCA_INT 0x0100
#define PCA953X_TYPE 0x1000
#define PCA957X_TYPE 0x2000
+#define PCAL953X_TYPE 0x4000
static const struct i2c_device_id pca953x_id[] = {
- { "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
- { "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
- { "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
- { "pca9536", 4 | PCA953X_TYPE, },
- { "pca9537", 4 | PCA953X_TYPE | PCA_INT, },
- { "pca9538", 8 | PCA953X_TYPE | PCA_INT, },
- { "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
- { "pca9554", 8 | PCA953X_TYPE | PCA_INT, },
- { "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
- { "pca9556", 8 | PCA953X_TYPE, },
- { "pca9557", 8 | PCA953X_TYPE, },
- { "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
- { "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
-
- { "max7310", 8 | PCA953X_TYPE, },
- { "max7312", 16 | PCA953X_TYPE | PCA_INT, },
- { "max7313", 16 | PCA953X_TYPE | PCA_INT, },
- { "max7315", 8 | PCA953X_TYPE | PCA_INT, },
- { "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
- { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
- { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
- { "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
+ { "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
+ { "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9536", 4 | PCA953X_TYPE, },
+ { "pca9537", 4 | PCA953X_TYPE | PCA_INT, },
+ { "pca9538", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9554", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9556", 8 | PCA953X_TYPE, },
+ { "pca9557", 8 | PCA953X_TYPE, },
+ { "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
+ { "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+
+ { "pcal9535a", 16 | PCAL953X_TYPE | PCA_INT, },
+ { "pcal9555a", 16 | PCAL953X_TYPE | PCA_INT, },
+
+ { "max7310", 8 | PCA953X_TYPE, },
+ { "max7312", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7313", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7315", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
+ { "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
NBANK(chip), val);
} else {
switch (chip->chip_type) {
+ case PCAL953X_TYPE:
case PCA953X_TYPE:
ret = i2c_smbus_write_word_data(chip->client,
reg << 1, (u16) *val);
reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ));
switch (chip->chip_type) {
+ case PCAL953X_TYPE:
case PCA953X_TYPE:
offset = PCA953X_DIRECTION;
break;
& ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
+ case PCAL953X_TYPE:
case PCA953X_TYPE:
offset = PCA953X_OUTPUT;
break;
/* then direction */
reg_val = chip->reg_direction[off / BANK_SZ] & ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
+ case PCAL953X_TYPE:
case PCA953X_TYPE:
offset = PCA953X_DIRECTION;
break;
mutex_lock(&chip->i2c_lock);
switch (chip->chip_type) {
+ case PCAL953X_TYPE:
case PCA953X_TYPE:
offset = PCA953X_INPUT;
break;
& ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
+ case PCAL953X_TYPE:
case PCA953X_TYPE:
offset = PCA953X_OUTPUT;
break;
level + (BANK_SZ * i));
new_irqs &= ~(1 << level);
}
+
+ if (chip->chip_type == PCAL953X_TYPE) {
+ /* Enable latch on interrupt-enabled inputs */
+ pca953x_write_single(chip, PCAL953X_IN_LATCH,
+ chip->irq_mask[i],
+ BANK_SZ * i);
+ /* Unmask enabled interrupts */
+ pca953x_write_single(chip, PCAL953X_INT_MASK,
+ ~chip->irq_mask[i],
+ BANK_SZ * i);
+ }
}
mutex_unlock(&chip->irq_lock);
int ret, i, offset = 0;
switch (chip->chip_type) {
+ case PCAL953X_TYPE:
case PCA953X_TYPE:
offset = PCA953X_INPUT;
break;
return pendings;
}
+/*
+ * pcal953x_irq_pending - compute pending interrupts for PCAL chips.
+ * Reads the hardware interrupt status registers, then the input registers
+ * (which clears the latched status), and filters the per-bank result
+ * against the rising/falling edge trigger masks.
+ *
+ * Returns non-zero iff any interrupt is pending; @pending is filled per
+ * bank. Note: banks are OR-ed together -- the original summed them into a
+ * u8, which can wrap to zero (e.g. two banks each pending 0x80) and drop
+ * interrupts.
+ */
+static u32 pcal953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
+{
+	u8 cur_stat[MAX_BANK];
+	int ret, i = 0;
+	u8 pendings = 0;
+
+	/* Read the current interrupt status from the device */
+	ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, pending);
+	if (ret)
+		return 0;
+
+	/* Check latched inputs and clear interrupt status */
+	ret = pca953x_read_regs(chip, PCA953X_INPUT, cur_stat);
+	if (ret)
+		return 0;
+
+	/* Apply filter for rising/falling edge selection */
+	for (i = 0; i < NBANK(chip); i++) {
+		pending[i] &= (~cur_stat[i] & chip->irq_trig_fall[i]) |
+			(cur_stat[i] & chip->irq_trig_raise[i]);
+		pendings |= pending[i];
+	}
+
+	return pendings;
+}
+
static irqreturn_t pca953x_irq_handler(int irq, void *devid)
{
struct pca953x_chip *chip = devid;
u8 pending[MAX_BANK];
u8 level;
- int i;
+ int i, pendings;
- if (!pca953x_irq_pending(chip, pending))
+ if (chip->chip_type == PCAL953X_TYPE)
+ pendings = pcal953x_irq_pending(chip, pending);
+ else
+ pendings = pca953x_irq_pending(chip, pending);
+
+ if (!pendings)
return IRQ_HANDLED;
for (i = 0; i < NBANK(chip); i++) {
if (irq_base != -1
&& (id->driver_data & PCA_INT)) {
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_INPUT;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_IN;
- break;
+ if (chip->chip_type != PCAL953X_TYPE) {
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_regs(chip, offset, chip->irq_stat);
+ if (ret)
+ return ret;
+
+ /*
+ * There is no way to know which GPIO line generated the
+ * interrupt. We have to rely on the previous read for
+ * this purpose.
+ */
+ for (i = 0; i < NBANK(chip); i++)
+ chip->irq_stat[i] &= chip->reg_direction[i];
}
- ret = pca953x_read_regs(chip, offset, chip->irq_stat);
- if (ret)
- return ret;
-
- /*
- * There is no way to know which GPIO line generated the
- * interrupt. We have to rely on the previous read for
- * this purpose.
- */
- for (i = 0; i < NBANK(chip); i++)
- chip->irq_stat[i] &= chip->reg_direction[i];
mutex_init(&chip->irq_lock);
chip->domain = irq_domain_add_simple(client->dev.of_node,
chip->client = client;
- chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
+ chip->chip_type = id->driver_data &
+ (PCAL953X_TYPE | PCA953X_TYPE | PCA957X_TYPE);
mutex_init(&chip->i2c_lock);
*/
pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
- if (chip->chip_type == PCA953X_TYPE)
+ if (chip->chip_type & (PCA953X_TYPE | PCAL953X_TYPE))
ret = device_pca953x_init(chip, invert);
else
ret = device_pca957x_init(chip, invert);
{ .compatible = "nxp,pca9574", },
{ .compatible = "nxp,pca9575", },
+ { .compatible = "nxp,pcal9535a", },
+ { .compatible = "nxp,pcal9555a", },
+
{ .compatible = "maxim,max7310", },
{ .compatible = "maxim,max7312", },
{ .compatible = "maxim,max7313", },
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include<linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include "gpiodebug.h"
+
+struct gpiodebug_data {
+ struct gpio_debug *debug;
+ int gpio;
+ unsigned int type;
+};
+
+enum {
+ REGISTER_FOPS = 0,
+ NORMAL_FOPS,
+ COUNT_FOPS,
+};
+
+static struct {
+ unsigned fops_type;
+ unsigned type;
+ char *available_name;
+ char *current_name;
+} global_array[] = {
+ {REGISTER_FOPS, TYPE_CONF_REG, "conf_reg", "conf_reg"},
+ {NORMAL_FOPS, TYPE_PIN_VALUE, "available_value",
+ "current_value"},
+ {NORMAL_FOPS, TYPE_DIRECTION, "available_direction",
+ "current_direction"},
+ {NORMAL_FOPS, TYPE_IRQ_TYPE, "available_irqtype",
+ "current_irqtype"},
+ {NORMAL_FOPS, TYPE_PINMUX, "available_pinmux",
+ "current_pinmux"},
+ {NORMAL_FOPS, TYPE_PULLMODE, "available_pullmode",
+ "current_pullmode"},
+ {NORMAL_FOPS, TYPE_PULLSTRENGTH, "available_pullstrength",
+ "current_pullstrength"},
+ {NORMAL_FOPS, TYPE_OPEN_DRAIN, "available_opendrain",
+ "current_opendrain"},
+ {COUNT_FOPS, TYPE_IRQ_COUNT, "irq_count", "irq_count"},
+ {NORMAL_FOPS, TYPE_WAKEUP, "available_wakeup", "current_wakeup"},
+ {COUNT_FOPS, TYPE_WAKEUP_COUNT, "wakeup_count", "wakeup_count"},
+ {NORMAL_FOPS, TYPE_DEBOUNCE, "available_debounce",
+ "current_debounce"},
+ {NORMAL_FOPS, TYPE_OVERRIDE_OUTDIR, "available_override_outdir",
+ "current_override_outdir"},
+ {NORMAL_FOPS, TYPE_OVERRIDE_OUTVAL, "available_override_outval",
+ "current_override_outval"},
+ {NORMAL_FOPS, TYPE_OVERRIDE_INDIR, "available_override_indir",
+ "current_override_indir"},
+ {NORMAL_FOPS, TYPE_OVERRIDE_INVAL, "available_override_inval",
+ "current_override_inval"},
+ {NORMAL_FOPS, TYPE_SBY_OVR_IO, "available_standby_trigger",
+ "current_standby_trigger"},
+ {NORMAL_FOPS, TYPE_SBY_OVR_OUTVAL, "available_standby_outval",
+ "current_standby_outval"},
+ {NORMAL_FOPS, TYPE_SBY_OVR_INVAL, "available_standby_inval",
+ "current_standby_inval"},
+ {NORMAL_FOPS, TYPE_SBY_OVR_OUTDIR, "available_standby_outdir",
+ "current_standby_outdir"},
+ {NORMAL_FOPS, TYPE_SBY_OVR_INDIR, "available_standby_indir",
+ "current_standby_indir"},
+ {NORMAL_FOPS, TYPE_SBY_PUPD_STATE, "available_standby_pullmode",
+ "current_standby_pullmode"},
+ {NORMAL_FOPS, TYPE_SBY_OD_DIS, "available_standby_opendrain",
+ "current_standby_opendrain"},
+
+};
+
+static struct dentry *gpio_root[ARCH_NR_GPIOS];
+static struct gpiodebug_data global_data[ARCH_NR_GPIOS][TYPE_MAX];
+
+static struct dentry *gpiodebug_debugfs_root;
+
+/*
+ * find_gpio_control - linear search of @control[0..@num) for an entry
+ * whose ->type matches @type. Returns the entry or NULL if not found.
+ */
+struct gpio_control *find_gpio_control(struct gpio_control *control, int num,
+		unsigned type)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		if ((control+i)->type == type)
+			break;
+	}
+
+	if (i < num)
+		return control+i;
+
+	return NULL;
+}
+
+/*
+ * find_pininfo_num - map a state name to its index in the control's
+ * pininfo table. Returns the index, or -1 if @info matches no entry.
+ */
+int find_pininfo_num(struct gpio_control *control, const char *info)
+{
+	int num = 0;
+
+	while (num < control->num) {
+		if (!strcmp(*(control->pininfo+num), info))
+			break;
+		num++;
+	}
+
+	if (num < control->num)
+		return num;
+
+	return -1;
+}
+
+/*
+ * gpiodebug_create_file - debugfs_create_file() wrapper that warns on
+ * failure instead of erroring out; the debug files are best-effort.
+ */
+static struct dentry *gpiodebug_create_file(const char *name,
+		umode_t mode, struct dentry *parent,
+		void *data, const struct file_operations *fops)
+{
+	struct dentry *ret;
+
+	ret = debugfs_create_file(name, mode, parent, data, fops);
+	if (!ret)
+		pr_warn("Could not create debugfs '%s' entry\n", name);
+
+	return ret;
+}
+
+static int gpiodebug_open_file(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static const char readme_msg[] =
+ "\n GPIO Debug Tool-HOWTO (Example):\n\n"
+ "# mount -t debugfs nodev /sys/kernel/debug\n\n"
+ "# cat /sys/kernel/debug/gpio_debug/gpio0/available_pullmode\n"
+ "nopull pullup pulldown\n\n"
+ "# cat /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+ "nopull\n"
+ "# echo pullup > /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+ "# cat /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+ "pullup\n\n"
+ "# cat conf_reg\n"
+ "0x00003120\n"
+ "# echo 0x00003121 > conf_reg\n"
+ "0x00003121\n\n"
+ "# cat irq_count\n"
+ "1\n";
+
+/* gpio_readme_fops */
+static ssize_t show_gpio_readme(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ ssize_t ret = 0;
+
+ if (*ppos < 0 || !cnt)
+ return -EINVAL;
+
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, readme_msg,
+ strlen(readme_msg));
+
+ return ret;
+}
+
+static const struct file_operations gpio_readme_fops = {
+ .open = gpiodebug_open_file,
+ .read = show_gpio_readme,
+ .llseek = generic_file_llseek,
+};
+
+/* gpio_reginfo_fops */
+static ssize_t show_gpio_reginfo(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ ssize_t ret = 0;
+ struct gpio_debug *debug = filp->private_data;
+ unsigned long size;
+ char *buf;
+
+ if (*ppos < 0 || !cnt)
+ return -EINVAL;
+
+ if (debug->ops->get_register_msg) {
+ debug->ops->get_register_msg(&buf, &size);
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, size);
+ }
+
+ return ret;
+}
+
+static const struct file_operations gpio_reginfo_fops = {
+ .open = gpiodebug_open_file,
+ .read = show_gpio_reginfo,
+ .llseek = generic_file_llseek,
+};
+
+
+/* gpio_conf_fops */
+/*
+ * gpio_conf_read - debugfs read for "conf_reg": the pin's config register
+ * as "0x%08x\n", or "Invalid pin\n" when the lookup fails.
+ *
+ * Uses a small fixed stack buffer: the original kzalloc'd @cnt bytes and
+ * sprintf'd up to 12 bytes into it, overflowing the heap for short reads
+ * (e.g. cnt == 1).
+ */
+static ssize_t gpio_conf_read(struct file *filp, char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	int gpio = data->gpio;
+	char buf[16];	/* "0x%08x\n" or "Invalid pin\n" + NUL */
+	unsigned int value = 0;
+	ssize_t ret;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	if (debug->ops->get_conf_reg)
+		value = debug->ops->get_conf_reg(debug, gpio);
+
+	if (value == -EINVAL)
+		ret = scnprintf(buf, sizeof(buf), "Invalid pin\n");
+	else
+		ret = scnprintf(buf, sizeof(buf), "0x%08x\n", value);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+}
+
+/*
+ * gpio_conf_write - debugfs write for "conf_reg": parse a hex value and
+ * program it into the pin's config register.
+ *
+ * Fixes vs. the original:
+ *  - buf was leaked on the copy_from_user() failure path AND on the
+ *    success path (it was never freed);
+ *  - the copied user data was not guaranteed NUL-terminated before being
+ *    parsed (kzalloc(cnt) + copy of cnt bytes);
+ *  - kstrtoul() was passed an unsigned int * and its result was ignored.
+ */
+static ssize_t gpio_conf_write(struct file *filp, const char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	int i, gpio = data->gpio;
+	char *buf, *start;
+	unsigned long value;
+	ssize_t ret = cnt;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	/* +1 keeps the buffer NUL-terminated for the parsing below */
+	buf = kzalloc(cnt + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, cnt)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	start = buf;
+	while (*start == ' ')
+		start++;
+
+	/* strip ending whitespace. */
+	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+		buf[i] = 0;
+
+	if (kstrtoul(start, 16, &value)) {
+		kfree(buf);
+		return -EINVAL;
+	}
+
+	if (debug->ops->set_conf_reg)
+		debug->ops->set_conf_reg(debug, gpio, value);
+
+	kfree(buf);
+	*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations gpio_conf_fops = {
+ .open = gpiodebug_open_file,
+ .read = gpio_conf_read,
+ .write = gpio_conf_write,
+ .llseek = generic_file_llseek,
+};
+
+/* show_gpiodebug_fops */
+/*
+ * gpiodebug_show_read - debugfs read for "available_*": tab-separated
+ * list of the valid state names for this control type.
+ *
+ * Fixes vs. the original:
+ *  - sprintf(buf, "%s%s\t", buf, ...) passed buf as both destination and
+ *    source, which is undefined behavior;
+ *  - writes into the cnt-sized buffer were unbounded; scnprintf() with a
+ *    running offset bounds them (it returns 0 once the buffer is full).
+ */
+static ssize_t gpiodebug_show_read(struct file *filp, char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int i, num = 0;
+	int gpio = data->gpio;
+	char *buf, **avl_buf = NULL;
+	size_t len = 0;
+	ssize_t ret;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (debug->ops->get_avl_pininfo) {
+		avl_buf = debug->ops->get_avl_pininfo(debug, gpio, type, &num);
+
+		for (i = 0; i < num; i++)
+			len += scnprintf(buf + len, cnt - len, "%s\t",
+					*(avl_buf + i));
+	}
+
+	len += scnprintf(buf + len, cnt - len, "\n");
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations show_gpiodebug_fops = {
+ .open = gpiodebug_open_file,
+ .read = gpiodebug_show_read,
+ .llseek = generic_file_llseek,
+};
+
+/* set_gpiodebug_fops */
+/*
+ * gpiodebug_set_gpio_read - debugfs read for "current_*": the name of the
+ * control's current state, followed by a newline.
+ *
+ * The buffer is sized from the state-name length, not the user's read
+ * count: the original sprintf'd cur_info into a cnt-sized buffer, which
+ * overflows when the name is longer than the read count.
+ */
+static ssize_t gpiodebug_set_gpio_read(struct file *filp, char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int gpio = data->gpio;
+	char *buf, *cur_info = NULL;
+	size_t len;
+	ssize_t ret;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	if (debug->ops->get_cul_pininfo)
+		cur_info = debug->ops->get_cul_pininfo(debug, gpio, type);
+
+	/* state name + '\n' + NUL */
+	len = (cur_info ? strlen(cur_info) : 0) + 2;
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (cur_info)
+		ret = scnprintf(buf, len, "%s\n", cur_info);
+	else
+		ret = scnprintf(buf, len, "\n");
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/*
+ * gpiodebug_set_gpio_write - debugfs write for "current_*": set the
+ * control to the state named by the user.
+ *
+ * Fixes vs. the original:
+ *  - buf was never freed (leak on every write) and additionally leaked on
+ *    the copy_from_user() failure path;
+ *  - the copied user data was not guaranteed NUL-terminated before being
+ *    passed to set_pininfo()/strcmp().
+ */
+static ssize_t gpiodebug_set_gpio_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int i, gpio = data->gpio;
+	char *buf;
+	ssize_t ret = cnt;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	/* +1 keeps the buffer NUL-terminated */
+	buf = kzalloc(cnt + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, cnt)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	/* strip ending whitespace. */
+	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+		buf[i] = 0;
+
+	if (debug->ops->set_pininfo)
+		debug->ops->set_pininfo(debug, gpio, type, buf);
+
+	kfree(buf);
+	*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations set_gpiodebug_fops = {
+ .open = gpiodebug_open_file,
+ .read = gpiodebug_set_gpio_read,
+ .write = gpiodebug_set_gpio_write,
+ .llseek = generic_file_llseek,
+};
+
+/* show_count_fops */
+/*
+ * show_count_read - debugfs read for "irq_count"/"wakeup_count": the
+ * per-gpio counter as a decimal string.
+ *
+ * Uses a fixed stack buffer: the original kzalloc'd @cnt bytes and
+ * sprintf'd into it, overflowing for short reads. Also prints the
+ * unsigned long with %lu instead of %ld.
+ */
+static ssize_t show_count_read(struct file *filp, char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	unsigned long count = 0;
+	int gpio = data->gpio;
+	char buf[24];	/* 64-bit decimal + '\n' + NUL */
+	ssize_t ret;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	if (type == TYPE_IRQ_COUNT)
+		count = debug->irq_count[gpio];
+	else if (type == TYPE_WAKEUP_COUNT)
+		count = debug->wakeup_count[gpio];
+
+	ret = scnprintf(buf, sizeof(buf), "%lu\n", count);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+}
+
+static const struct file_operations show_count_fops = {
+ .open = gpiodebug_open_file,
+ .read = show_count_read,
+ .llseek = generic_file_llseek,
+};
+
+/******************************************************************************/
+/*
+ * gpio_debug_alloc - allocate a gpio_debug and preset the type bits that
+ * every chip supports; drivers may set additional TYPE_* bits before
+ * calling gpio_debug_register(). Returns NULL on allocation failure.
+ */
+struct gpio_debug *gpio_debug_alloc(void)
+{
+	struct gpio_debug *debug;
+
+	debug = kzalloc(sizeof(struct gpio_debug), GFP_KERNEL);
+	if (debug) {
+		__set_bit(TYPE_CONF_REG, debug->typebit);
+		__set_bit(TYPE_PIN_VALUE, debug->typebit);
+		__set_bit(TYPE_DIRECTION, debug->typebit);
+		__set_bit(TYPE_IRQ_TYPE, debug->typebit);
+		__set_bit(TYPE_PINMUX, debug->typebit);
+		__set_bit(TYPE_PULLMODE, debug->typebit);
+		__set_bit(TYPE_PULLSTRENGTH, debug->typebit);
+		__set_bit(TYPE_OPEN_DRAIN, debug->typebit);
+		__set_bit(TYPE_IRQ_COUNT, debug->typebit);
+		__set_bit(TYPE_DEBOUNCE, debug->typebit);
+	}
+
+	return debug;
+}
+
+/*
+ * gpio_debug_remove - tear down the per-gpio debugfs directories for the
+ * chip's gpio range and free @debug. Safe to call after a partial
+ * gpio_debug_register(): debugfs_remove_recursive(NULL) is a no-op.
+ */
+void gpio_debug_remove(struct gpio_debug *debug)
+{
+	struct gpio_chip *chip = debug->chip;
+	int base = chip->base;
+	unsigned ngpio = chip->ngpio;
+	int i;
+
+	for (i = base; i < base+ngpio; i++)
+		debugfs_remove_recursive(gpio_root[i]);
+
+	kfree(debug);
+}
+
+/*
+ * gpio_debug_register - create one debugfs directory per gpio in the
+ * chip's range, populated with the files for every TYPE_* bit set in
+ * debug->typebit (register-style, available/current pairs, or read-only
+ * counters, per global_array[]).
+ *
+ * Returns 0 on success or -ENOMEM if a directory cannot be created; in
+ * that case directories created so far are left in place and the caller
+ * is expected to clean up via gpio_debug_remove() (as the langwell probe
+ * does).
+ */
+int gpio_debug_register(struct gpio_debug *debug)
+{
+	struct gpio_chip *chip = debug->chip;
+	int base = chip->base;
+	unsigned ngpio = chip->ngpio;
+	int i, j;
+	char gpioname[32];
+
+	for (i = base; i < base+ngpio; i++) {
+		sprintf(gpioname, "gpio%d", i);
+		gpio_root[i] = debugfs_create_dir(gpioname,
+				gpiodebug_debugfs_root);
+		if (!gpio_root[i]) {
+			pr_warn("gpiodebug: Failed to create debugfs directory\n");
+			return -ENOMEM;
+		}
+
+		/* register info */
+		gpiodebug_create_file("register_info", 0444, gpio_root[i],
+			debug, &gpio_reginfo_fops);
+
+		for (j = 0; j < ARRAY_SIZE(global_array); j++) {
+			if (test_bit(global_array[j].type, debug->typebit)) {
+				global_data[i][j].gpio = i;
+				global_data[i][j].debug = debug;
+				global_data[i][j].type = global_array[j].type;
+
+				switch (global_array[j].fops_type) {
+				case REGISTER_FOPS:
+					gpiodebug_create_file(
+					global_array[j].current_name, 0644,
+					gpio_root[i], &global_data[i][j],
+					&gpio_conf_fops);
+					break;
+				case NORMAL_FOPS:
+					gpiodebug_create_file(
+					global_array[j].available_name, 0444,
+					gpio_root[i], &global_data[i][j],
+					&show_gpiodebug_fops);
+
+					gpiodebug_create_file(
+					global_array[j].current_name, 0644,
+					gpio_root[i], &global_data[i][j],
+					&set_gpiodebug_fops);
+					break;
+				case COUNT_FOPS:
+					gpiodebug_create_file(
+					global_array[j].current_name, 0444,
+					gpio_root[i], &global_data[i][j],
+					&show_count_fops);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int __init gpio_debug_init(void)
+{
+ gpiodebug_debugfs_root = debugfs_create_dir("gpio_debug", NULL);
+ if (IS_ERR(gpiodebug_debugfs_root) || !gpiodebug_debugfs_root) {
+ pr_warn("gpiodebug: Failed to create debugfs directory\n");
+ gpiodebug_debugfs_root = NULL;
+ }
+
+ /* readme */
+ gpiodebug_create_file("readme", 0444, gpiodebug_debugfs_root,
+ NULL, &gpio_readme_fops);
+
+ return 0;
+}
+
+subsys_initcall(gpio_debug_init);
--- /dev/null
+#ifndef __GPIO_DEBUG_H_
+#define __GPIO_DEBUG_H_
+
+#include <linux/gpio.h>
+
+struct gpio_debug;
+
+#define TYPE_CONF_REG 0x00
+#define TYPE_PIN_VALUE 0x01
+#define TYPE_DIRECTION 0x02
+#define TYPE_IRQ_TYPE 0x03
+#define TYPE_PINMUX 0x04
+#define TYPE_PULLMODE 0x05
+#define TYPE_PULLSTRENGTH 0x06
+#define TYPE_OPEN_DRAIN 0x07
+
+#define TYPE_IRQ_COUNT 0x08
+#define TYPE_WAKEUP 0x09
+#define TYPE_WAKEUP_COUNT 0x0A
+#define TYPE_OVERRIDE_OUTDIR 0x0B
+#define TYPE_OVERRIDE_OUTVAL 0x0C
+#define TYPE_OVERRIDE_INDIR 0x0D
+#define TYPE_OVERRIDE_INVAL 0x0E
+#define TYPE_DEBOUNCE 0x0F
+
+#define TYPE_SBY_OVR_IO 0x10
+#define TYPE_SBY_OVR_OUTVAL 0x11
+#define TYPE_SBY_OVR_INVAL 0x12
+#define TYPE_SBY_OVR_OUTDIR 0x13
+#define TYPE_SBY_OVR_INDIR 0x14
+#define TYPE_SBY_PUPD_STATE 0x15
+#define TYPE_SBY_OD_DIS 0x16
+#define TYPE_MAX 0x17
+
+struct gpio_control {
+ unsigned type, num;
+ char **pininfo;
+ u32 reg, invert;
+ u32 shift, rshift;
+ u32 mask;
+ int (*get)(struct gpio_control *control, void *private_data,
+ unsigned gpio);
+ int (*set)(struct gpio_control *control, void *private_data,
+ unsigned gpio, unsigned int num);
+};
+
+struct gpio_debug_ops {
+ unsigned int (*get_conf_reg)(struct gpio_debug *debug, unsigned gpio);
+ void (*set_conf_reg)(struct gpio_debug *debug, unsigned gpio,
+ unsigned int value);
+ char **(*get_avl_pininfo)(struct gpio_debug *debug, unsigned gpio,
+ unsigned int type, unsigned *num);
+ char *(*get_cul_pininfo)(struct gpio_debug *debug, unsigned gpio,
+ unsigned int type);
+ void (*set_pininfo)(struct gpio_debug *debug, unsigned gpio,
+ unsigned int type, const char *info);
+ int (*get_register_msg)(char **buf, unsigned long *size);
+};
+
+struct gpio_debug {
+ unsigned long typebit[BITS_TO_LONGS(TYPE_MAX)];
+ struct gpio_chip *chip;
+ struct gpio_debug_ops *ops;
+ unsigned long irq_count[ARCH_NR_GPIOS];
+ unsigned long wakeup_count[ARCH_NR_GPIOS];
+ void *private_data;
+};
+
+#ifdef CONFIG_GPIODEBUG
+
+#define DEFINE_DEBUG_IRQ_CONUNT_INCREASE(gpio) (debug->irq_count[gpio]++)
+
+struct gpio_control *find_gpio_control(struct gpio_control *control, int num,
+ unsigned type);
+int find_pininfo_num(struct gpio_control *control, const char *info);
+
+struct gpio_debug *gpio_debug_alloc(void);
+void gpio_debug_remove(struct gpio_debug *debug);
+int gpio_debug_register(struct gpio_debug *debug);
+#else
+
+#define DEFINE_DEBUG_IRQ_CONUNT_INCREASE(gpio)
+
+static inline struct gpio_control *find_gpio_control(
+ struct gpio_control *control, int num, unsigned type)
+{
+ return NULL;
+}
+static inline int find_pininfo_num(struct gpio_control *control,
+ const char *info)
+{
+ return 0;
+}
+static inline struct gpio_debug *gpio_debug_alloc(void)
+{
+ return NULL;
+}
+
+/* !CONFIG_GPIODEBUG stub; the original returned NULL from a void function,
+ * which is invalid C and breaks the build when GPIODEBUG is disabled. */
+static inline void gpio_debug_remove(struct gpio_debug *debug)
+{
+}
+static inline int gpio_debug_register(struct gpio_debug *debug)
+{
+ return 0;
+}
+#endif
+#endif
/*
* /sys/class/gpio/gpioN... only for GPIOs that are exported
+ * /pinmux
+ * * configures GPIO or alternate function
+ * * r/w as zero (normal GPIO) or alternate function number
* /direction
* * MAY BE OMITTED if kernel won't allow direction changes
* * is read/write as "in" or "out"
* * also affects existing and subsequent "falling" and "rising"
* /edge configuration
*/
+/*
+ * gpio_pinmux_show - sysfs "pinmux" read: report the pin's current mux
+ * setting via the chip's get_pinmux() hook. -EIO if the gpio is not
+ * exported, -EINVAL if the chip has no get_pinmux().
+ *
+ * NOTE(review): the global gpio number (desc - gpio_desc) is passed to
+ * chip->get_pinmux(), not a chip-relative offset -- confirm the hook
+ * expects the global number (the langwell implementation appears to).
+ */
+static ssize_t gpio_pinmux_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	const struct gpio_desc *desc = dev_get_drvdata(dev);
+	unsigned gpio = desc - gpio_desc;
+	struct gpio_chip *chip;
+	ssize_t status = -EINVAL;
+
+	mutex_lock(&sysfs_lock);
+
+	chip = desc->chip;
+
+	if (!test_bit(FLAG_EXPORT, &desc->flags))
+		status = -EIO;
+	else if (chip->get_pinmux != NULL)
+		status = sprintf(buf, "%d\n", chip->get_pinmux(gpio));
+
+	mutex_unlock(&sysfs_lock);
+	return status;
+}
+
+/*
+ * gpio_pinmux_store - sysfs "pinmux" write: parse an integer and program
+ * it via the chip's set_pinmux() hook. Returns @size on success; -EIO if
+ * not exported, -EINVAL if unsupported or the value does not parse.
+ */
+static ssize_t gpio_pinmux_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	const struct gpio_desc *desc = dev_get_drvdata(dev);
+	unsigned gpio = desc - gpio_desc;
+	ssize_t status = -EINVAL;
+	struct gpio_chip *chip;
+	long mux;
+
+	mutex_lock(&sysfs_lock);
+
+	chip = desc->chip;
+
+	if (!test_bit(FLAG_EXPORT, &desc->flags))
+		status = -EIO;
+	else if (chip->set_pinmux != NULL) {
+		status = kstrtol(buf, 0, &mux);
+		if (status == 0)
+			chip->set_pinmux(gpio, mux);
+	}
+
+	mutex_unlock(&sysfs_lock);
+	return status ? : size;
+}
+
+static DEVICE_ATTR(pinmux, 0644,
+ gpio_pinmux_show, gpio_pinmux_store);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
+ if (!status)
+ status = device_create_file(dev,
+ &dev_attr_pinmux);
+
goto fail_unlock;
}
#include "psb_intel_reg.h"
#include "mdfld_output.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include "mid_bios.h"
#include "intel_bios.h"
#include <linux/i2c.h>
#include <drm/drmP.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include "intel_bios.h"
#include "psb_drv.h"
sensor inside your CPU. Most of the family 6 CPUs
are supported. Check Documentation/hwmon/coretemp for details.
+config SENSORS_CORETEMP_INTERRUPT
+ tristate "Intel Core/Core2/Atom temperature sensor Interrupts"
+ depends on SENSORS_CORETEMP
+ help
+ If you say yes here you get support for interrupts when the
+ CPU temperature crosses the programmed threshold.
+
+ This is tested only for specific platforms(e.g Atom). If you
+ are not sure, say N here.
+
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
select IPMI_SI
This driver can also be built as a module. If so, the module
will be called lm95245.
+config MSIC_GPADC
+ tristate "MSIC GPADC driver for Intel Medfield platform"
+ depends on INTEL_SCU_IPC
+ help
+ Say Y here to enable MSIC GPADC driver on Intel Medfield Platform
+
+config INTEL_MCU
+ tristate "Intel generic MCU control interface"
+ help
+ Say Y here to enable control interface for intel mcu
+
+ This driver provide userspace tty interface for the control and
+ message output.
+ You could use normal read/write to complete those operation.
+
config SENSORS_MAX1111
tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
depends on SPI_MASTER
obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
+obj-$(CONFIG_MSIC_GPADC) += intel_mid_gpadc.o
+obj-$(CONFIG_INTEL_MCU) += intel_mcu_common.o
obj-$(CONFIG_PMBUS) += pmbus/
#include <linux/smp.h>
#include <linux/moduleparam.h>
#include <asm/msr.h>
+#include <asm/mce.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
-#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
-#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+#define CORETEMP_NAME_LENGTH 33 /* String Length of attrs */
+#define MAX_CORE_ATTRS 5 /* Maximum no of basic attrs */
+#define MAX_THRESH_ATTRS 4 /* Maximum no of threshold attrs */
+#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
#define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id)
* This value is passed as "id" field to rdmsr/wrmsr functions.
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
* from where the temperature values should be read.
+ * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
+ * from where the thresholds are read.
* @attr_size: Total number of pre-core attrs displayed in the sysfs.
* @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
* Otherwise, temp_data holds coretemp data.
unsigned int cpu;
u32 cpu_core_id;
u32 status_reg;
+ u32 intrpt_reg;
int attr_size;
bool is_pkg_data;
bool valid;
u16 phys_proc_id;
struct temp_data *core_data[MAX_CORE_DATA];
struct device_attribute name_attr;
+
};
struct pdev_entry {
static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+static DEFINE_PER_CPU(struct delayed_work, core_threshold_work);
+#endif
static ssize_t show_name(struct device *dev,
struct device_attribute *devattr, char *buf)
{
return sprintf(buf, "%s\n", DRVNAME);
}
+static ssize_t show_tx_triggered(struct device *dev,
+ struct device_attribute *devattr, char *buf,
+ u32 mask)
+{
+ u32 eax, edx;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+
+ rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+
+ return sprintf(buf, "%d\n", !!(eax & mask));
+}
+
+static ssize_t show_t0_triggered(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return show_tx_triggered(dev, devattr, buf, THERM_STATUS_THRESHOLD0);
+}
+
+static ssize_t show_t1_triggered(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return show_tx_triggered(dev, devattr, buf, THERM_STATUS_THRESHOLD1);
+}
+
+static ssize_t show_tx(struct device *dev,
+ struct device_attribute *devattr, char *buf,
+ u32 mask, int shift)
+{
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+ u32 eax, edx;
+ int t;
+
+ rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+ t = tdata->tjmax - ((eax & mask) >> shift) * 1000;
+ return sprintf(buf, "%d\n", t);
+}
+
+static ssize_t store_tx(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count,
+ u32 mask, int shift)
+{
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+ u32 eax, edx;
+ unsigned long val;
+ int diff;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ /*
+ * Thermal threshold mask is 7 bits wide. Values are entered in terms
+ * of milli degree celsius. Hence don't accept val > (127 * 1000)
+ */
+ if (val > tdata->tjmax || val > 127000)
+ return -EINVAL;
+
+ diff = (tdata->tjmax - val) / 1000;
+
+ mutex_lock(&tdata->update_lock);
+ rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+ eax = (eax & ~mask) | (diff << shift);
+ wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
+ mutex_unlock(&tdata->update_lock);
+
+ return count;
+}
+
+static ssize_t show_t0(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return show_tx(dev, devattr, buf, THERM_MASK_THRESHOLD0,
+ THERM_SHIFT_THRESHOLD0);
+}
+
+static ssize_t store_t0(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ return store_tx(dev, devattr, buf, count, THERM_MASK_THRESHOLD0,
+ THERM_SHIFT_THRESHOLD0);
+}
+
+static ssize_t show_t1(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return show_tx(dev, devattr, buf, THERM_MASK_THRESHOLD1,
+ THERM_SHIFT_THRESHOLD1);
+}
+
+static ssize_t store_t1(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ return store_tx(dev, devattr, buf, count, THERM_MASK_THRESHOLD1,
+ THERM_SHIFT_THRESHOLD1);
+}
+
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
{
}
mutex_unlock(&tdata->update_lock);
+
return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
static const struct tjmax __cpuinitconst tjmax_table[] = {
{ "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
{ "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
- { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
+ { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 */
{ "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
{ "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
};
#define ANY 0xff
static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
- { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
+ { 0x1c, 10, 100000 }, /* D4xx, N4xx, D5xx, N5xx */
{ 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others
* Note: Also matches 230 and 330,
* which are covered by tjmax_table
* is undetectable by software
*/
{ 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
- { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */
+ { 0x35, ANY, 90000 }, /* Atom Clovertrail */
{ 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
};
return adjust_tjmax(c, id, dev);
}
+static struct platform_device *coretemp_get_pdev(unsigned int cpu)
+{
+ u16 phys_proc_id = TO_PHYS_ID(cpu);
+ struct pdev_entry *p;
+
+ mutex_lock(&pdev_list_mutex);
+
+ list_for_each_entry(p, &pdev_list, list)
+ if (p->phys_proc_id == phys_proc_id) {
+ mutex_unlock(&pdev_list_mutex);
+ return p->pdev;
+ }
+
+ mutex_unlock(&pdev_list_mutex);
+ return NULL;
+}
+
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+/* Interrupt Handler for Core Threshold Events */
+static int coretemp_interrupt(__u64 msr_val)
+{
+ unsigned int cpu = smp_processor_id();
+
+ schedule_delayed_work_on(cpu, &per_cpu(core_threshold_work, cpu), 0);
+ return 0;
+}
+
+/*
+ * Deferred handler for core thermal-threshold interrupts.
+ *
+ * Runs on the CPU that took the interrupt (scheduled by
+ * coretemp_interrupt()). Acknowledges whichever threshold log bit is
+ * set, reads the current temperature and the two programmed thresholds,
+ * and emits a KOBJ_CHANGE uevent when the crossing direction warrants
+ * notifying user space.
+ *
+ * Fixes over the previous version:
+ *  - 'thresh'/'event' were used uninitialized on a spurious wakeup
+ *    (neither THERM_LOG bit set); we now bail out early instead.
+ *  - coretemp_get_pdev() can return NULL (CPU hot-removed); this was
+ *    dereferenced unconditionally.
+ *  - the log format strings used backslash continuation inside the
+ *    string literal, embedding source indentation in the message.
+ */
+static void core_threshold_work_fn(struct work_struct *work)
+{
+	u32 eax, edx;
+	int thresh, event, t0, t1, temp;
+	char *thermal_event[5];
+	bool notify = false;
+	unsigned int cpu = smp_processor_id();
+	int indx = TO_ATTR_NO(cpu);
+	struct platform_device *pdev = coretemp_get_pdev(cpu);
+	struct platform_data *pdata;
+	struct temp_data *tdata;
+
+	if (!pdev) {
+		pr_err("Could not retrieve platform device\n");
+		return;
+	}
+
+	pdata = platform_get_drvdata(pdev);
+	tdata = pdata ? pdata->core_data[indx] : NULL;
+	if (!tdata) {
+		pr_err("Could not retrieve temp_data\n");
+		return;
+	}
+
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax, &edx);
+	if (eax & THERM_LOG_THRESHOLD0) {
+		thresh = 0;
+		event = !!(eax & THERM_STATUS_THRESHOLD0);
+
+		/* Reset the Threshold0 interrupt */
+		eax = eax & ~THERM_LOG_THRESHOLD0;
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, eax, edx);
+
+		/* Notify only when we go below the lower threshold */
+		if (event != 1)
+			notify = true;
+	} else if (eax & THERM_LOG_THRESHOLD1) {
+		thresh = 1;
+		event = !!(eax & THERM_STATUS_THRESHOLD1);
+
+		/* Reset the Threshold1 interrupt */
+		eax = eax & ~THERM_LOG_THRESHOLD1;
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, eax, edx);
+
+		/* Notify only when we go above the upper threshold */
+		if (event != 0)
+			notify = true;
+	} else {
+		/* Spurious wakeup: neither threshold log bit is set. */
+		return;
+	}
+
+	/*
+	 * Read the current Temperature and send it to user land;
+	 * so that the user space can avoid a sysfs read.
+	 */
+	temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+
+	/* Read the threshold registers (only) to print threshold values. */
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, &eax, &edx);
+	t0 = tdata->tjmax - ((eax & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0) * 1000;
+	t1 = tdata->tjmax - ((eax & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1) * 1000;
+
+	if (!notify) {
+		pr_debug("Thermal Event: Sensor: Core %u, cur_temp: %d, event: %d, level: %d, t0: %d, t1: %d\n",
+			 tdata->cpu_core_id, temp, event, thresh, t0, t1);
+		return;
+	}
+
+	pr_info("Thermal Event: Sensor: Core %u, cur_temp: %d, event: %d, level: %d, t0: %d, t1: %d\n",
+		tdata->cpu_core_id, temp, event, thresh, t0, t1);
+
+	thermal_event[0] = kasprintf(GFP_KERNEL, "NAME=Core %u",
+				     tdata->cpu_core_id);
+	thermal_event[1] = kasprintf(GFP_KERNEL, "TEMP=%d", temp);
+	thermal_event[2] = kasprintf(GFP_KERNEL, "EVENT=%d", event);
+	thermal_event[3] = kasprintf(GFP_KERNEL, "LEVEL=%d", thresh);
+	thermal_event[4] = NULL;
+
+	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, thermal_event);
+
+	kfree(thermal_event[3]);
+	kfree(thermal_event[2]);
+	kfree(thermal_event[1]);
+	kfree(thermal_event[0]);
+}
+
+static void configure_apic(void *info)
+{
+ u32 l;
+ int *flag = (int *)info;
+
+ l = apic_read(APIC_LVTTHMR);
+
+ if (*flag) /* Non-Zero flag Masks the APIC */
+ apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
+ else /* Zero flag UnMasks the APIC */
+ apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+}
+
+static int config_thresh_intrpt(struct temp_data *data, int enable)
+{
+ u32 eax, edx;
+ unsigned int cpu = data->cpu;
+ int flag = 1; /* Non-Zero Flag masks the apic */
+
+ smp_call_function_single(cpu, &configure_apic, &flag, 1);
+
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, &eax, &edx);
+
+ if (enable) {
+ INIT_DELAYED_WORK(&per_cpu(core_threshold_work, cpu),
+ core_threshold_work_fn);
+
+ eax |= (THERM_INT_THRESHOLD0_ENABLE |
+ THERM_INT_THRESHOLD1_ENABLE);
+ platform_thermal_notify = coretemp_interrupt;
+
+ pr_info("Enabled Aux0/Aux1 interrupts for coretemp\n");
+ } else {
+ eax &= (~(THERM_INT_THRESHOLD0_ENABLE |
+ THERM_INT_THRESHOLD1_ENABLE));
+ platform_thermal_notify = NULL;
+
+ cancel_delayed_work_sync(&per_cpu(core_threshold_work, cpu));
+ }
+
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, eax, edx);
+
+ flag = 0; /* Flag should be zero to unmask the apic */
+ smp_call_function_single(cpu, &configure_apic, &flag, 1);
+
+ return 0;
+}
+#else
+static inline int config_thresh_intrpt(struct temp_data *data, int enable)
+{
+ return 0;
+}
+#endif
+
static int create_name_attr(struct platform_data *pdata,
struct device *dev)
{
}
static int __cpuinit create_core_attrs(struct temp_data *tdata,
- struct device *dev, int attr_no)
+ struct device *dev, int attr_no, bool have_ttarget)
{
int err, i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
show_label, show_crit_alarm, show_temp, show_tjmax,
- show_ttarget };
+ show_ttarget, show_t0, show_t0_triggered,
+ show_t1, show_t1_triggered };
+ static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count) = { NULL, NULL, NULL, NULL, NULL,
+ store_t0, NULL, store_t1, NULL };
static const char *const names[TOTAL_ATTRS] = {
"temp%d_label", "temp%d_crit_alarm",
"temp%d_input", "temp%d_crit",
- "temp%d_max" };
+ "temp%d_max",
+ "temp%d_threshold1",
+ "temp%d_threshold1_triggered",
+ "temp%d_threshold2",
+ "temp%d_threshold2_triggered" };
for (i = 0; i < tdata->attr_size; i++) {
- snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
- attr_no);
+ snprintf(tdata->attr_name[i], sizeof(tdata->attr_name[i]),
+ names[i], attr_no);
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
+ if (rw_ptr[i]) {
+ tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
+ tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
+ }
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
tdata->sd_attrs[i].index = attr_no;
err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
return 0;
exit_free:
- while (--i >= 0)
+ while (--i >= 0) {
+ if (!tdata->sd_attrs[i].dev_attr.attr.name)
+ continue;
device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+ }
return err;
}
* fixed for stepping D0 (6EC).
*/
if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
- pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
+ pr_err("Errata AE18 not fixed, update BIOS or "
+ "microcode of the CPU!\n");
return -ENODEV;
}
return 0;
}
-static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu)
-{
- u16 phys_proc_id = TO_PHYS_ID(cpu);
- struct pdev_entry *p;
-
- mutex_lock(&pdev_list_mutex);
-
- list_for_each_entry(p, &pdev_list, list)
- if (p->phys_proc_id == phys_proc_id) {
- mutex_unlock(&pdev_list_mutex);
- return p->pdev;
- }
-
- mutex_unlock(&pdev_list_mutex);
- return NULL;
-}
-
static struct temp_data __cpuinit *init_temp_data(unsigned int cpu,
int pkg_flag)
{
tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
MSR_IA32_THERM_STATUS;
+ tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
+ MSR_IA32_THERM_INTERRUPT;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = TO_CORE_ID(cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
int err, attr_no;
+ bool have_ttarget = false;
/*
* Find attr number for sysfs:
if (!err) {
tdata->ttarget
= tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
- tdata->attr_size++;
+ have_ttarget = true;
}
}
+ /*
+ * Test if we can access the intrpt register. If so, increase
+ * 'size' enough to support t0 and t1 attributes.
+ */
+ err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
+ if (!err)
+ tdata->attr_size += MAX_THRESH_ATTRS;
+
pdata->core_data[attr_no] = tdata;
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, &pdev->dev, attr_no);
+ err = create_core_attrs(tdata, &pdev->dev, attr_no, have_ttarget);
if (err)
goto exit_free;
+ /* Enable threshold interrupt support */
+ config_thresh_intrpt(tdata, 1);
+
return 0;
exit_free:
pdata->core_data[attr_no] = NULL;
struct temp_data *tdata = pdata->core_data[indx];
/* Remove the sysfs attributes */
- for (i = 0; i < tdata->attr_size; i++)
+ for (i = 0; i < tdata->attr_size; i++) {
+ if (!tdata->sd_attrs[i].dev_attr.attr.name)
+ continue;
device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+ }
+
+ /* Enable threshold interrupt support */
+ config_thresh_intrpt(tdata, 0);
kfree(pdata->core_data[indx]);
pdata->core_data[indx] = NULL;
--- /dev/null
+/**
+ * intel_mcu_common.c - Intel MCU common interface file
+ *
+ * Copyright (C) 2014 Intel Inc. - http://www.intel.com
+ *
+ * Authors: Lei Wen <lei.wen@intel.com>,
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <asm/intel_psh_ipc.h>
+#include "intel_mcu_common.h"
+#include <linux/circ_buf.h>
+
+#define APP_IMR_SIZE (1024 * 126)
+#define DRIVER_AUTHOR "Lei Wen <lei.wen@intel.com>"
+#define DRIVER_DESC "Intel mcu common control interface"
+#define INTEL_MCU_TTY_MAJOR 168
+#define INTEL_MCU_TTY_MINORS 3
+
+#define LOAD_APP "load mcu app"
+#define GET_VERSION "get mcu app version"
+struct tty_driver *intel_mcu_tty_driver;
+
+#define VER_LEN 1024
+struct mcu {
+ char ver[VER_LEN];
+ uintptr_t ddr_phy[2];
+ void *ddr[2];
+ int load_in_progress;
+};
+
+struct mcu_data {
+ struct device *dev;
+ struct tty_port port;
+ struct mcu *mcu;
+ struct completion cmp;
+ struct loop_buffer lbuf;
+ int index;
+};
+
+static struct mcu_data *mcu_table[INTEL_MCU_TTY_MINORS];
+static int log_level = 1;
+static char *debug_msg[] = {
+ "fatal",
+ "error",
+ "warning",
+ "info",
+ "debug",
+};
+
+static int send_cmd(struct mcu_data *data,
+ struct psh_msg *in, int ch, int wait)
+{
+ int ret;
+ ret = intel_ia2psh_command(in, NULL, ch, 1000000);
+ if (ret)
+ return ret;
+
+ if (wait) {
+ ret = wait_for_completion_timeout(&data->cmp, 3 * HZ);
+ if (ret == 0)
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static void lbuf_read_reset(struct loop_buffer *lbuf)
+{
+ if (lbuf) {
+ lbuf->off_head = lbuf->off_tail = 0;
+ lbuf->in_reading = 0;
+ }
+}
+
+/*
+ * lbuf_read_next - advance the loop buffer and fetch the next frame.
+ *
+ * On success *buf points at the frame payload (past the frame_head) and
+ * *size holds its length; in_reading is set so the NEXT call will consume
+ * the frame. Returns 0 when a frame was produced, non-zero when the
+ * buffer holds no further valid frame.
+ */
+static int lbuf_read_next(struct loop_buffer *lbuf, u8 **buf, u16 *size)
+{
+ struct frame_head *fhead =
+ (struct frame_head *)(lbuf->addr + lbuf->off_head);
+ *buf = NULL;
+ *size = 0;
+
+ if (lbuf->in_reading) {
+ lbuf->in_reading = 0;
+
+ /* Step over the previous frame, which has already been read. */
+ lbuf->off_head += frame_size(fhead->length);
+ lbuf->off_tail = lbuf->off_head;
+ fhead = (struct frame_head *)(lbuf->addr + lbuf->off_head);
+ }
+
+ /* DISCARD marks the wrap point: restart from the buffer base. */
+ if (fhead->sign == LBUF_DISCARD_SIGN) {
+ fhead = (struct frame_head *)lbuf->addr;
+ lbuf->off_head = lbuf->off_tail = 0;
+ }
+
+ /* CELL marks a valid frame written by the firmware side. */
+ if (fhead->sign == LBUF_CELL_SIGN) {
+
+ *buf = lbuf->addr + lbuf->off_head + sizeof(*fhead);
+ *size = fhead->length;
+ lbuf->in_reading = 1;
+ }
+
+ return !lbuf->in_reading;
+}
+
+static int intel_mcu_mcudbg_level(struct mcu_data *data, int level)
+{
+ struct psh_msg in;
+ struct cmd_debug_param *param;
+
+ in.param = 0;
+ in.msg = CMD_MCU_APP_DEBUG;
+ param = (struct cmd_debug_param *) (&(in.param));
+ if (level > 0) {
+ param->level = level;
+ param->sub_cmd = CMD_DEBUG_SET_MASK;
+ } else
+ param->sub_cmd = CMD_DEBUG_GET_MASK;
+
+ return send_cmd(data, &in, PSH2IA_CHANNEL2, 1);
+}
+
+static void push_char_into_port(struct tty_port *port, const char *buf, int len)
+{
+ int count;
+
+ if (len <= 0)
+ return;
+
+ do {
+ count = tty_insert_flip_string(port, buf, len);
+ len -= count;
+ buf += count;
+ } while (len > 0);
+
+ tty_flip_buffer_push(port);
+}
+
+static int intel_mcu_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ dev_dbg(tty->dev, "%s\n", __func__);
+ tty->driver_data = mcu_table[tty->index];
+ /*
+ * For we may get data cached while we don't open this tty,
+ * so we need to flush out buffer, then we could
+ * get full content without disappoint user
+ */
+ if (tty->port)
+ tty_flip_buffer_push(tty->port);
+
+ return 0;
+}
+
+static void intel_mcu_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ dev_dbg(tty->dev, "%s\n", __func__);
+ tty->driver_data = NULL;
+}
+
+static int do_get_ver(struct mcu_data *data)
+{
+ struct psh_msg in;
+
+ in.param = 0;
+ in.msg = CMD_MCU_APP_GET_VERSION;
+ return send_cmd(data, &in, PSH2IA_CHANNEL2, 1);
+}
+
+/*
+ * do_setup_ddr - push the MCU application firmware and hand the PSH the
+ * DDR buffers (BAR0 = firmware image, BAR1 = message ring).
+ *
+ * The firmware only needs to be loaded once per boot (fw_load_done).
+ *
+ * Fixes over the previous version: release_firmware() was skipped when
+ * the load command failed (reference leak), and the failure returned a
+ * bare -1 (-EPERM) instead of the real send_cmd() error.
+ */
+static int do_setup_ddr(struct mcu_data *data)
+{
+	struct mcu *mcu = data->mcu;
+	const struct firmware *fw_entry;
+	static int fw_load_done;
+	char fname[20];
+	struct psh_msg in;
+	int ret = 0;
+
+	if (fw_load_done)
+		return 0;
+
+	snprintf(fname, sizeof(fname), "intel_mcu.bin");
+	if (request_firmware(&fw_entry, fname, data->dev)) {
+		pr_err("cannot find psh firmware(%s)\n", fname);
+		return -ENODEV;
+	}
+
+	pr_debug("psh fw size %d virt:0x%p\n",
+		 (int)fw_entry->size, fw_entry->data);
+	if (fw_entry->size > APP_IMR_SIZE) {
+		pr_err("psh fw size too big\n");
+	} else {
+		memcpy(mcu->ddr[0], fw_entry->data, fw_entry->size);
+		in.msg = CMD_MCU_LOAD_APP;
+		in.param = mcu->ddr_phy[0];
+		mcu->load_in_progress = 1;
+		ret = send_cmd(data, &in, PSH2IA_CHANNEL3, 1);
+		if (!ret)
+			fw_load_done = 1;
+	}
+	/* Always drop the firmware reference, including on command failure. */
+	release_firmware(fw_entry);
+	if (ret)
+		return ret;
+
+	in.msg = CMD_MCU_SETUP_DDR;
+	in.param = mcu->ddr_phy[1];
+	return send_cmd(data, &in, PSH2IA_CHANNEL2, 1);
+}
+
+/*
+ * load_app_store - sysfs 'control' attribute: "load mcu app" triggers the
+ * firmware load on the control channel (mcu_table[2]).
+ *
+ * Fixes over the previous version: the do_setup_ddr() result was silently
+ * ignored (failures reported success), and unrecognised input returned a
+ * bare -1 (-EPERM) instead of -EINVAL.
+ */
+static ssize_t load_app_store(struct device *device,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int len = strlen(LOAD_APP);
+	int ret;
+
+	if (count >= len && strncmp(buf, LOAD_APP, len) == 0) {
+		ret = do_setup_ddr(mcu_table[2]);
+		if (ret)
+			return ret;
+		return count;
+	}
+
+	pr_err("Please provide right string as [%s]!\n", LOAD_APP);
+	return -EINVAL;
+}
+
+static ssize_t get_ver_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcu_data *data = mcu_table[2];
+ struct mcu *mcu = data->mcu;
+
+ if (do_get_ver(data))
+ return -1;
+
+ return scnprintf(buf, VER_LEN, "%s", mcu->ver);
+}
+
+/*
+ * mdbg_control_show - sysfs 'log_level' read: query the MCU for its
+ * current debug mask and report it as a level name.
+ *
+ * Fixes over the previous version: the scnprintf() size of 8 truncated
+ * "warning\n" (needs 9 bytes including the NUL) — sysfs hands us a
+ * PAGE_SIZE buffer, so use that; and -1 (-EPERM) is replaced by -EIO.
+ */
+static ssize_t mdbg_control_show(struct device *device,
+				 struct device_attribute *attr, char *buf)
+{
+	if (intel_mcu_mcudbg_level(mcu_table[2], -1) < 0)
+		goto err;
+
+	/* log_level is updated asynchronously by cmd_handler(). */
+	if (log_level > 0 && log_level < 6)
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				 debug_msg[log_level - 1]);
+
+err:
+	pr_info("get log level err\n");
+	return -EIO;
+}
+/*
+ *set msg level:echo log_level=fatal|info|warning|error|debug| >control
+*/
+#define LOG_LEVEL "fatal|error|warning|info|debug"
+static ssize_t mdbg_control_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mcu_data *data = mcu_table[2];
+ int level = 0;
+ long ltmp = 0;
+
+ if (!buf)
+ return -1;
+ if (!strncmp(buf, "fatal", strlen("fatal")))
+ level = 1;
+ else if (!strncmp(buf, "error", strlen("error")))
+ level = 2;
+ else if (!strncmp(buf, "warning", strlen("warning")))
+ level = 3;
+ else if (!strncmp(buf, "info", strlen("info")))
+ level = 4;
+ else if (!strncmp(buf, "debug", strlen("debug")))
+ level = 5;
+ else {
+ int err;
+ err = kstrtol(buf, 10, <mp);
+ if (!err && (ltmp > 0) && (ltmp < 6))
+ level = ltmp;
+ else {
+ pr_err("Please input words as [%s]\n", LOG_LEVEL);
+ return -1;
+ }
+ }
+ pr_info("set level:%d\n", level);
+ if (intel_mcu_mcudbg_level(data, level) < 0)
+ return -1;
+ return count;
+}
+
+static DEVICE_ATTR(control, 0200, NULL, load_app_store);
+static DEVICE_ATTR(fw_version, 0400, get_ver_show, NULL);
+static DEVICE_ATTR(log_level, 0600, mdbg_control_show, mdbg_control_store);
+
+static struct attribute *control_sysfs_attrs[] = {
+ &dev_attr_control.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_log_level.attr,
+ NULL,
+
+};
+
+static struct attribute_group intel_mcu_tty_attribute_group = {
+ .name = NULL,
+ .attrs = control_sysfs_attrs,
+
+};
+
+/*
+ * raw_output - stream a byte buffer to the PSH in 4-byte IPC words.
+ *
+ * Every word except the last carries PSH_IPC_CONTINUE so the receiver
+ * can reassemble the message.
+ *
+ * Fix over the previous version: in.param was never initialized, so a
+ * short final chunk (count % 4 != 0) sent uninitialized stack bytes.
+ */
+static void raw_output(struct mcu_data *data, int ch,
+		       const unsigned char *buf, int count)
+{
+	struct psh_msg in;
+	int i, left;
+
+	for (i = 0; i < count; i += 4) {
+		left = count - i;
+		if (left > 4) {
+			left = 4;
+			in.msg = PSH_IPC_CONTINUE;
+		} else {
+			in.msg = 0;
+		}
+
+		/* Zero first so a short tail doesn't leak stack bytes. */
+		in.param = 0;
+		memcpy(&in.param, buf, left);
+		buf += left;
+		send_cmd(data, &in, ch, 0);
+	}
+}
+
+#define TTY_WRITE_ROOM 512
+/*
+ * intel_mcu_tty_write - tty core write hook for /dev/ttymcuN.
+ *
+ * Only port 0 accepts writes; its data is forwarded to the PSH via
+ * raw_output() on channel 0. Port 1 — and, by fallthrough, any
+ * unexpected index — rejects the write.
+ */
+static int intel_mcu_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct mcu_data *data = tty->driver_data;
+
+ switch (tty->index) {
+ default:
+ pr_err("TTY index %d not supported!\n", tty->index);
+ /* fall through - unsupported indexes fail like port 1 */
+ case 1:
+ return -1;
+ case 0:
+ /* NOTE(review): oversized writes are rejected outright rather
+ * than split into TTY_WRITE_ROOM chunks — confirm intended. */
+ if (count > TTY_WRITE_ROOM) {
+ pr_err("Port 0's input size is limited by %d!\n",
+ TTY_WRITE_ROOM);
+ return -1;
+ }
+ raw_output(data, tty->index, buf, count);
+ break;
+ }
+ return count;
+}
+
+static int intel_mcu_tty_write_room(struct tty_struct *tty)
+{
+ return TTY_WRITE_ROOM;
+}
+
+static const struct tty_operations intel_mcu_ops = {
+ .open = intel_mcu_tty_open,
+ .close = intel_mcu_tty_close,
+ .write = intel_mcu_tty_write,
+ .write_room = intel_mcu_tty_write_room,
+};
+
+static int mem_alloc(struct pci_dev *pdev, uintptr_t *phy_addr,
+ void **virt_addr, int bar)
+{
+ void __iomem *mem;
+ int ret = 0;
+ unsigned long start = 0, len;
+
+ /* dedicate isolated memory region */
+ start = pci_resource_start(pdev, bar);
+ len = pci_resource_len(pdev, bar);
+ if (!start || !len) {
+ dev_err(&pdev->dev, "bar %d address not set\n", bar);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = pci_request_region(pdev, bar, "intel_mcu");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to request psh region 0x%lx-0x%lx\n",
+ start,
+ (unsigned long)pci_resource_end(pdev, bar));
+ goto err;
+ }
+
+ mem = ioremap_nocache(start, len);
+ if (!mem) {
+ dev_err(&pdev->dev, "can not ioremap app imr address\n");
+ ret = -EINVAL;
+ goto err_ioremap;
+ }
+
+ *phy_addr = start;
+ *virt_addr = (void *)mem;
+ return 0;
+
+err_ioremap:
+ pci_release_region(pdev, bar);
+err:
+ return ret;
+}
+
+/*
+ * cmd_handler - PSH IPC callback for the control channel (CH2).
+ *
+ * Drains the loop buffer and dispatches command responses: version
+ * replies are cached in mcu->ver, debug replies update log_level.
+ * Completes data->cmp so the synchronous senders in send_cmd() wake up.
+ *
+ * Fixes over the previous version: version->buf (firmware-controlled
+ * bytes) was passed as the printf FORMAT string to snprintf() — a
+ * classic format-string bug; and CMD_MCU_APP_DEBUG lacked a break.
+ */
+static void cmd_handler(u32 msg, u32 param, void *_data)
+{
+	struct mcu_data *data = (struct mcu_data *)_data;
+	struct mcu *mcu = data->mcu;
+	struct cmd_resp *resp;
+	const struct version_resp *version;
+	struct debug_resp *debug_resp;
+	u8 *dbuf = NULL;
+	u16 size = 0;
+
+	if (mcu->load_in_progress) {
+		mcu->load_in_progress = 0;
+		goto done;
+	}
+
+	while (!lbuf_read_next(&data->lbuf, &dbuf, &size)) {
+		resp = (struct cmd_resp *)dbuf;
+
+		if (!resp->len)
+			continue;
+
+		switch (resp->cmd_id) {
+		case CMD_MCU_APP_GET_VERSION:
+			version = (struct version_resp *)resp->param;
+			/*
+			 * Never use firmware-supplied bytes as a format
+			 * string; copy at most total_length characters.
+			 */
+			if (version->total_length)
+				snprintf(mcu->ver, VER_LEN, "%.*s",
+					 (int)version->total_length,
+					 version->buf);
+			break;
+		case CMD_MCU_APP_DEBUG:
+			debug_resp = (struct debug_resp *)resp->param;
+			log_level = debug_resp->level;
+			break;
+		default:
+			break;
+		}
+	}
+done:
+	complete(&data->cmp);
+}
+
+static void raw_data_handler(u32 msg, u32 param, void *_data)
+{
+ struct mcu_data *data = (struct mcu_data *)_data;
+ struct cmd_resp *resp;
+ u8 *dbuf = NULL;
+ u16 size = 0;
+
+ while (!lbuf_read_next(&data->lbuf, &dbuf, &size)) {
+ resp = (struct cmd_resp *)dbuf;
+ push_char_into_port(&data->port, resp->param, resp->len);
+ }
+ complete(&data->cmp);
+}
+
+/*
+ * mcu_platform_probe - register the ttymcu driver, create one tty port
+ * per PSH channel, expose the control sysfs group and bind the IPC
+ * receive channels (CH0/CH1 raw data, CH2 command responses).
+ *
+ * Fixes over the previous version: the kzalloc-failure path jumped to
+ * data_alloc_fail with ret still 0, so probe reported SUCCESS; the
+ * error path never called tty_unregister_driver(); sysfs_remove_group()
+ * was called after a FAILED sysfs_create_group(); freed mcu_table[]
+ * entries were left dangling.
+ */
+static int mcu_platform_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct mcu_data *data;
+	struct mcu *mcu;
+	u8 *base;
+
+	mcu = platform_get_drvdata(pdev);
+	intel_mcu_tty_driver = alloc_tty_driver(INTEL_MCU_TTY_MINORS);
+	if (!intel_mcu_tty_driver) {
+		dev_err(&pdev->dev, "fail to alloc tty driver\n");
+		return -ENODEV;
+	}
+
+	intel_mcu_tty_driver->name = "ttymcu";
+	intel_mcu_tty_driver->major = INTEL_MCU_TTY_MAJOR;
+	intel_mcu_tty_driver->minor_start = 0;
+	intel_mcu_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	intel_mcu_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	intel_mcu_tty_driver->flags = TTY_DRIVER_REAL_RAW
+				      | TTY_DRIVER_DYNAMIC_DEV;
+	intel_mcu_tty_driver->init_termios = tty_std_termios;
+	intel_mcu_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
+						     HUPCL | CLOCAL;
+	intel_mcu_tty_driver->init_termios.c_ispeed = 38400;
+	intel_mcu_tty_driver->init_termios.c_ospeed = 38400;
+	intel_mcu_tty_driver->init_termios.c_iflag = 0;
+	intel_mcu_tty_driver->init_termios.c_oflag = 0;
+	intel_mcu_tty_driver->init_termios.c_lflag = 0;
+	tty_set_operations(intel_mcu_tty_driver, &intel_mcu_ops);
+
+	ret = tty_register_driver(intel_mcu_tty_driver);
+	if (ret) {
+		dev_err(&pdev->dev, "fail to register tty driver\n");
+		goto tty_reg_fail;
+	}
+
+	/* One tty port per minor; each gets its own slice of the BAR1 ring. */
+	base = (u8 *)mcu->ddr[1];
+	for (i = INTEL_MCU_TTY_MINORS - 1; i >= 0; i--) {
+		data = kzalloc(sizeof(struct mcu_data), GFP_KERNEL);
+		if (data == NULL) {
+			dev_err(&pdev->dev, "fail to alloc mcu data\n");
+			ret = -ENOMEM;
+			goto data_alloc_fail;
+		}
+
+		data->index = i;
+		tty_port_init(&data->port);
+		data->dev = tty_port_register_device(&data->port,
+				intel_mcu_tty_driver, i, &pdev->dev);
+		mcu_table[i] = data;
+		data->mcu = mcu;
+		init_completion(&data->cmp);
+		data->lbuf.addr = base;
+		data->lbuf.length = BUF_IA_DDR_SIZE;
+		lbuf_read_reset(&data->lbuf);
+		base += BUF_IA_DDR_SIZE;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj,
+				 &intel_mcu_tty_attribute_group);
+	if (ret) {
+		pr_err("failed to create the mdbg sysfs attributes\n");
+		/* nothing was created, so there is nothing to remove */
+		goto data_alloc_fail;
+	}
+
+	intel_psh_ipc_bind(PSH_RECV_CH0, raw_data_handler, mcu_table[0]);
+	intel_psh_ipc_bind(PSH_RECV_CH1, raw_data_handler, mcu_table[1]);
+	intel_psh_ipc_bind(PSH_RECV_CH2, cmd_handler, mcu_table[2]);
+
+	pr_info("MCU detected and ready to use!\n");
+
+	return 0;
+
+data_alloc_fail:
+	for (i = 0; i < INTEL_MCU_TTY_MINORS; i++) {
+		kfree(mcu_table[i]);
+		mcu_table[i] = NULL;
+	}
+	tty_unregister_driver(intel_mcu_tty_driver);
+tty_reg_fail:
+	put_tty_driver(intel_mcu_tty_driver);
+	return ret;
+}
+
+/*
+ * Tear down what mcu_platform_probe() set up: sysfs group, per-minor
+ * tty ports, the tty driver registration and the mcu context.
+ */
+static int mcu_platform_remove(struct platform_device *pdev)
+{
+	struct mcu *mcu;
+	int i;
+
+	mcu = platform_get_drvdata(pdev);
+	sysfs_remove_group(&pdev->dev.kobj,
+			&intel_mcu_tty_attribute_group);
+
+	for (i = 0; i < INTEL_MCU_TTY_MINORS; i++) {
+		/* Ports were registered with tty_port_register_device()
+		 * in probe; unregister them before freeing their data.
+		 */
+		tty_unregister_device(intel_mcu_tty_driver, i);
+		kfree(mcu_table[i]);
+		mcu_table[i] = NULL;
+	}
+	/* The driver was registered in probe; unregister it before
+	 * dropping the final reference, otherwise the tty core keeps a
+	 * stale driver entry.
+	 */
+	tty_unregister_driver(intel_mcu_tty_driver);
+	put_tty_driver(intel_mcu_tty_driver);
+	kfree(mcu);
+
+	return 0;
+}
+
+/*
+ * Platform driver bound to the "intel_mcu" device created by the PCI
+ * probe below.
+ *
+ * NOTE(review): module_platform_driver() expands to module_init()/
+ * module_exit(), but this file also declares module_init(intel_mcu_init)
+ * further down; a module can only have one init/exit pair — confirm this
+ * is built-in only, or register the platform driver from intel_mcu_init.
+ */
+static struct platform_driver intel_mcu_platform = {
+	.driver = {
+		.name = "intel_mcu",
+	},
+	.probe = mcu_platform_probe,
+	.remove = mcu_platform_remove,
+};
+module_platform_driver(intel_mcu_platform);
+
+/*
+ * PCI probe: enable the PSH device, map the two DDR windows shared with
+ * the MCU (BAR0/BAR1 via mem_alloc()) and create the "intel_mcu"
+ * platform device that the platform driver above binds to.
+ */
+static int intel_mcu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct platform_device *dev;
+	struct mcu *mcu;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "fail to enable psh pci device\n");
+		return -ENODEV;
+	}
+
+	mcu = kzalloc(sizeof(struct mcu), GFP_KERNEL);
+	if (!mcu) {
+		dev_err(&pdev->dev, "cannot allocate memory for mcu\n");
+		ret = -ENOMEM;
+		goto mcu_err;
+	}
+
+	ret = mem_alloc(pdev, &mcu->ddr_phy[0], &mcu->ddr[0], 0);
+	if (ret)
+		goto plat_alloc_fail;
+
+	ret = mem_alloc(pdev, &mcu->ddr_phy[1], &mcu->ddr[1], 1);
+	if (ret)
+		goto plat_alloc_fail;
+
+	dev = platform_device_alloc("intel_mcu", -1);
+	if (!dev) {
+		/* allocation failure, not a missing device */
+		ret = -ENOMEM;
+		goto plat_alloc_fail;
+	}
+
+	dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+	platform_set_drvdata(dev, mcu);
+	dev_set_drvdata(&pdev->dev, mcu);
+
+	ret = platform_device_add(dev);
+	if (ret)
+		/* drop the reference taken by platform_device_alloc() */
+		platform_device_put(dev);
+	return ret;
+
+plat_alloc_fail:
+	kfree(mcu);
+mcu_err:
+	/* We never took an extra device reference, so pci_dev_put() would
+	 * underflow the refcount; undo pci_enable_device() instead.
+	 */
+	pci_disable_device(pdev);
+	return ret;
+}
+
+/* PCI remove: unmap the DDR windows and release the BARs. */
+static void intel_mcu_remove(struct pci_dev *pdev)
+{
+	struct mcu *mcu;
+
+	mcu = dev_get_drvdata(&pdev->dev);
+	iounmap((void __iomem *)mcu->ddr[0]);
+	iounmap((void __iomem *)mcu->ddr[1]);
+
+	pci_release_region(pdev, 0);
+	pci_release_region(pdev, 1);
+	/* Balance pci_enable_device() from probe; pci_dev_put() would
+	 * drop a reference this driver never took.
+	 */
+	pci_disable_device(pdev);
+}
+
+/*
+ * PCI id of the PSH (Platform Services Hub) device.
+ * NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated upstream in favor
+ * of a plain "static const struct pci_device_id" array — confirm against
+ * the target kernel version.
+ */
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x11a4)},
+	{ 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+/* PCI driver that discovers the MCU and spawns the platform device. */
+static struct pci_driver intel_mcu_driver = {
+	.name = "intel_mcu",
+	.id_table = pci_ids,
+	.probe = intel_mcu_probe,
+	.remove = intel_mcu_remove,
+};
+
+/* Register the PCI driver at module load, unregister on unload. */
+static int __init intel_mcu_init(void)
+{
+	return pci_register_driver(&intel_mcu_driver);
+}
+
+static void __exit intel_mcu_exit(void)
+{
+	pci_unregister_driver(&intel_mcu_driver);
+}
+
+module_init(intel_mcu_init);
+module_exit(intel_mcu_exit);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(INTEL_MCU_TTY_MAJOR);
--- /dev/null
+#ifndef _EDISON_COMMON_H_
+#define _EDISON_COMMON_H_
+
+/* PSH -> IA IPC channel indices. */
+#define PSH2IA_CHANNEL0	0
+#define PSH2IA_CHANNEL1	1
+#define PSH2IA_CHANNEL2	2
+#define PSH2IA_CHANNEL3	3
+
+/* Commands sent from the IA to the MCU over the IPC link. */
+enum cmd_id {
+	CMD_MCU_LOAD_APP = 0,
+	CMD_MCU_SETUP_DDR,
+	CMD_MCU_APP_DEBUG,
+	CMD_MCU_APP_GET_VERSION,
+};
+
+#define CIRC_SIZE (1024 * 64)
+/* Physical addresses of the two shared DDR windows, sent to the MCU. */
+struct ddr_param {
+	u32 ddr;
+	u32 ddr1;
+} __packed;
+
+#define CMD_DEBUG_SET_MASK ((u8)0x1)
+#define CMD_DEBUG_GET_MASK ((u8)0x2)
+#define MCU_DBG_ALL ((u16)-1)
+#define MCU_DBG_FATAL 1
+#define MCU_DBG_ERR 2
+#define MCU_DBG_WARN 3
+#define MCU_DBG_INFO 4
+#define MCU_DBG_DBG 5
+
+struct cmd_debug_param {
+	u8 sub_cmd;
+	u16 level;
+	char tag[30];
+} __packed;
+
+#define RESP_PARAM_MAX_SIZE 56
+struct cmd_resp {
+	u8 cmd_id;
+	u8 len;
+	int ret;
+	char param[RESP_PARAM_MAX_SIZE];
+} __packed;
+
+struct debug_resp {
+	u16 level;
+} __packed;
+
+struct version_resp {
+	u8 total_length;
+	u8 segment_length;
+	u8 sequence_number;
+	char buf[0];
+} __packed;
+
+#define LBUF_CELL_SIGN ((u16)0x4853)
+#define LBUF_EMPTY_SIGN ((u16)0x0000)
+#define LBUF_DISCARD_SIGN ((u16)0x4944)
+/*
+ * Round 'size' up to a multiple of 4.  The argument is fully
+ * parenthesized so expressions like size_align(a + b) expand correctly.
+ */
+#define size_align(size) \
+	((((size) % 4)) ? ((size) + 4 - ((size) % 4)) : (size))
+#define frame_size(size) (size_align(size) + \
+		sizeof(struct frame_head))
+
+/* One cell in the loop buffer: header followed by the payload. */
+struct frame_head {
+	u16 sign;
+	u16 length;
+	u8 buf[0];
+} __packed;
+
+#define BUF_IA_DDR_SIZE 8192
+/* Per-channel view of the shared DDR ring written by the MCU. */
+struct loop_buffer {
+	int in_reading;
+	u8 *addr;
+	u16 length;
+
+	u16 off_head;
+	u16 off_tail;
+};
+
+#endif
--- /dev/null
+/*
+ * intel_mdf_msic_gpadc.c - Intel Medfield MSIC GPADC Driver
+ *
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/rpmsg.h>
+
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_mid_gpadc.h>
+
+#define VAUDACNT 0x0DB
+#define MCCINT 0x013
+#define IRQLVL1 0x002
+#define IRQLVL1MSK 0x021
+#define ADC1INT 0x003
+#define ADC1ADDR0 0x1C5
+#define ADC1SNS0H 0x1D4
+#define ADC1OFFSETH 0x1C3
+#define ADC1OFFSETL 0x1C4
+#define ADC1CNTL1 0x1C0
+#define ADC1CNTL2 0x1C1
+#define ADC1CNTL3 0x1C2
+#define ADC1BV0H 0x1F2
+#define ADC1BI0H 0x1FA
+
+#ifdef CONFIG_BOARD_CTP
+#define EEPROMCAL1 0x309
+#define EEPROMCAL2 0x30A
+#else
+#define EEPROMCAL1 0x317
+#define EEPROMCAL2 0x318
+#endif
+
+#define MCCINT_MCCCAL (1 << 1)
+#define MCCINT_MOVERFLOW (1 << 0)
+
+#define IRQLVL1MSK_ADCM (1 << 1)
+
+#define ADC1CNTL1_AD1OFFSETEN (1 << 6)
+#define ADC1CNTL1_AD1CALEN (1 << 5)
+#define ADC1CNTL1_ADEN (1 << 4)
+#define ADC1CNTL1_ADSTRT (1 << 3)
+#define ADC1CNTL1_ADSLP 7
+#define ADC1CNTL1_ADSLP_DEF 1
+
+#define ADC1INT_ADC1CAL (1 << 2)
+#define ADC1INT_GSM (1 << 1)
+#define ADC1INT_RND (1 << 0)
+
+#define ADC1CNTL3_ADCTHERM (1 << 2)
+#define ADC1CNTL3_GSMDATARD (1 << 1)
+#define ADC1CNTL3_RRDATARD (1 << 0)
+
+#define ADC1CNTL2_DEF 0x7
+#define ADC1CNTL2_ADCGSMEN (1 << 7)
+
+#define MSIC_STOPCH (1 << 4)
+
+#define GPADC_CH_MAX 15
+
+#define GPADC_POWERON_DELAY 1
+
+#define SAMPLE_CH_MAX 2
+
+static void *adc_handle[GPADC_CH_MAX] = {};
+static int sample_result[GPADC_CH_MAX][SAMPLE_CH_MAX];
+static struct completion gsmadc_complete;
+static int vol_val;
+static int cur_val;
+
+struct gpadc_info {
+ int initialized;
+ int depth;
+
+ struct workqueue_struct *workq;
+ wait_queue_head_t trimming_wait;
+ struct work_struct trimming_work;
+ struct work_struct gsmpulse_work;
+ int trimming_start;
+
+ /* This mutex protects gpadc sample/config from concurrent conflict.
+ Any function, which does the sample or config, needs to
+ hold this lock.
+ If it is locked, it also means the gpadc is in active mode.
+ GSM mode sample does not need to hold this lock. It can be used with
+ normal sample concurrent without poweron.
+ */
+ struct mutex lock;
+ struct device *dev;
+ int irq;
+ void __iomem *intr;
+ int irq_status;
+
+ int vzse;
+ int vge;
+ int izse;
+ int ige;
+ int addr_mask;
+
+ wait_queue_head_t wait;
+ int rnd_done;
+ int conv_done;
+ int gsmpulse_done;
+
+ struct pm_qos_request pm_qos_request;
+ void (*gsmadc_notify)(int vol, int cur);
+
+ int pmic_ipc_status;
+};
+
+struct gpadc_request {
+ int count;
+ int vref;
+ int ch[GPADC_CH_MAX];
+ int addr[GPADC_CH_MAX];
+};
+
+static struct gpadc_info gpadc_info;
+
+/*
+ * Thin wrappers around the SCU IPC PMIC register accessors.
+ *
+ * Once any access fails, pmic_ipc_status latches -EINVAL and every
+ * subsequent wrapper call short-circuits, so a whole sample/config
+ * sequence fails fast; callers reset the flag under mgi->lock before
+ * starting a new sequence.
+ */
+static inline int gpadc_clear_bits(u16 addr, u8 mask)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	/* clear the bits selected by 'mask' (write value 0 under mask) */
+	ret = intel_scu_ipc_update_register(addr, 0, mask);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_set_bits(u16 addr, u8 mask)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	/* set the bits selected by 'mask' (write 0xff under mask) */
+	ret = intel_scu_ipc_update_register(addr, 0xff, mask);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_write(u16 addr, u8 data)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_iowrite8(addr, data);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_read(u16 addr, u8 *data)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_ioread8(addr, data);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+/* Dump the ADC status/control registers to help debug sample timeouts. */
+static void gpadc_dump(struct gpadc_info *mgi)
+{
+	u8 data;
+	int i;
+
+	dev_err(mgi->dev, "pmic ipc status: %s\n",
+			mgi->pmic_ipc_status ? "bad" : "good");
+	gpadc_read(VAUDACNT, &data);
+	dev_err(mgi->dev, "VAUDACNT: 0x%x\n", data);
+	gpadc_read(IRQLVL1MSK, &data);
+	dev_err(mgi->dev, "IRQLVL1MSK: 0x%x\n", data);
+	gpadc_read(IRQLVL1, &data);
+	dev_err(mgi->dev, "IRQLVL1: 0x%x\n", data);
+	gpadc_read(ADC1INT, &data);
+	dev_err(mgi->dev, "ADC1INT: 0x%x\n", data);
+	gpadc_read(ADC1CNTL1, &data);
+	dev_err(mgi->dev, "ADC1CNTL1: 0x%x\n", data);
+	gpadc_read(ADC1CNTL2, &data);
+	dev_err(mgi->dev, "ADC1CNTL2: 0x%x\n", data);
+	gpadc_read(ADC1CNTL3, &data);
+	dev_err(mgi->dev, "ADC1CNTL3: 0x%x\n", data);
+	/* dump the whole channel-address scan list as well */
+	for (i = 0; i < GPADC_CH_MAX; i++) {
+		gpadc_read(ADC1ADDR0+i, &data);
+		dev_err(mgi->dev, "ADC1ADDR[%d]: 0x%x\n", i, data);
+	}
+}
+
+/*
+ * Power the ADC on, refcounted via mgi->depth, and optionally enable
+ * the thermal reference (vref).  Caller must hold mgi->lock.
+ *
+ * NOTE(review): depth is incremented even when the enable write fails,
+ * and the vref/ADCTHERM enable is not refcounted (gpadc_poweroff()
+ * unconditionally clears it on the last user) — confirm both are
+ * intended.
+ */
+static int gpadc_poweron(struct gpadc_info *mgi, int vref)
+{
+	if (!mgi->depth++) {
+		if (gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADEN) != 0)
+			return -EIO;
+		/* let the ADC settle after enabling it */
+		msleep(GPADC_POWERON_DELAY);
+	}
+	if (vref) {
+		if (gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_ADCTHERM) != 0)
+			return -EIO;
+		msleep(GPADC_POWERON_DELAY);
+	}
+	return 0;
+}
+
+/* Drop one power reference; actually disable on the last user. */
+static int gpadc_poweroff(struct gpadc_info *mgi)
+{
+	if (!--mgi->depth) {
+		if (gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADEN) != 0)
+			return -EIO;
+		if (gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_ADCTHERM) != 0)
+			return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Convert a raw ADC code 'rc' into a calibrated code using the
+ * zero-scale error (zse) and gain error (ge) trim values computed in
+ * gpadc_calc_zse_ge().  The formula differs per SoC generation.
+ */
+static int gpadc_calib(int rc, int zse, int ge)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int tmp;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if (ge == 0) {
+			dev_err(mgi->dev, "calibration divider is zero\n");
+			return 0;
+		}
+
+		/**
+		 * For Cloverview, using the calibration data, we have the
+		 * voltage and current after calibration correction as below:
+		 * V_CAL_CODE = 213.33 * (V_RAW_CODE - VZSE) / VGE
+		 * I_CAL_CODE = 213.33 * (I_RAW_CODE - IZSE) / IGE
+		 */
+
+		/* note: the input zse is multiplied by 10,
+		 * input ge is multiplied by 100, need to handle them here
+		 */
+		tmp = 21333 * (10 * rc - zse) / ge;
+	} else {
+		/**
+		 * For Medfield, using the calibration data, we have the
+		 * voltage and current after calibration correction as below:
+		 * V_CAL_CODE = V_RAW_CODE - (VZSE + (VGE)* VRAW_CODE/1023)
+		 * I_CAL_CODE = I_RAW_CODE - (IZSE + (IGE)* IRAW_CODE/1023)
+		 */
+		tmp = (10230 * rc - (10230 * zse + 10 * ge * rc)) / 1023;
+	}
+
+	/* tmp is 10 times of result value,
+	 * and it's used to obtain result's closest integer
+	 */
+	return DIV_ROUND_CLOSEST(tmp, 10);
+
+}
+
+/*
+ * Read the factory trim data (EEPROM calibration registers) and compute
+ * the voltage/current zero-scale (vzse/izse) and gain (vge/ige) errors
+ * consumed by gpadc_calib().
+ *
+ * NOTE(review): on the Medfield branch the sign bits multiply by 1 or 0
+ * (zeroing the trim) whereas Cloverview uses +/-1 — confirm the Medfield
+ * encoding is really "bit set => apply trim" and not a sign.
+ */
+static void gpadc_calc_zse_ge(struct gpadc_info *mgi)
+{
+	u8 data;
+	int fse, zse, fse_sign, zse_sign, ge, ge_sign;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		gpadc_read(EEPROMCAL1, &data);
+		zse = data & 0xf;
+		ge = (data >> 4) & 0xf;
+		gpadc_read(EEPROMCAL2, &data);
+		zse_sign = (data & (1 << 6)) ? -1 : 1;
+		ge_sign = (data & (1 << 7)) ? -1 : 1;
+		zse *= zse_sign;
+		ge *= ge_sign;
+		/* vzse divided by 2 may cause 0.5, x10 to avoid float */
+		mgi->vzse = mgi->izse = zse * 10 / 2;
+		/* vge multiple 100 to avoid float */
+		mgi->vge = mgi->ige = 21333 - (ge * 100 / 4);
+	} else {
+		/* voltage trim */
+		gpadc_read(EEPROMCAL1, &data);
+		zse = (data & 0xf)/2;
+		fse = ((data >> 4) & 0xf)/2;
+		gpadc_read(EEPROMCAL2, &data);
+		zse_sign = (data & (1 << 6)) ? 1 : 0;
+		fse_sign = (data & (1 << 7)) ? 1 : 0;
+		zse *= zse_sign;
+		fse *= fse_sign;
+		mgi->vzse = zse;
+		mgi->vge = fse - zse;
+
+		/* current trim */
+		fse = (data & 0xf)/2;
+		fse_sign = (data & (1 << 5)) ? 1 : 0;
+		fse = ~(fse_sign * fse) + 1;
+		gpadc_read(ADC1OFFSETH, &data);
+		zse = data << 2;
+		gpadc_read(ADC1OFFSETL, &data);
+		zse += data & 0x3;
+		mgi->izse = zse;
+		mgi->ige = fse + zse;
+	}
+}
+
+/*
+ * One-shot worker run from probe: trigger the hardware calibration
+ * cycle, then derive the trim values.  Signals trimming_wait as soon as
+ * it starts so probe can proceed.
+ */
+static void gpadc_trimming(struct work_struct *work)
+{
+	u8 data;
+	struct gpadc_info *mgi =
+		container_of(work, struct gpadc_info, trimming_work);
+
+	mutex_lock(&mgi->lock);
+	mgi->trimming_start = 1;
+	wake_up(&mgi->trimming_wait);
+	if (gpadc_poweron(mgi, 1)) {
+		dev_err(mgi->dev, "power on failed\n");
+		goto failed;
+	}
+	/* calibration */
+	gpadc_read(ADC1CNTL1, &data);
+	data &= ~ADC1CNTL1_AD1OFFSETEN;
+	data |= ADC1CNTL1_AD1CALEN;
+	gpadc_write(ADC1CNTL1, data);
+	gpadc_read(ADC1INT, &data);
+
+	/* workaround: no calibration interrupt is delivered, so sleep
+	 * instead of waiting for ADC1INT_ADC1CAL
+	 */
+	msleep(300);
+	gpadc_set_bits(ADC1INT, ADC1INT_ADC1CAL);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1CALEN);
+
+	gpadc_calc_zse_ge(mgi);
+
+	if (gpadc_poweroff(mgi)) {
+		dev_err(mgi->dev, "power off failed\n");
+		goto failed;
+	}
+
+failed:	/* success path also falls through here; only the unlock runs */
+	mutex_unlock(&mgi->lock);
+}
+
+/*
+ * Hard-irq half: snapshot the interrupt status for the threaded handler.
+ * NOTE(review): on Cloverview the status register is not read — the code
+ * assumes every interrupt is a round-robin (RND) completion; confirm.
+ */
+static irqreturn_t msic_gpadc_isr(int irq, void *data)
+{
+	struct gpadc_info *mgi = data;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		mgi->irq_status = ADC1INT_RND;
+	else
+		mgi->irq_status = readl(mgi->intr) >> 8 & 0xff;
+
+	return IRQ_WAKE_THREAD;
+}
+
+/* Threaded half: dispatch on the snapshotted status bits. */
+static irqreturn_t msic_gpadc_irq(int irq, void *data)
+{
+	struct gpadc_info *mgi = data;
+
+	if (mgi->irq_status & ADC1INT_GSM) {
+		mgi->gsmpulse_done = 1;
+		queue_work(mgi->workq, &mgi->gsmpulse_work);
+	} else if (mgi->irq_status & ADC1INT_RND) {
+		mgi->rnd_done = 1;
+		wake_up(&mgi->wait);
+	} else if (mgi->irq_status & ADC1INT_ADC1CAL) {
+		mgi->conv_done = 1;
+		wake_up(&mgi->wait);
+	} else {
+		/* coulomb counter should be handled by firmware. Ignore it */
+		dev_dbg(mgi->dev, "coulomb counter is not support\n");
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Allocate a slot in the ADC channel scan list (ADC1ADDR0..) for
+ * channel 'ch'.  The last active slot must carry MSIC_STOPCH so the
+ * hardware stops scanning there; when we extend past the previous last
+ * slot we move the stop marker.  Returns the slot index or -EBUSY.
+ * Caller must hold mgi->lock.
+ */
+static int alloc_channel_addr(struct gpadc_info *mgi, int ch)
+{
+	int i;
+	int addr = -EBUSY;
+	int last = 0;
+
+	/* find the currently-last active slot */
+	for (i = 0; i < GPADC_CH_MAX; i++)
+		if (mgi->addr_mask & (1 << i))
+			last = i;
+
+	for (i = 0; i < GPADC_CH_MAX; i++) {
+		if (!(mgi->addr_mask & (1 << i))) {
+			addr = i;
+			mgi->addr_mask |= 1 << i;
+			if (addr > last) {
+				/* new slot becomes the scan terminator */
+				gpadc_clear_bits(ADC1ADDR0+last, MSIC_STOPCH);
+				gpadc_write(ADC1ADDR0+addr, ch|MSIC_STOPCH);
+			} else {
+				gpadc_write(ADC1ADDR0+addr, ch);
+			}
+			break;
+		}
+	}
+	return addr;
+}
+
+/* Release a scan-list slot; move the stop marker back if needed. */
+static void free_channel_addr(struct gpadc_info *mgi, int addr)
+{
+	int last = 0;
+	int i;
+
+	mgi->addr_mask &= ~(1 << addr);
+	for (i = 0; i < GPADC_CH_MAX; i++)
+		if (mgi->addr_mask & (1 << i))
+			last = i;
+	if (addr > last)
+		gpadc_set_bits(ADC1ADDR0+last, MSIC_STOPCH);
+}
+
+/*
+ * Worker run after a GSM-burst interrupt: read the 4 buffered
+ * voltage/current sample pairs, keep the peak of each, calibrate and
+ * hand the result to the registered callback.
+ */
+static void gpadc_gsmpulse_work(struct work_struct *work)
+{
+	int i;
+	u8 data;
+	int tmp;
+	int vol, cur;
+	struct gpadc_info *mgi =
+		container_of(work, struct gpadc_info, gsmpulse_work);
+
+	mutex_lock(&mgi->lock);
+	gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_GSMDATARD);
+
+	vol = 0;
+	cur = 0;
+	for (i = 0; i < 4; i++) {
+		/* each sample is a 10-bit value split across H/L regs */
+		gpadc_read(ADC1BV0H + i * 2, &data);
+		tmp = data << 2;
+		gpadc_read(ADC1BV0H + i * 2 + 1, &data);
+		tmp += data & 0x3;
+		if (tmp > vol)
+			vol = tmp;
+
+		gpadc_read(ADC1BI0H + i * 2, &data);
+		tmp = data << 2;
+		gpadc_read(ADC1BI0H + i * 2 + 1, &data);
+		tmp += data & 0x3;
+		if (tmp > cur)
+			cur = tmp;
+	}
+
+	vol = gpadc_calib(vol, mgi->vzse, mgi->vge);
+	cur = gpadc_calib(cur, mgi->izse, mgi->ige);
+
+	/* ack the interrupt and release the sample buffer */
+	gpadc_set_bits(ADC1INT, ADC1INT_GSM);
+	gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_GSMDATARD);
+	if (mgi->gsmadc_notify)
+		mgi->gsmadc_notify(vol, cur);
+	mutex_unlock(&mgi->lock);
+}
+
+/**
+ * intel_mid_gpadc_gsmpulse_register - power on gsm adc and register a callback
+ * @fn: callback function after gsm adc conversion is completed
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_gsmpulse_register(void(*fn)(int vol, int cur))
+{
+	int ret = 0;
+	struct gpadc_info *mgi = &gpadc_info;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+	mutex_lock(&mgi->lock);
+	/* only one callback at a time; enable GSM burst measurement */
+	if (!mgi->gsmadc_notify) {
+		gpadc_write(ADC1CNTL2, ADC1CNTL2_DEF);
+		gpadc_set_bits(ADC1CNTL2, ADC1CNTL2_ADCGSMEN);
+		mgi->gsmadc_notify = fn;
+	} else {
+		ret = -EBUSY;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_gsmpulse_register);
+
+/**
+ * intel_mid_gpadc_gsmpulse_unregister - power off gsm adc and unregister
+ * the callback
+ * @fn: callback function after gsm adc conversion is completed
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_gsmpulse_unregister(void(*fn)(int vol, int cur))
+{
+	int ret = 0;
+	struct gpadc_info *mgi = &gpadc_info;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+	mutex_lock(&mgi->lock);
+	/* only the owner of the current callback may unregister it */
+	if (mgi->gsmadc_notify == fn) {
+		mgi->gsmadc_notify = NULL;
+		gpadc_clear_bits(ADC1CNTL2, ADC1CNTL2_ADCGSMEN);
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_gsmpulse_unregister);
+
+/**
+ * intel_mid_gpadc_sample - do gpadc sample.
+ * @handle: the gpadc handle
+ * @sample_count: do sample several times and get the average value.
+ * @...: sampling resulting arguments of all channels. refer to sscanf.
+ *       caller should not access it before return.
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ *
+ * NOTE(review): this is a near duplicate of get_gpadc_sample() below
+ * (varargs vs. buffer output); keep the two in sync or fold one into
+ * the other.
+ */
+int intel_mid_gpadc_sample(void *handle, int sample_count, ...)
+{
+
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+	u8 data;
+	int ret = 0;
+	int count;
+	int tmp;
+	int *val[GPADC_CH_MAX];
+	va_list args;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	/* collect the caller's output pointers, one per channel */
+	va_start(args, sample_count);
+	for (i = 0; i < rq->count; i++) {
+		val[i] = va_arg(args, int*);
+		*val[i] = 0;
+	}
+	va_end(args);
+
+	/* keep the CPU out of deep C-states while sampling */
+	pm_qos_add_request(&mgi->pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY, CSTATE_EXIT_LATENCY_S0i1-1);
+	gpadc_poweron(mgi, rq->vref);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1OFFSETEN);
+	gpadc_read(ADC1CNTL1, &data);
+	data = (data & ~ADC1CNTL1_ADSLP) + ADC1CNTL1_ADSLP_DEF;
+	gpadc_write(ADC1CNTL1, data);
+	mgi->rnd_done = 0;
+	gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	for (count = 0; count < sample_count; count++) {
+		if (wait_event_timeout(mgi->wait, mgi->rnd_done, HZ) == 0) {
+			gpadc_dump(mgi);
+			dev_err(mgi->dev, "sample timeout\n");
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		for (i = 0; i < rq->count; ++i) {
+			/* 10-bit result split across H/L registers */
+			tmp = 0;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i], &data);
+			tmp += data << 2;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i] + 1, &data);
+			tmp += data & 0x3;
+
+			if (rq->ch[i] & CH_NEED_VCALIB)
+				tmp = gpadc_calib(tmp, mgi->vzse, mgi->vge);
+			if (rq->ch[i] & CH_NEED_ICALIB)
+				tmp = gpadc_calib(tmp, mgi->izse, mgi->ige);
+
+			*val[i] += tmp;
+		}
+		gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		mgi->rnd_done = 0;
+	}
+
+	/* report the average over sample_count rounds */
+	for (i = 0; i < rq->count; ++i)
+		*val[i] /= sample_count;
+
+fail:
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	gpadc_poweroff(mgi);
+	pm_qos_remove_request(&mgi->pm_qos_request);
+
+	if (mgi->pmic_ipc_status) {
+		dev_err(mgi->dev, "sample broken\n");
+		ret = mgi->pmic_ipc_status;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_sample);
+
+/**
+ * get_gpadc_sample() - get gpadc sample.
+ * @handle: the gpadc handle
+ * @sample_count: do sample several times and get the average value.
+ * @buffer: sampling resulting arguments of all channels.
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ *
+ * NOTE(review): duplicates intel_mid_gpadc_sample() except for the
+ * output convention (array instead of varargs); any fix must be applied
+ * to both.
+ */
+int get_gpadc_sample(void *handle, int sample_count, int *buffer)
+{
+
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+	u8 data;
+	int ret = 0;
+	int count;
+	int tmp;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	for (i = 0; i < rq->count; i++)
+		buffer[i] = 0;
+
+	/* keep the CPU out of deep C-states while sampling */
+	pm_qos_add_request(&mgi->pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY, CSTATE_EXIT_LATENCY_S0i1-1);
+	gpadc_poweron(mgi, rq->vref);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1OFFSETEN);
+	gpadc_read(ADC1CNTL1, &data);
+	data = (data & ~ADC1CNTL1_ADSLP) + ADC1CNTL1_ADSLP_DEF;
+	gpadc_write(ADC1CNTL1, data);
+	mgi->rnd_done = 0;
+	gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	for (count = 0; count < sample_count; count++) {
+		if (wait_event_timeout(mgi->wait, mgi->rnd_done, HZ) == 0) {
+			gpadc_dump(mgi);
+			dev_err(mgi->dev, "sample timeout\n");
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		for (i = 0; i < rq->count; ++i) {
+			/* 10-bit result split across H/L registers */
+			tmp = 0;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i], &data);
+			tmp += data << 2;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i] + 1, &data);
+			tmp += data & 0x3;
+
+			if (rq->ch[i] & CH_NEED_VCALIB)
+				tmp = gpadc_calib(tmp, mgi->vzse, mgi->vge);
+			if (rq->ch[i] & CH_NEED_ICALIB)
+				tmp = gpadc_calib(tmp, mgi->izse, mgi->ige);
+			buffer[i] += tmp;
+		}
+		gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		mgi->rnd_done = 0;
+	}
+
+	/* report the average over sample_count rounds */
+	for (i = 0; i < rq->count; ++i)
+		buffer[i] /= sample_count;
+
+fail:
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	gpadc_poweroff(mgi);
+	pm_qos_remove_request(&mgi->pm_qos_request);
+	if (mgi->pmic_ipc_status) {
+		dev_err(mgi->dev, "sample broken\n");
+		ret = mgi->pmic_ipc_status;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(get_gpadc_sample);
+
+/**
+ * intel_mid_gpadc_free - free gpadc
+ * @handle: the gpadc handle
+ *
+ * This function may sleep.
+ */
+void intel_mid_gpadc_free(void *handle)
+{
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+
+	mutex_lock(&mgi->lock);
+	/* reset the latch so we can detect IPC failures during teardown */
+	mgi->pmic_ipc_status = 0;
+	for (i = 0; i < rq->count; i++)
+		free_channel_addr(mgi, rq->addr[i]);
+
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc free broken\n");
+
+	mutex_unlock(&mgi->lock);
+	kfree(rq);
+}
+EXPORT_SYMBOL(intel_mid_gpadc_free);
+
+/**
+ * intel_mid_gpadc_alloc - allocate gpadc for channels
+ * @count: the count of channels
+ * @...: the channel parameters. (channel idx | flags)
+ *       flags:
+ *       CH_NEED_VCALIB  it needs voltage calibration
+ *       CH_NEED_ICALIB  it needs current calibration
+ *
+ * Returns gpadc handle on success or NULL on fail.
+ *
+ * This function may sleep.
+ */
+void *intel_mid_gpadc_alloc(int count, ...)
+{
+	struct gpadc_request *rq;
+	struct gpadc_info *mgi = &gpadc_info;
+	va_list args;
+	int ch;
+	int i;
+
+	if (!mgi->initialized)
+		return NULL;
+
+	rq = kzalloc(sizeof(struct gpadc_request), GFP_KERNEL);
+	if (rq == NULL)
+		return NULL;
+
+	va_start(args, count);
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	rq->count = count;
+	for (i = 0; i < count; i++) {
+		ch = va_arg(args, int);
+		rq->ch[i] = ch;
+		if (ch & CH_NEED_VREF)
+			rq->vref = 1;
+		/* low nibble is the hardware channel index */
+		ch &= 0xf;
+		rq->addr[i] = alloc_channel_addr(mgi, ch);
+		if (rq->addr[i] < 0) {
+			dev_err(mgi->dev, "alloc addr failed\n");
+			/* roll back the slots already claimed */
+			while (i-- > 0)
+				free_channel_addr(mgi, rq->addr[i]);
+			kfree(rq);
+			rq = NULL;
+			break;
+		}
+	}
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc alloc broken\n");
+
+	mutex_unlock(&mgi->lock);
+	va_end(args);
+
+	return rq;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_alloc);
+
+/**
+ * gpadc_alloc_channels - allocate gpadc for channels
+ * @n: the count of channels
+ * @channel_info: the channel parameters. (channel idx | flags)
+ *       flags:
+ *       CH_NEED_VCALIB  it needs voltage calibration
+ *       CH_NEED_ICALIB  it needs current calibration
+ *
+ * Returns gpadc handle on success or NULL on fail.
+ *
+ * This function may sleep.
+ *
+ * TODO: Cleanup intel_mid_gpadc_alloc() once all its users
+ * are moved to gpadc_alloc_channels()
+ *
+ */
+
+void *gpadc_alloc_channels(int n, int *channel_info)
+{
+	struct gpadc_request *rq;
+	struct gpadc_info *mgi = &gpadc_info;
+	int ch;
+	int i;
+
+	if (!mgi->initialized)
+		return NULL;
+
+	rq = kzalloc(sizeof(struct gpadc_request), GFP_KERNEL);
+	if (rq == NULL)
+		return NULL;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	rq->count = n;
+	for (i = 0; i < n; i++) {
+		ch = channel_info[i];
+		rq->ch[i] = ch;
+		if (ch & CH_NEED_VREF)
+			rq->vref = 1;
+		/* low nibble is the hardware channel index */
+		ch &= 0xf;
+		rq->addr[i] = alloc_channel_addr(mgi, ch);
+		if (rq->addr[i] < 0) {
+			dev_err(mgi->dev, "alloc addr failed\n");
+			/* roll back the slots already claimed */
+			while (i-- > 0)
+				free_channel_addr(mgi, rq->addr[i]);
+			kfree(rq);
+			rq = NULL;
+			break;
+		}
+	}
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc alloc broken\n");
+
+	mutex_unlock(&mgi->lock);
+
+	return rq;
+}
+EXPORT_SYMBOL(gpadc_alloc_channels);
+
+/*
+ * sysfs: "alloc_channel" — "<handle> <ch0-hex> [<ch1-hex>]".  Allocates
+ * one or two channels into the 1-based handle slot in adc_handle[].
+ */
+static ssize_t intel_mid_gpadc_store_alloc_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int val, hdn;
+	int ch[SAMPLE_CH_MAX];
+
+	val = sscanf(buf, "%d %x %x", &hdn, &ch[0], &ch[1]);
+
+	if (val < 2 || val > 3) {
+		dev_err(dev, "invalid number of arguments");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	if (adc_handle[hdn - 1]) {
+		dev_err(dev, "adc handle %d has been occupied", hdn);
+		return -EBUSY;
+	}
+
+	if (val == 2)
+		adc_handle[hdn - 1] = intel_mid_gpadc_alloc(1, ch[0]);
+	else
+		adc_handle[hdn - 1] = intel_mid_gpadc_alloc(2, ch[0], ch[1]);
+
+	if (!adc_handle[hdn - 1]) {
+		dev_err(dev, "allocating adc handle %d failed", hdn);
+		return -ENOMEM;
+	}
+
+	return size;
+}
+
+/* sysfs: "free_channel" — "<handle>".  Frees the handle slot if set. */
+static ssize_t intel_mid_gpadc_store_free_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int hdn;
+
+	if (sscanf(buf, "%d", &hdn) != 1) {
+		dev_err(dev, "invalid number of argument");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	if (adc_handle[hdn - 1]) {
+		intel_mid_gpadc_free(adc_handle[hdn - 1]);
+		adc_handle[hdn - 1] = NULL;
+	}
+
+	return size;
+}
+
+/*
+ * sysfs: write "sample" — "<handle> <sample_count>".  Runs a sample on
+ * the handle and caches results in sample_result[] for the show below.
+ */
+static ssize_t intel_mid_gpadc_store_sample(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int hdn, spc;
+	int ret;
+	struct gpadc_request *rq;
+
+	if (sscanf(buf, "%d %d", &hdn, &spc) != 2) {
+		dev_err(dev, "invalid number of arguments");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	rq = adc_handle[hdn - 1];
+	if (!rq) {
+		dev_err(dev, "null handle");
+		return -EINVAL;
+	}
+
+	/* the varargs API needs one output pointer per channel */
+	if (rq->count == 1)
+		ret = intel_mid_gpadc_sample(adc_handle[hdn-1],
+				spc, &sample_result[hdn - 1][0]);
+	else
+		ret = intel_mid_gpadc_sample(adc_handle[hdn - 1],
+				spc, &sample_result[hdn - 1][0],
+				&sample_result[hdn - 1][1]);
+
+	if (ret) {
+		dev_err(dev, "sampling failed. adc handle: %d", hdn);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+/* sysfs: read "sample" — dump cached results of every active handle. */
+static ssize_t intel_mid_gpadc_show_sample(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int hdc;
+	int used = 0;
+	struct gpadc_request *rq;
+
+	for (hdc = 0; hdc < GPADC_CH_MAX; hdc++) {
+		if (adc_handle[hdc]) {
+			rq = adc_handle[hdc];
+			if (rq->count == 1)
+				used += snprintf(buf + used, PAGE_SIZE - used,
+					"%d ", sample_result[hdc][0]);
+			else
+				used += snprintf(buf + used, PAGE_SIZE - used,
+					"%d %d ", sample_result[hdc][0],
+					sample_result[hdc][1]);
+		}
+	}
+
+	return used;
+}
+
+
+/* Bridge the gsmpulse callback into a completion for the sysfs read. */
+static void gsmpulse_sysfs_callback(int vol, int cur)
+{
+	vol_val = vol;
+	cur_val = cur;
+	complete(&gsmadc_complete);
+}
+
+/*
+ * sysfs: read "gsmpulse_sample" — block until one GSM burst measurement
+ * arrives, then report "<vol> <cur>".
+ * NOTE(review): the return value of ..._gsmpulse_register() is ignored;
+ * if another callback is registered (-EBUSY) this waits for a completion
+ * that never fires until interrupted — confirm acceptable.
+ */
+static ssize_t intel_mid_gpadc_show_gsmpulse_sample(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+
+	INIT_COMPLETION(gsmadc_complete);
+	intel_mid_gpadc_gsmpulse_register(gsmpulse_sysfs_callback);
+	ret = wait_for_completion_interruptible(&gsmadc_complete);
+	intel_mid_gpadc_gsmpulse_unregister(gsmpulse_sysfs_callback);
+	if (ret)
+		return 0;
+	else
+		return snprintf(buf, PAGE_SIZE, "%d %d", vol_val, cur_val);
+}
+
+static DEVICE_ATTR(alloc_channel, S_IWUSR, NULL,
+ intel_mid_gpadc_store_alloc_channel);
+static DEVICE_ATTR(free_channel, S_IWUSR, NULL,
+ intel_mid_gpadc_store_free_channel);
+static DEVICE_ATTR(sample, S_IRUGO | S_IWUSR,
+ intel_mid_gpadc_show_sample, intel_mid_gpadc_store_sample);
+static DEVICE_ATTR(gsmpulse_sample, S_IRUGO,
+ intel_mid_gpadc_show_gsmpulse_sample, NULL);
+
+static struct attribute *intel_mid_gpadc_attrs[] = {
+ &dev_attr_alloc_channel.attr,
+ &dev_attr_free_channel.attr,
+ &dev_attr_sample.attr,
+ &dev_attr_gsmpulse_sample.attr,
+ NULL,
+};
+
+static struct attribute_group intel_mid_gpadc_attr_group = {
+ .name = "mid_gpadc",
+ .attrs = intel_mid_gpadc_attrs,
+};
+
+/*
+ * Platform probe: map the interrupt status word, request the threaded
+ * IRQ, run the one-shot trimming worker and export the sysfs group.
+ */
+static int msic_gpadc_probe(struct platform_device *pdev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	struct intel_mid_gpadc_platform_data *pdata = pdev->dev.platform_data;
+	int err = 0;
+
+	/* pdata->intr is dereferenced below; fail early if it is absent */
+	if (!pdata)
+		return -EINVAL;
+
+	mutex_init(&mgi->lock);
+	init_waitqueue_head(&mgi->wait);
+	init_waitqueue_head(&mgi->trimming_wait);
+	mgi->workq = create_singlethread_workqueue(dev_name(&pdev->dev));
+	if (mgi->workq == NULL)
+		return -ENOMEM;
+
+	mgi->dev = &pdev->dev;
+	mgi->intr = ioremap_nocache(pdata->intr, 4);
+	mgi->irq = platform_get_irq(pdev, 0);
+
+	gpadc_clear_bits(IRQLVL1MSK, IRQLVL1MSK_ADCM);
+	if (request_threaded_irq(mgi->irq, msic_gpadc_isr, msic_gpadc_irq,
+					IRQF_ONESHOT, "msic_adc", mgi)) {
+		dev_err(&pdev->dev, "unable to register irq %d\n", mgi->irq);
+		err = -ENODEV;
+		goto err_exit;
+	}
+
+	/* empty scan list: first slot is the stop marker */
+	gpadc_write(ADC1ADDR0, MSIC_STOPCH);
+	INIT_WORK(&mgi->trimming_work, gpadc_trimming);
+	INIT_WORK(&mgi->gsmpulse_work, gpadc_gsmpulse_work);
+	queue_work(mgi->workq, &mgi->trimming_work);
+	/* wait until the worker owns mgi->lock before declaring ready */
+	wait_event(mgi->trimming_wait, mgi->trimming_start);
+	mgi->initialized = 1;
+
+	init_completion(&gsmadc_complete);
+
+	err = sysfs_create_group(&pdev->dev.kobj,
+			&intel_mid_gpadc_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to export sysfs interface, error: %d\n",
+			err);
+		goto err_release_irq;
+	}
+
+	return 0;
+
+err_release_irq:
+	free_irq(mgi->irq, mgi);
+err_exit:
+	if (mgi->intr)
+		iounmap(mgi->intr);
+	/* do not leak the workqueue created above on any failure path */
+	destroy_workqueue(mgi->workq);
+	return err;
+}
+
+/* Undo msic_gpadc_probe(): sysfs group, IRQ, mapping, workqueue. */
+static int msic_gpadc_remove(struct platform_device *pdev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	sysfs_remove_group(&pdev->dev.kobj, &intel_mid_gpadc_attr_group);
+	free_irq(mgi->irq, mgi);
+	iounmap(mgi->intr);
+	flush_workqueue(mgi->workq);
+	destroy_workqueue(mgi->workq);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Refuse to suspend while a sample/config sequence is in flight: the
+ * lock is taken here and held until resume_noirq releases it, which
+ * also blocks new sequences for the duration of suspend.
+ */
+static int msic_gpadc_suspend_noirq(struct device *dev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	/* If the gpadc is locked, it means gpadc is still in active mode. */
+	if (mutex_trylock(&mgi->lock))
+		return 0;
+	else
+		return -EBUSY;
+}
+
+static int msic_gpadc_resume_noirq(struct device *dev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	/* release the lock taken in suspend_noirq */
+	mutex_unlock(&mgi->lock);
+	return 0;
+}
+#else
+#define msic_gpadc_suspend_noirq	NULL
+#define msic_gpadc_resume_noirq		NULL
+#endif
+
+static const struct dev_pm_ops msic_gpadc_driver_pm_ops = {
+	.suspend_noirq	= msic_gpadc_suspend_noirq,
+	.resume_noirq	= msic_gpadc_resume_noirq,
+};
+
+static struct platform_driver msic_gpadc_driver = {
+	.driver = {
+		   .name = "msic_adc",
+		   .owner = THIS_MODULE,
+		   .pm = &msic_gpadc_driver_pm_ops,
+		   },
+	.probe = msic_gpadc_probe,
+	.remove = msic_gpadc_remove,
+};
+
+/* Registered/unregistered from the rpmsg probe/remove below. */
+static int msic_gpadc_module_init(void)
+{
+	return platform_driver_register(&msic_gpadc_driver);
+}
+
+static void msic_gpadc_module_exit(void)
+{
+	platform_driver_unregister(&msic_gpadc_driver);
+}
+
+/*
+ * rpmsg glue: the SCU announces the "rpmsg_msic_adc" channel; only then
+ * is the platform driver above registered.
+ */
+static int msic_adc_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed msic_gpadc rpmsg device\n");
+
+	ret = msic_gpadc_module_init();
+
+out:
+	return ret;
+}
+
+static void msic_adc_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	msic_gpadc_module_exit();
+	dev_info(&rpdev->dev, "Removed msic_gpadc rpmsg device\n");
+}
+
+/* No inbound messages are expected on this channel; just log them. */
+static void msic_adc_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+static struct rpmsg_device_id msic_adc_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_msic_adc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, msic_adc_rpmsg_id_table);
+
+static struct rpmsg_driver msic_adc_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= msic_adc_rpmsg_id_table,
+	.probe		= msic_adc_rpmsg_probe,
+	.callback	= msic_adc_rpmsg_cb,
+	.remove		= msic_adc_rpmsg_remove,
+};
+
+static int __init msic_adc_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&msic_adc_rpmsg);
+}
+
+/* Built-in: register late so the SCU/rpmsg core is up first. */
+#ifdef MODULE
+module_init(msic_adc_rpmsg_init);
+#else
+rootfs_initcall(msic_adc_rpmsg_init);
+#endif
+
+static void __exit msic_adc_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&msic_adc_rpmsg);
+}
+module_exit(msic_adc_rpmsg_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("Intel Medfield MSIC GPADC Driver");
+MODULE_LICENSE("GPL");
devices such as DaVinci NIC.
For details please see http://www.ti.com/davinci
+config I2C_DESIGNWARE_CORE_FORK
+ tristate "Synopsys DesignWare Controller"
+ help
+	  If you say yes to this option, support will be included for the
+	  Synopsys DesignWare adapter core. Only master mode is supported.
+	  You need to choose either the platform or the PCI driver to make
+	  use of it.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-designware-core.
+
+config I2C_DESIGNWARE_PCI_FORK
+ tristate "Synopsys DesignWare PCI"
+ depends on PCI && I2C_DESIGNWARE_CORE_FORK
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-designware-pci.
+
+config I2C_DESIGNWARE_PLATFORM_FORK
+ tristate "Synopsys DesignWare Platform"
+ depends on I2C_DESIGNWARE_CORE_FORK
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-designware-platform.
+
+config I2C_DW_SPEED_MODE_DEBUG
+ bool "Designware I2C Speed Mode Debug"
+ depends on I2C_DESIGNWARE_CORE_FORK
+ help
+ If you say yes to this option, you could runtime change the I2C
+ controller bus speed mode.
+
+config I2C_PMIC
+ bool "PMIC I2C Adapter"
+ depends on INTEL_SCU_IPC
+ help
+ Say Y here if you have PMIC I2C adapter.
+
+ PMIC-I2C adapter driver to handle I2C transactions
+ in the PMIC's I2C bus.
+
config I2C_DESIGNWARE_CORE
tristate
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
+obj-$(CONFIG_I2C_DESIGNWARE_CORE_FORK) += i2c-designware-core.o
+obj-$(CONFIG_I2C_DESIGNWARE_PCI_FORK) += i2c-designware-pci.o
+i2c-designware-pci-objs := i2c-designware-pcidrv.o
+obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM_FORK) += i2c-designware-platform.o
+i2c-designware-platform-objs := i2c-designware-platdrv.o
+
+obj-$(CONFIG_I2C_PMIC) += i2c-pmic.o
+
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
* ----------------------------------------------------------------------------
*
*/
-#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/nmi.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/acpi.h>
#include "i2c-designware-core.h"
-/*
- * Registers offset
- */
-#define DW_IC_CON 0x0
-#define DW_IC_TAR 0x4
-#define DW_IC_DATA_CMD 0x10
-#define DW_IC_SS_SCL_HCNT 0x14
-#define DW_IC_SS_SCL_LCNT 0x18
-#define DW_IC_FS_SCL_HCNT 0x1c
-#define DW_IC_FS_SCL_LCNT 0x20
-#define DW_IC_INTR_STAT 0x2c
-#define DW_IC_INTR_MASK 0x30
-#define DW_IC_RAW_INTR_STAT 0x34
-#define DW_IC_RX_TL 0x38
-#define DW_IC_TX_TL 0x3c
-#define DW_IC_CLR_INTR 0x40
-#define DW_IC_CLR_RX_UNDER 0x44
-#define DW_IC_CLR_RX_OVER 0x48
-#define DW_IC_CLR_TX_OVER 0x4c
-#define DW_IC_CLR_RD_REQ 0x50
-#define DW_IC_CLR_TX_ABRT 0x54
-#define DW_IC_CLR_RX_DONE 0x58
-#define DW_IC_CLR_ACTIVITY 0x5c
-#define DW_IC_CLR_STOP_DET 0x60
-#define DW_IC_CLR_START_DET 0x64
-#define DW_IC_CLR_GEN_CALL 0x68
-#define DW_IC_ENABLE 0x6c
-#define DW_IC_STATUS 0x70
-#define DW_IC_TXFLR 0x74
-#define DW_IC_RXFLR 0x78
-#define DW_IC_TX_ABRT_SOURCE 0x80
-#define DW_IC_ENABLE_STATUS 0x9c
-#define DW_IC_COMP_PARAM_1 0xf4
-#define DW_IC_COMP_TYPE 0xfc
-#define DW_IC_COMP_TYPE_VALUE 0x44570140
-
-#define DW_IC_INTR_RX_UNDER 0x001
-#define DW_IC_INTR_RX_OVER 0x002
-#define DW_IC_INTR_RX_FULL 0x004
-#define DW_IC_INTR_TX_OVER 0x008
-#define DW_IC_INTR_TX_EMPTY 0x010
-#define DW_IC_INTR_RD_REQ 0x020
-#define DW_IC_INTR_TX_ABRT 0x040
-#define DW_IC_INTR_RX_DONE 0x080
-#define DW_IC_INTR_ACTIVITY 0x100
-#define DW_IC_INTR_STOP_DET 0x200
-#define DW_IC_INTR_START_DET 0x400
-#define DW_IC_INTR_GEN_CALL 0x800
-
-#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
- DW_IC_INTR_TX_EMPTY | \
- DW_IC_INTR_TX_ABRT | \
- DW_IC_INTR_STOP_DET)
-
-#define DW_IC_STATUS_ACTIVITY 0x1
-
-#define DW_IC_ERR_TX_ABRT 0x1
-
-/*
- * status codes
- */
-#define STATUS_IDLE 0x0
-#define STATUS_WRITE_IN_PROGRESS 0x1
-#define STATUS_READ_IN_PROGRESS 0x2
-
-#define TIMEOUT 20 /* ms */
-
-/*
- * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
- *
- * only expected abort codes are listed here
- * refer to the datasheet for the full list
- */
-#define ABRT_7B_ADDR_NOACK 0
-#define ABRT_10ADDR1_NOACK 1
-#define ABRT_10ADDR2_NOACK 2
-#define ABRT_TXDATA_NOACK 3
-#define ABRT_GCALL_NOACK 4
-#define ABRT_GCALL_READ 5
-#define ABRT_SBYTE_ACKDET 7
-#define ABRT_SBYTE_NORSTRT 9
-#define ABRT_10B_RD_NORSTRT 10
-#define ABRT_MASTER_DIS 11
-#define ARB_LOST 12
-
-#define DW_IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK)
-#define DW_IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK)
-#define DW_IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK)
-#define DW_IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK)
-#define DW_IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK)
-#define DW_IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ)
-#define DW_IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET)
-#define DW_IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT)
-#define DW_IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT)
-#define DW_IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS)
-#define DW_IC_TX_ARB_LOST (1UL << ARB_LOST)
-
-#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \
- DW_IC_TX_ABRT_10ADDR1_NOACK | \
- DW_IC_TX_ABRT_10ADDR2_NOACK | \
- DW_IC_TX_ABRT_TXDATA_NOACK | \
- DW_IC_TX_ABRT_GCALL_NOACK)
+int i2c_dw_init(struct dw_i2c_dev *dev);
+int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num);
+u32 i2c_dw_func(struct i2c_adapter *adap);
+void i2c_dw_enable(struct dw_i2c_dev *dev);
+void i2c_dw_disable(struct dw_i2c_dev *dev);
+irqreturn_t i2c_dw_isr(int this_irq, void *dev_id);
+void i2c_dw_disable_int(struct dw_i2c_dev *dev);
+void i2c_dw_clear_int(struct dw_i2c_dev *dev);
static char *abort_sources[] = {
[ABRT_7B_ADDR_NOACK] =
u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
- u32 value;
-
- if (dev->accessor_flags & ACCESS_16BIT)
- value = readw(dev->base + offset) |
- (readw(dev->base + offset + 2) << 16);
- else
- value = readl(dev->base + offset);
+ u32 value = readl(dev->base + offset);
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->swab)
return swab32(value);
else
return value;
void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->swab)
b = swab32(b);
- if (dev->accessor_flags & ACCESS_16BIT) {
- writew((u16)b, dev->base + offset);
- writew((u16)(b >> 16), dev->base + offset + 2);
+ writel(b, dev->base + offset);
+}
+
+static void i2c_dw_dump(struct dw_i2c_dev *dev)
+{
+ u32 value;
+
+ dev_err(dev->dev, "===== REGISTER DUMP (i2c) =====\n");
+ value = dw_readl(dev, DW_IC_CON);
+ dev_err(dev->dev, "DW_IC_CON: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_TAR);
+ dev_err(dev->dev, "DW_IC_TAR: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_SS_SCL_HCNT);
+ dev_err(dev->dev, "DW_IC_SS_SCL_HCNT: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_SS_SCL_LCNT);
+ dev_err(dev->dev, "DW_IC_SS_SCL_LCNT: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_FS_SCL_HCNT);
+ dev_err(dev->dev, "DW_IC_FS_SCL_HCNT: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_FS_SCL_LCNT);
+ dev_err(dev->dev, "DW_IC_FS_SCL_LCNT: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_INTR_STAT);
+ dev_err(dev->dev, "DW_IC_INTR_STAT: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_INTR_MASK);
+ dev_err(dev->dev, "DW_IC_INTR_MASK: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_RAW_INTR_STAT);
+ dev_err(dev->dev, "DW_IC_RAW_INTR_STAT: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_RX_TL);
+ dev_err(dev->dev, "DW_IC_RX_TL: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_TX_TL);
+ dev_err(dev->dev, "DW_IC_TX_TL: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_ENABLE);
+ dev_err(dev->dev, "DW_IC_ENABLE: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_STATUS);
+ dev_err(dev->dev, "DW_IC_STATUS: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_TXFLR);
+ dev_err(dev->dev, "DW_IC_TXFLR: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_RXFLR);
+ dev_err(dev->dev, "DW_IC_RXFLR: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_TX_ABRT_SOURCE);
+ dev_err(dev->dev, "DW_IC_TX_ABRT_SOURCE: 0x%x\n", value);
+ value = dw_readl(dev, DW_IC_DATA_CMD);
+ dev_err(dev->dev, "DW_IC_DATA_CMD: 0x%x\n", value);
+ dev_err(dev->dev, "===============================\n");
+}
+
+/* VLV2 PCI config space memio access to the controller is
+ * enabled by:
+ * 1. Resetting the 0x804 and 0x808 offsets from the base address.
+ * 2. Setting the 0x804 offset from the base address to 0x3.
+ */
+static void vlv2_reset(struct dw_i2c_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ dw_writel(dev, 0, 0x804);
+ dw_writel(dev, 0, 0x808);
+ usleep_range(10, 100);
+
+ dw_writel(dev, 3, 0x804);
+ usleep_range(10, 100);
+
+ if (dw_readl(dev, DW_IC_COMP_TYPE) != DW_IC_COMP_TYPE_VALUE)
+ continue;
+
+ return;
+ }
+
+ dev_warn(dev->dev, "vlv2 I2C reset failed\n");
+}
+
+static int mfld_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+ dw_writel(dev, PNW_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, PNW_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+ dw_writel(dev, PNW_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, PNW_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+ return 0;
+}
+
+static int ctp_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+ dw_writel(dev, CLV_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, CLV_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+ dw_writel(dev, CLV_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, CLV_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+ return 0;
+}
+
+static int merr_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+ dw_writel(dev, MERR_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, MERR_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+ dw_writel(dev, MERR_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, MERR_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+ dw_writel(dev, MERR_HS_SCLK_HCNT, DW_IC_HS_SCL_HCNT);
+ dw_writel(dev, MERR_HS_SCLK_LCNT, DW_IC_HS_SCL_LCNT);
+
+ return 0;
+}
+
+static int vlv2_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+ dw_writel(dev, VLV2_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, VLV2_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+ dw_writel(dev, VLV2_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, VLV2_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+ dw_writel(dev, VLV2_HS_SCLK_HCNT, DW_IC_HS_SCL_HCNT);
+ dw_writel(dev, VLV2_HS_SCLK_LCNT, DW_IC_HS_SCL_LCNT);
+
+ return 0;
+}
+
+static struct dw_controller dw_controllers[] = {
+ [moorestown_0] = {
+ .bus_num = 0,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [moorestown_1] = {
+ .bus_num = 1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [moorestown_2] = {
+ .bus_num = 2,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .clk_khz = 25000,
+ },
+ [medfield_0] = {
+ .bus_num = 0,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = mfld_i2c_scl_cfg,
+ },
+ [medfield_1] = {
+ .bus_num = 1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 20500,
+ .scl_cfg = mfld_i2c_scl_cfg,
+ },
+ [medfield_2] = {
+ .bus_num = 2,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = mfld_i2c_scl_cfg,
+ },
+ [medfield_3] = {
+ .bus_num = 3,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 20500,
+ .scl_cfg = mfld_i2c_scl_cfg,
+ },
+ [medfield_4] = {
+ .bus_num = 4,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = mfld_i2c_scl_cfg,
+ },
+ [medfield_5] = {
+ .bus_num = 5,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = mfld_i2c_scl_cfg,
+ },
+
+ [cloverview_0] = {
+ .bus_num = 0,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = ctp_i2c_scl_cfg,
+ },
+ [cloverview_1] = {
+ .bus_num = 1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = ctp_i2c_scl_cfg,
+ },
+ [cloverview_2] = {
+ .bus_num = 2,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = ctp_i2c_scl_cfg,
+ },
+ [cloverview_3] = {
+ .bus_num = 3,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 20500,
+ .scl_cfg = ctp_i2c_scl_cfg,
+ },
+ [cloverview_4] = {
+ .bus_num = 4,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = ctp_i2c_scl_cfg,
+ },
+ [cloverview_5] = {
+ .bus_num = 5,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 256,
+ .rx_fifo_depth = 256,
+ .clk_khz = 17000,
+ .scl_cfg = ctp_i2c_scl_cfg,
+ },
+
+ [merrifield_0] = {
+ .bus_num = 1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = merr_i2c_scl_cfg,
+ },
+ [merrifield_1] = {
+ .bus_num = 2,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = merr_i2c_scl_cfg,
+ },
+ [merrifield_2] = {
+ .bus_num = 3,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = merr_i2c_scl_cfg,
+ },
+ [merrifield_3] = {
+ .bus_num = 4,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = merr_i2c_scl_cfg,
+ },
+ [merrifield_4] = {
+ .bus_num = 5,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = merr_i2c_scl_cfg,
+ },
+ [merrifield_5] = {
+ .bus_num = 6,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = merr_i2c_scl_cfg,
+ },
+ [merrifield_6] = {
+ .bus_num = 7,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = merr_i2c_scl_cfg,
+ },
+ [valleyview_0] = {
+ .bus_num = 1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = vlv2_i2c_scl_cfg,
+ .reset = vlv2_reset,
+ .share_irq = 1,
+ .acpi_name = "\\_SB.I2C1"
+ },
+ [valleyview_1] = {
+ .bus_num = 2,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = vlv2_i2c_scl_cfg,
+ .reset = vlv2_reset,
+ .share_irq = 1,
+ .acpi_name = "\\_SB.I2C2"
+ },
+ [valleyview_2] = {
+ .bus_num = 3,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = vlv2_i2c_scl_cfg,
+ .reset = vlv2_reset,
+ .share_irq = 1,
+ .acpi_name = "\\_SB.I2C3"
+ },
+ [valleyview_3] = {
+ .bus_num = 4,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = vlv2_i2c_scl_cfg,
+ .reset = vlv2_reset,
+ .share_irq = 1,
+ .acpi_name = "\\_SB.I2C4"
+ },
+ [valleyview_4] = {
+ .bus_num = 5,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = vlv2_i2c_scl_cfg,
+ .reset = vlv2_reset,
+ .share_irq = 1,
+ .acpi_name = "\\_SB.I2C5"
+ },
+ [valleyview_5] = {
+ .bus_num = 6,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = vlv2_i2c_scl_cfg,
+ .reset = vlv2_reset,
+ .share_irq = 1,
+ .acpi_name = "\\_SB.I2C6"
+ },
+ [valleyview_6] = {
+ .bus_num = 7,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .enable_stop = 1,
+ .scl_cfg = vlv2_i2c_scl_cfg,
+ .reset = vlv2_reset,
+ .share_irq = 1,
+ .acpi_name = "\\_SB.I2C7"
+ }
+};
+
+static struct i2c_algorithm i2c_dw_algo = {
+ .master_xfer = i2c_dw_xfer,
+ .functionality = i2c_dw_func,
+};
+
+int i2c_dw_suspend(struct dw_i2c_dev *dev, bool runtime)
+{
+ if (runtime)
+ i2c_dw_disable(dev);
+ else {
+ if (down_trylock(&dev->lock))
+ return -EBUSY;
+ i2c_dw_disable(dev);
+ dev->status &= ~STATUS_POWERON;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(i2c_dw_suspend);
+
+int i2c_dw_resume(struct dw_i2c_dev *dev, bool runtime)
+{
+ i2c_dw_init(dev);
+ if (!runtime) {
+ dev->status |= STATUS_POWERON;
+ up(&dev->lock);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(i2c_dw_resume);
+
+static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+ return dev->controller->clk_khz;
+}
+
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+static ssize_t show_bus_num(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", i2c->controller->bus_num);
+}
+
+#define MODE_NAME_SIZE 10
+
+static ssize_t store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+ int ret = 0;
+ char mode[MODE_NAME_SIZE];
+
+ if (sscanf(buf, "%9s", mode) != 1) {
+ dev_err(dev, "input I2C speed mode: std/fast\n");
+ return -EINVAL;
+ }
+
+ down(&i2c->lock);
+ pm_runtime_get_sync(i2c->dev);
+
+ if (!strncmp("std", mode, MODE_NAME_SIZE)) {
+ i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+ i2c->master_cfg |= DW_IC_CON_SPEED_STD;
+ } else if (!strncmp("fast", mode, MODE_NAME_SIZE)) {
+ i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+ i2c->master_cfg |= DW_IC_CON_SPEED_FAST;
+ } else if (!strncmp("high", mode, MODE_NAME_SIZE)) {
+ i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+ i2c->master_cfg |= DW_IC_CON_SPEED_HIGH;
} else {
- writel(b, dev->base + offset);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* init to configure the i2c master */
+ i2c_dw_init(i2c);
+
+ dev_info(dev, "I2C speed mode changed to %s\n", mode);
+
+out:
+ pm_runtime_mark_last_busy(i2c->dev);
+ pm_runtime_put_autosuspend(i2c->dev);
+ up(&i2c->lock);
+
+ return (ret < 0) ? ret : size;
+}
+
+static ssize_t show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+ int ret;
+
+ switch (i2c->master_cfg & DW_IC_SPEED_MASK) {
+ case DW_IC_CON_SPEED_STD:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "std");
+ break;
+ case DW_IC_CON_SPEED_FAST:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "fast");
+ break;
+ case DW_IC_CON_SPEED_HIGH:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "high");
+ break;
+ default:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "Not Supported\n");
+ break;
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR(bus_num, S_IRUGO, show_bus_num, NULL);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode);
+
+static struct attribute *i2c_dw_attrs[] = {
+ &dev_attr_bus_num.attr,
+ &dev_attr_mode.attr,
+ NULL,
+};
+
+static struct attribute_group i2c_dw_attr_group = {
+ .name = "i2c_dw_sysnode",
+ .attrs = i2c_dw_attrs,
+};
+#endif
+
+static ssize_t store_lock_xfer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dw_i2c_dev *i2c = dev_get_drvdata(dev->parent);
+ ssize_t status = -EINVAL;
+ long lock;
+
+
+ status = kstrtol(buf, 0, &lock);
+ if (status == 0) {
+ if (lock && !i2c->lock_flag) {
+ down(&i2c->lock);
+ pm_runtime_get_sync(i2c->dev);
+ i2c->lock_flag = 1;
+ dev_info(dev, "lock i2c xfer\n");
+ } else if (!lock && i2c->lock_flag) {
+ pm_runtime_mark_last_busy(i2c->dev);
+ pm_runtime_put_autosuspend(i2c->dev);
+ i2c->lock_flag = 0;
+ up(&i2c->lock);
+ dev_info(dev, "unlock i2c xfer\n");
+ } else
+ return -EINVAL;
}
+
+ return status ? : size;
}
+static DEVICE_ATTR(lock_xfer, S_IWUSR, NULL, store_lock_xfer);
+
+struct dw_i2c_dev *i2c_dw_setup(struct device *pdev, int bus_idx,
+ unsigned long start, unsigned long len, int irq)
+{
+ struct dw_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ void __iomem *base;
+ struct dw_controller *controller;
+ int r;
+
+ if (bus_idx >= ARRAY_SIZE(dw_controllers)) {
+ dev_err(pdev, "invalid bus index %d\n",
+ bus_idx);
+ r = -EINVAL;
+ goto exit;
+ }
+
+ controller = &dw_controllers[bus_idx];
+
+ base = ioremap_nocache(start, len);
+ if (!base) {
+ dev_err(pdev, "I/O memory remapping failed\n");
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev) {
+ r = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ init_completion(&dev->cmd_complete);
+ sema_init(&dev->lock, 1);
+ dev->status = STATUS_IDLE;
+ dev->clk = NULL;
+ dev->controller = controller;
+ dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+ dev->base = base;
+ dev->dev = get_device(pdev);
+ dev->functionality =
+ I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
+ dev->master_cfg = controller->bus_cfg;
+ dev->get_scl_cfg = controller->scl_cfg;
+ dev->enable_stop = controller->enable_stop;
+ dev->clk_khz = controller->clk_khz;
+ dev->speed_cfg = dev->master_cfg & DW_IC_SPEED_MASK;
+ dev->use_dyn_clk = 0;
+ dev->reset = controller->reset;
+ dev->irq = irq;
+ dev->share_irq = controller->share_irq;
+ dev->abort = intel_mid_dw_i2c_abort;
+ dev->tx_fifo_depth = controller->tx_fifo_depth;
+ dev->rx_fifo_depth = controller->rx_fifo_depth;
+
+ r = i2c_dw_init(dev);
+ if (r)
+ goto err_kfree;
+
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ adap->class = 0;
+ adap->algo = &i2c_dw_algo;
+ adap->dev.parent = pdev;
+ adap->nr = controller->bus_num;
+ snprintf(adap->name, sizeof(adap->name), "i2c-designware-%d",
+ adap->nr);
+
+ r = request_irq(irq, i2c_dw_isr, IRQF_SHARED, adap->name, dev);
+ if (r) {
+ dev_err(pdev, "failure requesting irq %i\n", irq);
+ goto err_kfree;
+ }
+
+ i2c_dw_disable_int(dev);
+ i2c_dw_clear_int(dev);
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(pdev, "failure adding adapter\n");
+ goto err_free_irq;
+ }
+
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+ r = sysfs_create_group(&pdev->kobj, &i2c_dw_attr_group);
+ if (r) {
+ dev_err(pdev,
+ "Unable to export sysfs interface, error: %d\n", r);
+ goto err_del_adap;
+ }
+#endif
+ r = device_create_file(&adap->dev, &dev_attr_lock_xfer);
+ if (r < 0)
+ dev_err(&adap->dev,
+ "Failed to add lock_xfer sysfs files: %d\n", r);
+
+ return dev;
+
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+err_del_adap:
+ i2c_del_adapter(&dev->adapter);
+#endif
+err_free_irq:
+ free_irq(irq, dev);
+err_kfree:
+ put_device(pdev);
+ kfree(dev);
+err_iounmap:
+ iounmap(base);
+exit:
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL(i2c_dw_setup);
+
+#ifdef CONFIG_ACPI
+static int acpi_i2c_get_freq(struct acpi_resource *ares,
+ void *data)
+{
+ struct dw_i2c_dev *i2c = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_i2c_serialbus *sb;
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ i2c->freq = sb->connection_speed;
+ if (i2c->freq == DW_STD_SPEED) {
+ i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+ i2c->master_cfg |= DW_IC_CON_SPEED_STD;
+ } else if (i2c->freq == DW_FAST_SPEED) {
+ i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+ i2c->master_cfg |= DW_IC_CON_SPEED_FAST;
+ } else if (i2c->freq == DW_HIGH_SPEED) {
+ i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+ i2c->master_cfg |= DW_IC_CON_SPEED_HIGH;
+ }
+
+ down(&i2c->lock);
+ i2c_dw_init(i2c);
+ up(&i2c->lock);
+ }
+ }
+
+ return 1;
+}
+
+static acpi_status acpi_i2c_find_device_speed(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct dw_i2c_dev *i2c = data;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned long long sta = 0;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_i2c_get_freq, i2c);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0)
+ return AE_OK;
+
+ pr_debug("i2c device: %s, freq: %dkHz\n",
+ dev_name(&adev->dev), i2c->freq/1000);
+
+ return AE_OK;
+}
+
+void i2c_acpi_devices_setup(struct device *pdev, struct dw_i2c_dev *dev)
+{
+ acpi_handle pdev_handle = ACPI_HANDLE(pdev);
+ acpi_handle handle = NULL;
+ acpi_status status;
+
+ if (pdev_handle) {
+ handle = pdev_handle;
+ } else if (dev->controller->acpi_name) {
+ acpi_get_handle(NULL,
+ dev->controller->acpi_name, &handle);
+
+ ACPI_HANDLE_SET(pdev, handle);
+ }
+
+ if (handle == NULL)
+ return;
+
+ acpi_i2c_register_devices(&dev->adapter);
+
+ /* Find I2C adapter bus frequency */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_i2c_find_device_speed, NULL,
+ dev, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(pdev, "failed to get I2C bus freq\n");
+
+ /* Set the handle back to its raw value */
+ ACPI_HANDLE_SET(pdev, pdev_handle);
+}
+#else
+void i2c_acpi_devices_setup(struct device *pdev, struct dw_i2c_dev *dev) { }
+#endif
+EXPORT_SYMBOL(i2c_acpi_devices_setup);
+
+void i2c_dw_free(struct device *pdev, struct dw_i2c_dev *dev)
+{
+ struct i2c_adapter *adap = &dev->adapter;
+
+ i2c_dw_disable(dev);
+
+ device_remove_file(&adap->dev, &dev_attr_lock_xfer);
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+ sysfs_remove_group(&pdev->kobj, &i2c_dw_attr_group);
+#endif
+
+ i2c_del_adapter(&dev->adapter);
+ put_device(pdev);
+ free_irq(dev->irq, dev);
+ kfree(dev);
+}
+EXPORT_SYMBOL(i2c_dw_free);
+
static u32
i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
{
return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
}
-static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
-{
- int timeout = 100;
-
- do {
- dw_writel(dev, enable, DW_IC_ENABLE);
- if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
- return;
-
- /*
- * Wait 10 times the signaling period of the highest I2C
- * transfer supported by the driver (for 400KHz this is
- * 25us) as described in the DesignWare I2C databook.
- */
- usleep_range(25, 250);
- } while (timeout--);
-
- dev_warn(dev->dev, "timeout in %sabling adapter\n",
- enable ? "en" : "dis");
-}
-
/**
* i2c_dw_init() - initialize the designware i2c master hardware
* @dev: device private data
u32 hcnt, lcnt;
u32 reg;
+ if (dev->reset)
+ dev->reset(dev);
+
input_clock_khz = dev->get_clk_rate_khz(dev);
+ /* Configure register endianness access */
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- /* Configure register endianess access */
- dev->accessor_flags |= ACCESS_SWAP;
- } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
- /* Configure register access mode 16bit */
- dev->accessor_flags |= ACCESS_16BIT;
- } else if (reg != DW_IC_COMP_TYPE_VALUE) {
+ dev->swab = 1;
+ reg = DW_IC_COMP_TYPE_VALUE;
+ }
+
+ if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
return -ENODEV;
}
/* Disable the adapter */
- __i2c_dw_enable(dev, false);
-
- /* set standard and fast speed deviders for high/low periods */
-
- /* Standard-mode */
- hcnt = i2c_dw_scl_hcnt(input_clock_khz,
- 40, /* tHD;STA = tHIGH = 4.0 us */
- 3, /* tf = 0.3 us */
- 0, /* 0: DW default, 1: Ideal */
- 0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(input_clock_khz,
- 47, /* tLOW = 4.7 us */
- 3, /* tf = 0.3 us */
- 0); /* No offset */
- dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
- dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
-
- /* Fast-mode */
- hcnt = i2c_dw_scl_hcnt(input_clock_khz,
- 6, /* tHD;STA = tHIGH = 0.6 us */
- 3, /* tf = 0.3 us */
- 0, /* 0: DW default, 1: Ideal */
- 0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(input_clock_khz,
- 13, /* tLOW = 1.3 us */
- 3, /* tf = 0.3 us */
- 0); /* No offset */
- dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
- dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+ i2c_dw_disable(dev);
+
+ if (dev->get_scl_cfg)
+ dev->get_scl_cfg(dev);
+ else {
+ /* set standard and fast speed dividers for high/low periods */
+
+ /* Standard-mode */
+ hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+ 227, /* tHD;STA = tHIGH = 22.7 us */
+ 3, /* tf = 0.3 us */
+ 0, /* 0: DW default, 1: Ideal */
+ 23); /* offset = 23 */
+ lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+ 227, /* tLOW = 22.7 us */
+ 3, /* tf = 0.3 us */
+ 28); /* offset = 28 */
+ dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
+ dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n",
+ hcnt, lcnt);
+
+ /* Fast-mode */
+ hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+ 52, /* tHD;STA = tHIGH = 5.2 us */
+ 3, /* tf = 0.3 us */
+ 0, /* 0: DW default, 1: Ideal */
+ 11); /* offset = 11 */
+ lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+ 72, /* tLOW = 7.2 us */
+ 3, /* tf = 0.3 us */
+ 12); /* offset = 12 */
+ dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
+ dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+ }
/* Configure Tx/Rx FIFO threshold levels */
- dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
- dw_writel(dev, 0, DW_IC_RX_TL);
+ dw_writel(dev, dev->tx_fifo_depth/2, DW_IC_TX_TL);
+ dw_writel(dev, dev->rx_fifo_depth/2, DW_IC_RX_TL);
/* configure the i2c master */
dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
return 0;
}
-EXPORT_SYMBOL_GPL(i2c_dw_init);
+EXPORT_SYMBOL(i2c_dw_init);
/*
* Waiting for bus not busy
return -ETIMEDOUT;
}
timeout--;
- usleep_range(1000, 1100);
+ mdelay(1);
}
return 0;
u32 ic_con;
/* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ i2c_dw_disable(dev);
/* set the slave (target) address */
dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
dw_writel(dev, ic_con, DW_IC_CON);
/* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ i2c_dw_enable(dev);
/* Clear and enable interrupts */
i2c_dw_clear_int(dev);
* messages into the tx buffer. Even if the size of i2c_msg data is
* longer than the size of the tx buffer, it handles everything.
*/
-static void
+void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 intr_mask;
int tx_limit, rx_limit;
+ int cmd;
u32 addr = msgs[dev->msg_write_idx].addr;
u32 buf_len = dev->tx_buf_len;
u8 *buf = dev->tx_buf;
+ unsigned long flags;
intr_mask = DW_IC_INTR_DEFAULT_MASK;
+ raw_local_irq_save(flags);
+ /* if the TX FIFO has run empty mid-write, it is not safe */
+ if (!dev->enable_stop && (dev->status & STATUS_WRITE_IN_PROGRESS) &&
+ (dw_readl(dev, DW_IC_TXFLR) < 1)) {
+ dev_err(dev->dev, "TX FIFO underrun, addr: 0x%x.\n", addr);
+ dev->msg_err = -EAGAIN;
+ }
+
for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+ if (dev->msg_err)
+ break;
+
/*
* if target address has changed, we need to
* reprogram the target address in the i2c
rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
- u32 cmd = 0;
-
- /*
- * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
- * manually set the stop bit. However, it cannot be
- * detected from the registers so we set it always
- * when writing/reading the last byte.
- */
- if (dev->msg_write_idx == dev->msgs_num - 1 &&
- buf_len == 1)
- cmd |= BIT(9);
-
+ cmd = (dev->enable_stop && buf_len == 1
+ && dev->msg_write_idx == dev->msgs_num - 1) ?
+ DW_IC_CMD_STOP : 0;
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
-
- /* avoid rx buffer overrun */
- if (rx_limit - dev->rx_outstanding <= 0)
- break;
-
dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
rx_limit--;
- dev->rx_outstanding++;
} else
dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
} else
dev->status &= ~STATUS_WRITE_IN_PROGRESS;
}
+ raw_local_irq_restore(flags);
/*
* If i2c_msg index search is completed, we don't need TX_EMPTY
rx_valid = dw_readl(dev, DW_IC_RXFLR);
- for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--)
*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
- dev->rx_outstanding--;
- }
if (len > 0) {
dev->status |= STATUS_READ_IN_PROGRESS;
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
int ret;
+ unsigned long timeout;
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
- mutex_lock(&dev->lock);
+ down(&dev->lock);
pm_runtime_get_sync(dev->dev);
INIT_COMPLETION(dev->cmd_complete);
dev->msg_err = 0;
dev->status = STATUS_IDLE;
dev->abort_source = 0;
- dev->rx_outstanding = 0;
ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
i2c_dw_xfer_init(dev);
/* wait for tx to complete */
- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
- if (ret == 0) {
- dev_err(dev->dev, "controller timed out\n");
+ timeout = wait_for_completion_timeout(&dev->cmd_complete, 3*HZ);
+ if (timeout == 0) {
+ dev_WARN(dev->dev, "controller timed out\n");
+ i2c_dw_dump(dev);
+ trigger_all_cpu_backtrace();
+ if (dev->abort)
+ dev->abort(adap->nr);
i2c_dw_init(dev);
ret = -ETIMEDOUT;
goto done;
- } else if (ret < 0)
- goto done;
+ }
if (dev->msg_err) {
ret = dev->msg_err;
/* no error */
if (likely(!dev->cmd_err)) {
/* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ i2c_dw_disable(dev);
ret = num;
goto done;
}
done:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- mutex_unlock(&dev->lock);
+ up(&dev->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(i2c_dw_xfer);
u32 i2c_dw_func(struct i2c_adapter *adap)
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
return dev->functionality;
}
-EXPORT_SYMBOL_GPL(i2c_dw_func);
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
struct dw_i2c_dev *dev = dev_id;
u32 stat, enabled;
+ pm_runtime_get(dev->dev);
+#ifdef CONFIG_PM_RUNTIME
+ if (!pm_runtime_active(dev->dev)) {
+ pm_runtime_put_autosuspend(dev->dev);
+ if (dev->share_irq)
+ return IRQ_NONE;
+ else
+ return IRQ_HANDLED;
+ }
+#endif
enabled = dw_readl(dev, DW_IC_ENABLE);
stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
dev_dbg(dev->dev, "%s: %s enabled= 0x%x stat=0x%x\n", __func__,
dev->adapter.name, enabled, stat);
- if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
- return IRQ_NONE;
+ if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) {
+ pm_runtime_put_autosuspend(dev->dev);
+ if (dev->share_irq)
+ return IRQ_NONE;
+ else
+ return IRQ_HANDLED;
+ }
stat = i2c_dw_read_clear_intrbits(dev);
+ if (stat & DW_IC_INTR_RX_OVER)
+ dev_warn(dev->dev, "RX fifo overrun\n");
+
if (stat & DW_IC_INTR_TX_ABRT) {
dev->cmd_err |= DW_IC_ERR_TX_ABRT;
dev->status = STATUS_IDLE;
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+ if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+ || dev->msg_err) {
+ /*
+ * Check DW_IC_RXFLR register,
+ * read from the RX FIFO if it's not empty.
+ */
+ if ((stat & DW_IC_INTR_STOP_DET) &&
+ dw_readl(dev, DW_IC_RXFLR) > 0)
+ i2c_dw_read(dev);
+
complete(&dev->cmd_complete);
+ }
+ pm_runtime_put_autosuspend(dev->dev);
return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(i2c_dw_isr);
-void i2c_dw_enable(struct dw_i2c_dev *dev)
+u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
{
- /* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ return dw_readl(dev, DW_IC_ENABLE_STATUS);
}
-EXPORT_SYMBOL_GPL(i2c_dw_enable);
-u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
+static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
+{
+ int timeout = 100;
+
+ do {
+ dw_writel(dev, enable, DW_IC_ENABLE);
+ if (i2c_dw_is_enabled(dev) == enable)
+ return;
+
+ usleep_range(25, 250);
+ } while (timeout-- > 0);
+
+ dev_warn(dev->dev, "timeout in %sabling adapter\n",
+ enable ? "en" : "dis");
+}
+
+void i2c_dw_enable(struct dw_i2c_dev *dev)
{
- return dw_readl(dev, DW_IC_ENABLE);
+ /* Enable the adapter */
+ __i2c_dw_enable(dev, true);
}
-EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
void i2c_dw_disable(struct dw_i2c_dev *dev)
{
dw_writel(dev, 0, DW_IC_INTR_MASK);
dw_readl(dev, DW_IC_CLR_INTR);
}
-EXPORT_SYMBOL_GPL(i2c_dw_disable);
void i2c_dw_clear_int(struct dw_i2c_dev *dev)
{
dw_readl(dev, DW_IC_CLR_INTR);
}
-EXPORT_SYMBOL_GPL(i2c_dw_clear_int);
void i2c_dw_disable_int(struct dw_i2c_dev *dev)
{
dw_writel(dev, 0, DW_IC_INTR_MASK);
}
-EXPORT_SYMBOL_GPL(i2c_dw_disable_int);
u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
{
return dw_readl(dev, DW_IC_COMP_PARAM_1);
}
-EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
-
-MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
-MODULE_LICENSE("GPL");
*
*/
+/*
+ * Registers offset
+ */
+#define DW_IC_CON 0x0
+#define DW_IC_TAR 0x4
+#define DW_IC_DATA_CMD 0x10
+#define DW_IC_SS_SCL_HCNT 0x14
+#define DW_IC_SS_SCL_LCNT 0x18
+#define DW_IC_FS_SCL_HCNT 0x1c
+#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_HS_SCL_HCNT 0x24
+#define DW_IC_HS_SCL_LCNT 0x28
+#define DW_IC_INTR_STAT 0x2c
+#define DW_IC_INTR_MASK 0x30
+#define DW_IC_RAW_INTR_STAT 0x34
+#define DW_IC_RX_TL 0x38
+#define DW_IC_TX_TL 0x3c
+#define DW_IC_CLR_INTR 0x40
+#define DW_IC_CLR_RX_UNDER 0x44
+#define DW_IC_CLR_RX_OVER 0x48
+#define DW_IC_CLR_TX_OVER 0x4c
+#define DW_IC_CLR_RD_REQ 0x50
+#define DW_IC_CLR_TX_ABRT 0x54
+#define DW_IC_CLR_RX_DONE 0x58
+#define DW_IC_CLR_ACTIVITY 0x5c
+#define DW_IC_CLR_STOP_DET 0x60
+#define DW_IC_CLR_START_DET 0x64
+#define DW_IC_CLR_GEN_CALL 0x68
+#define DW_IC_ENABLE 0x6c
+#define DW_IC_STATUS 0x70
+#define DW_IC_TXFLR 0x74
+#define DW_IC_RXFLR 0x78
+#define DW_IC_TX_ABRT_SOURCE 0x80
+#define DW_IC_ENABLE_STATUS 0x9c
+#define DW_IC_COMP_PARAM_1 0xf4
+#define DW_IC_COMP_TYPE 0xfc
+#define DW_IC_COMP_TYPE_VALUE 0x44570140
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_SPEED_HIGH 0x6
#define DW_IC_CON_10BITADDR_MASTER 0x10
#define DW_IC_CON_RESTART_EN 0x20
#define DW_IC_CON_SLAVE_DISABLE 0x40
+#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \
+ DW_IC_CON_SLAVE_DISABLE | \
+ DW_IC_CON_RESTART_EN)
+
+#define DW_IC_INTR_RX_UNDER 0x001
+#define DW_IC_INTR_RX_OVER 0x002
+#define DW_IC_INTR_RX_FULL 0x004
+#define DW_IC_INTR_TX_OVER 0x008
+#define DW_IC_INTR_TX_EMPTY 0x010
+#define DW_IC_INTR_RD_REQ 0x020
+#define DW_IC_INTR_TX_ABRT 0x040
+#define DW_IC_INTR_RX_DONE 0x080
+#define DW_IC_INTR_ACTIVITY 0x100
+#define DW_IC_INTR_STOP_DET 0x200
+#define DW_IC_INTR_START_DET 0x400
+#define DW_IC_INTR_GEN_CALL 0x800
+
+#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
+ DW_IC_INTR_TX_EMPTY | \
+ DW_IC_INTR_TX_ABRT | \
+ DW_IC_INTR_STOP_DET | \
+ DW_IC_INTR_RX_OVER)
+
+#define DW_IC_STATUS_ACTIVITY 0x1
+
+#define DW_IC_ERR_TX_ABRT 0x1
+
+#define DW_IC_CMD_STOP 0x200
+
+#define DW_IC_SPEED_MASK 0x6
+
+/*
+ * status codes
+ */
+#define STATUS_POWERON 0x0
+#define STATUS_IDLE STATUS_POWERON
+#define STATUS_WRITE_IN_PROGRESS 0x1
+#define STATUS_READ_IN_PROGRESS 0x2
+
+#define TIMEOUT 20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * only expected abort codes are listed here
+ * refer to the datasheet for the full list
+ */
+#define ABRT_7B_ADDR_NOACK 0
+#define ABRT_10ADDR1_NOACK 1
+#define ABRT_10ADDR2_NOACK 2
+#define ABRT_TXDATA_NOACK 3
+#define ABRT_GCALL_NOACK 4
+#define ABRT_GCALL_READ 5
+#define ABRT_SBYTE_ACKDET 7
+#define ABRT_SBYTE_NORSTRT 9
+#define ABRT_10B_RD_NORSTRT 10
+#define ABRT_MASTER_DIS 11
+#define ARB_LOST 12
+
+#define DW_IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK)
+#define DW_IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK)
+#define DW_IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK)
+#define DW_IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK)
+#define DW_IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK)
+#define DW_IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ)
+#define DW_IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET)
+#define DW_IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT)
+#define DW_IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT)
+#define DW_IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS)
+#define DW_IC_TX_ARB_LOST (1UL << ARB_LOST)
+#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \
+ DW_IC_TX_ABRT_10ADDR1_NOACK | \
+ DW_IC_TX_ABRT_10ADDR2_NOACK | \
+ DW_IC_TX_ABRT_TXDATA_NOACK | \
+ DW_IC_TX_ABRT_GCALL_NOACK)
+
+/*
+ * i2c scl hcnt/lcnt setting
+ */
+#define PNW_SS_SCLK_HCNT 0x1EC
+#define PNW_SS_SCLK_LCNT 0x1F3
+#define PNW_FS_SCLK_HCNT 0x66
+#define PNW_FS_SCLK_LCNT 0x8B
+#define PNW_HS_SCLK_HCNT 0x9
+#define PNW_HS_SCLK_LCNT 0x17
+
+#define CLV_SS_SCLK_HCNT 0x1EC
+#define CLV_SS_SCLK_LCNT 0x1F3
+#define CLV_FS_SCLK_HCNT 0x59
+#define CLV_FS_SCLK_LCNT 0x98
+#define CLV_HS_SCLK_HCNT 0x8
+#define CLV_HS_SCLK_LCNT 0x17
+
+/* unofficial configuration
+#define MERR_SS_SCLK_HCNT 0x2c8
+#define MERR_SS_SCLK_LCNT 0x380
+#define MERR_FS_SCLK_HCNT 0x084
+#define MERR_FS_SCLK_LCNT 0x100
+*/
+#define MERR_SS_SCLK_HCNT 0x2f8
+#define MERR_SS_SCLK_LCNT 0x37b
+#define MERR_FS_SCLK_HCNT 0x087
+#define MERR_FS_SCLK_LCNT 0x10a
+#define MERR_HS_SCLK_HCNT 0x8
+#define MERR_HS_SCLK_LCNT 0x20
+
+#define VLV2_SS_SCLK_HCNT 0x214
+#define VLV2_SS_SCLK_LCNT 0x272
+#define VLV2_FS_SCLK_HCNT 0x50
+#define VLV2_FS_SCLK_LCNT 0xad
+#define VLV2_HS_SCLK_HCNT 0x6
+#define VLV2_HS_SCLK_LCNT 0x16
+
+#define DW_STD_SPEED 100000
+#define DW_FAST_SPEED 400000
+#define DW_HIGH_SPEED 3400000
+
+struct dw_controller;
/**
* struct dw_i2c_dev - private i2c-designware data
* @dev: driver model device node
* @adapter: i2c subsystem adapter node
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
- * @rx_outstanding: current master-rx elements in tx fifo
*/
struct dw_i2c_dev {
struct device *dev;
void __iomem *base;
struct completion cmd_complete;
- struct mutex lock;
+ struct semaphore lock;
struct clk *clk;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
- struct dw_pci_controller *controller;
+ int (*get_scl_cfg) (struct dw_i2c_dev *dev);
+ void (*reset)(struct dw_i2c_dev *dev);
+ int (*abort)(int busnum);
+ struct dw_controller *controller;
+ int enable_stop;
+ int share_irq;
int cmd_err;
struct i2c_msg *msgs;
int msgs_num;
unsigned int status;
u32 abort_source;
int irq;
- u32 accessor_flags;
+ int swab;
struct i2c_adapter adapter;
u32 functionality;
u32 master_cfg;
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
- int rx_outstanding;
+ int use_dyn_clk; /* use dynamic clk setting */
+ u32 clk_khz; /* input clock */
+ u32 speed_cfg;
+ u32 lock_flag;
+ u32 freq;
+};
+
+struct dw_controller {
+ u32 bus_num;
+ u32 bus_cfg;
+ u32 tx_fifo_depth;
+ u32 rx_fifo_depth;
+ u32 clk_khz;
+ int enable_stop;
+ int share_irq;
+ char *acpi_name;
+ int (*scl_cfg) (struct dw_i2c_dev *dev);
+ void (*reset)(struct dw_i2c_dev *dev);
+};
+
+enum dw_ctl_id_t {
+ moorestown_0,
+ moorestown_1,
+ moorestown_2,
+
+ medfield_0,
+ medfield_1,
+ medfield_2,
+ medfield_3,
+ medfield_4,
+ medfield_5,
+
+ cloverview_0,
+ cloverview_1,
+ cloverview_2,
+ cloverview_3,
+ cloverview_4,
+ cloverview_5,
+
+ merrifield_0,
+ merrifield_1,
+ merrifield_2,
+ merrifield_3,
+ merrifield_4,
+ merrifield_5,
+ merrifield_6,
+
+ valleyview_0,
+ valleyview_1,
+ valleyview_2,
+ valleyview_3,
+ valleyview_4,
+ valleyview_5,
+ valleyview_6,
+
+ cherryview_0 = valleyview_0,
+ cherryview_1 = valleyview_1,
+ cherryview_2 = valleyview_2,
+ cherryview_3 = valleyview_3,
+ cherryview_4 = valleyview_4,
+ cherryview_5 = valleyview_5,
+ cherryview_6 = valleyview_6,
};
-#define ACCESS_SWAP 0x00000001
-#define ACCESS_16BIT 0x00000002
-
-extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
-extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
-extern int i2c_dw_init(struct dw_i2c_dev *dev);
-extern int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
- int num);
-extern u32 i2c_dw_func(struct i2c_adapter *adap);
-extern irqreturn_t i2c_dw_isr(int this_irq, void *dev_id);
-extern void i2c_dw_enable(struct dw_i2c_dev *dev);
-extern u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev);
-extern void i2c_dw_disable(struct dw_i2c_dev *dev);
-extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
-extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
-extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
+extern int intel_mid_dw_i2c_abort(int busnum);
+int i2c_dw_init(struct dw_i2c_dev *dev);
+struct dw_i2c_dev *i2c_dw_setup(struct device *pdev, int bus_idx,
+ unsigned long start, unsigned long len, int irq);
+void i2c_dw_free(struct device *pdev, struct dw_i2c_dev *dev);
+int i2c_dw_suspend(struct dw_i2c_dev *dev, bool runtime);
+int i2c_dw_resume(struct dw_i2c_dev *dev, bool runtime);
+void i2c_acpi_devices_setup(struct device *pdev, struct dw_i2c_dev *dev);
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/acpi.h>
#include "i2c-designware-core.h"
#define DRIVER_NAME "i2c-designware-pci"
+#define DW_I2C_STATIC_BUS_NUM 10
-enum dw_pci_ctl_id_t {
- moorestown_0,
- moorestown_1,
- moorestown_2,
-
- medfield_0,
- medfield_1,
- medfield_2,
- medfield_3,
- medfield_4,
- medfield_5,
-};
+static int i2c_dw_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
-struct dw_pci_controller {
- u32 bus_num;
- u32 bus_cfg;
- u32 tx_fifo_depth;
- u32 rx_fifo_depth;
- u32 clk_khz;
-};
+ dev_dbg(dev, "suspend called\n");
-#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \
- DW_IC_CON_SLAVE_DISABLE | \
- DW_IC_CON_RESTART_EN)
-
-static struct dw_pci_controller dw_pci_controllers[] = {
- [moorestown_0] = {
- .bus_num = 0,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [moorestown_1] = {
- .bus_num = 1,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [moorestown_2] = {
- .bus_num = 2,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_0] = {
- .bus_num = 0,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_1] = {
- .bus_num = 1,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_2] = {
- .bus_num = 2,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_3] = {
- .bus_num = 3,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_4] = {
- .bus_num = 4,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_5] = {
- .bus_num = 5,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
-};
-static struct i2c_algorithm i2c_dw_algo = {
- .master_xfer = i2c_dw_xfer,
- .functionality = i2c_dw_func,
-};
+ return i2c_dw_suspend(i2c, false);
+}
-static int i2c_dw_pci_suspend(struct device *dev)
+static int i2c_dw_pci_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
int err;
-
- i2c_dw_disable(i2c);
+ dev_dbg(dev, "runtime suspend called\n");
+ i2c_dw_suspend(i2c, true);
err = pci_save_state(pdev);
if (err) {
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
- int err;
- u32 enabled;
- enabled = i2c_dw_is_enabled(i2c);
- if (enabled)
- return 0;
+ dev_dbg(dev, "resume called\n");
+ return i2c_dw_resume(i2c, false);
+}
+
+static int i2c_dw_pci_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+ int err;
+ dev_dbg(dev, "runtime resume called\n");
err = pci_set_power_state(pdev, PCI_D0);
if (err) {
dev_err(&pdev->dev, "pci_set_power_state() failed\n");
return err;
}
-
pci_restore_state(pdev);
+ i2c_dw_resume(i2c, true);
- i2c_dw_init(i2c);
return 0;
}
-static int i2c_dw_pci_runtime_idle(struct device *dev)
-{
- int err = pm_schedule_suspend(dev, 500);
- dev_dbg(dev, "runtime_idle called\n");
-
- if (err != 0)
- return 0;
- return -EBUSY;
-}
-
static const struct dev_pm_ops i2c_dw_pm_ops = {
- .resume = i2c_dw_pci_resume,
- .suspend = i2c_dw_pci_suspend,
- SET_RUNTIME_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume,
- i2c_dw_pci_runtime_idle)
+ .suspend_late = i2c_dw_pci_suspend,
+ .resume_early = i2c_dw_pci_resume,
+ SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend,
+ i2c_dw_pci_runtime_resume,
+ NULL)
};
-static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
-{
- return dev->controller->clk_khz;
-}
-
static int i2c_dw_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+const struct pci_device_id *id)
{
struct dw_i2c_dev *dev;
- struct i2c_adapter *adap;
+ unsigned long start, len;
int r;
- struct dw_pci_controller *controller;
-
- if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
- dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__,
- id->driver_data);
- return -EINVAL;
- }
+ int bus_idx;
+ static int bus_num;
- controller = &dw_pci_controllers[id->driver_data];
+ bus_idx = id->driver_data + bus_num;
+ bus_num++;
- r = pcim_enable_device(pdev);
+ r = pci_enable_device(pdev);
if (r) {
dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n",
r);
return r;
}
- r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+ /* Determine the address of the I2C area */
+ start = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!start || len == 0) {
+ dev_err(&pdev->dev, "base address not set\n");
+ return -ENODEV;
+ }
+
+ r = pci_request_region(pdev, 0, DRIVER_NAME);
if (r) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ dev_err(&pdev->dev, "failed to request I2C region "
+ "0x%lx-0x%lx\n", start,
+ (unsigned long)pci_resource_end(pdev, 0));
return r;
}
- dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- init_completion(&dev->cmd_complete);
- mutex_init(&dev->lock);
- dev->clk = NULL;
- dev->controller = controller;
- dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
- dev->base = pcim_iomap_table(pdev)[0];
- dev->dev = &pdev->dev;
- dev->functionality =
- I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
- dev->master_cfg = controller->bus_cfg;
+ dev = i2c_dw_setup(&pdev->dev, bus_idx, start, len, pdev->irq);
+ if (IS_ERR(dev)) {
+ pci_release_region(pdev, 0);
+ dev_err(&pdev->dev, "failed to setup i2c\n");
+ return -EINVAL;
+ }
pci_set_drvdata(pdev, dev);
- dev->tx_fifo_depth = controller->tx_fifo_depth;
- dev->rx_fifo_depth = controller->rx_fifo_depth;
- r = i2c_dw_init(dev);
- if (r)
- return r;
-
- adap = &dev->adapter;
- i2c_set_adapdata(adap, dev);
- adap->owner = THIS_MODULE;
- adap->class = 0;
- adap->algo = &i2c_dw_algo;
- adap->dev.parent = &pdev->dev;
- adap->nr = controller->bus_num;
- snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci-%d",
- adap->nr);
-
- r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr, IRQF_SHARED,
- adap->name, dev);
- if (r) {
- dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
- return r;
- }
-
- i2c_dw_disable_int(dev);
- i2c_dw_clear_int(dev);
- r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(&pdev->dev, "failure adding adapter\n");
- return r;
- }
+ i2c_acpi_devices_setup(&pdev->dev, dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
{
struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
- i2c_dw_disable(dev);
pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
-
- i2c_del_adapter(&dev->adapter);
+ i2c_dw_free(&pdev->dev, dev);
+ pci_set_drvdata(pdev, NULL);
+ pci_release_region(pdev, 0);
}
/* work with hotplug and coldplug */
MODULE_ALIAS("i2c_designware-pci");
-static DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
+DEFINE_PCI_DEVICE_TABLE(i2c_designware_pci_ids) = {
/* Moorestown */
{ PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
- { PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
- { PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
+ { PCI_VDEVICE(INTEL, 0x0803), moorestown_0 },
+ { PCI_VDEVICE(INTEL, 0x0804), moorestown_0 },
/* Medfield */
- { PCI_VDEVICE(INTEL, 0x0817), medfield_3,},
- { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
- { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
+ { PCI_VDEVICE(INTEL, 0x0817), medfield_0 },
+ { PCI_VDEVICE(INTEL, 0x0818), medfield_0 },
+ { PCI_VDEVICE(INTEL, 0x0819), medfield_0 },
{ PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
- { PCI_VDEVICE(INTEL, 0x082D), medfield_1 },
- { PCI_VDEVICE(INTEL, 0x082E), medfield_2 },
+ { PCI_VDEVICE(INTEL, 0x082D), medfield_0 },
+ { PCI_VDEVICE(INTEL, 0x082E), medfield_0 },
+ /* Cloverview */
+ { PCI_VDEVICE(INTEL, 0x08E2), cloverview_0 },
+ { PCI_VDEVICE(INTEL, 0x08E3), cloverview_0 },
+ { PCI_VDEVICE(INTEL, 0x08E4), cloverview_0 },
+ { PCI_VDEVICE(INTEL, 0x08F4), cloverview_0 },
+ { PCI_VDEVICE(INTEL, 0x08F5), cloverview_0 },
+ { PCI_VDEVICE(INTEL, 0x08F6), cloverview_0 },
+ /* Merrifield */
+ { PCI_VDEVICE(INTEL, 0x1195), merrifield_0 },
+ { PCI_VDEVICE(INTEL, 0x1196), merrifield_0 },
+ /* Valleyview 2 */
+ { PCI_VDEVICE(INTEL, 0x0F41), valleyview_0 },
+ { PCI_VDEVICE(INTEL, 0x0F42), valleyview_0 },
+ { PCI_VDEVICE(INTEL, 0x0F43), valleyview_0 },
+ { PCI_VDEVICE(INTEL, 0x0F44), valleyview_0 },
+ { PCI_VDEVICE(INTEL, 0x0F45), valleyview_0 },
+ { PCI_VDEVICE(INTEL, 0x0F46), valleyview_0 },
+ { PCI_VDEVICE(INTEL, 0x0F47), valleyview_0 },
+ /* Cherryview */
+ { PCI_VDEVICE(INTEL, 0x22C1), cherryview_0 },
+ { PCI_VDEVICE(INTEL, 0x22C2), cherryview_0 },
+ { PCI_VDEVICE(INTEL, 0x22C3), cherryview_0 },
+ { PCI_VDEVICE(INTEL, 0x22C4), cherryview_0 },
+ { PCI_VDEVICE(INTEL, 0x22C5), cherryview_0 },
+ { PCI_VDEVICE(INTEL, 0x22C6), cherryview_0 },
+ { PCI_VDEVICE(INTEL, 0x22C7), cherryview_0 },
{ 0,}
};
-MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
+MODULE_DEVICE_TABLE(pci, i2c_designware_pci_ids);
static struct pci_driver dw_i2c_driver = {
.name = DRIVER_NAME,
- .id_table = i2_designware_pci_ids,
+ .id_table = i2c_designware_pci_ids,
.probe = i2c_dw_pci_probe,
.remove = i2c_dw_pci_remove,
.driver = {
},
};
-module_pci_driver(dw_i2c_driver);
+static int __init dw_i2c_init_driver(void)
+{
+ return pci_register_driver(&dw_i2c_driver);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+ pci_unregister_driver(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+#ifndef MODULE
+static int __init dw_i2c_reserve_static_bus(void)
+{
+ struct i2c_board_info dummy = {
+ I2C_BOARD_INFO("dummy", 0xff),
+ };
+
+ i2c_register_board_info(DW_I2C_STATIC_BUS_NUM, &dummy, 1);
+ return 0;
+}
+subsys_initcall(dw_i2c_reserve_static_bus);
+
+static void dw_i2c_pci_final_quirks(struct pci_dev *pdev)
+{
+ pdev->pm_cap = 0x80;
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0F44,
+ dw_i2c_pci_final_quirks);
+#endif
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/interrupt.h>
-#include <linux/of_i2c.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include "i2c-designware-core.h"
-static struct i2c_algorithm i2c_dw_algo = {
- .master_xfer = i2c_dw_xfer,
- .functionality = i2c_dw_func,
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_i2c_acpi_ids[] = {
+ { "80860F41", valleyview_0 },
+ { "808622C1", cherryview_0 },
+ { }
};
-static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_ids);
+#endif
+
+static int dw_i2c_plat_suspend(struct device *dev)
{
- return clk_get_rate(dev->clk)/1000;
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "suspend called\n");
+ return i2c_dw_suspend(i2c, false);
}
-#ifdef CONFIG_ACPI
-static int dw_i2c_acpi_configure(struct platform_device *pdev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
- struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
- if (!ACPI_HANDLE(&pdev->dev))
- return -ENODEV;
+ dev_dbg(dev, "runtime suspend called\n");
+ i2c_dw_suspend(i2c, true);
- dev->adapter.nr = -1;
- dev->tx_fifo_depth = 32;
- dev->rx_fifo_depth = 32;
return 0;
}
-static const struct acpi_device_id dw_i2c_acpi_match[] = {
- { "INT33C2", 0 },
- { "INT33C3", 0 },
- { "80860F41", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
-#else
-static inline int dw_i2c_acpi_configure(struct platform_device *pdev)
+static int dw_i2c_plat_resume(struct device *dev)
{
- return -ENODEV;
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "resume called\n");
+ return i2c_dw_resume(i2c, false);
}
-#endif
-static int dw_i2c_probe(struct platform_device *pdev)
+static int dw_i2c_plat_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "runtime resume called\n");
+ i2c_dw_resume(i2c, true);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_i2c_plat_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend,
+ dw_i2c_plat_resume)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+ dw_i2c_plat_runtime_resume,
+ NULL)
+};
+
+static int __init dw_i2c_probe(struct platform_device *pdev)
{
struct dw_i2c_dev *dev;
- struct i2c_adapter *adap;
- struct resource *mem;
- int irq, r;
+ struct resource *mem, *ioarea;
+ const struct acpi_device_id *id;
+ unsigned long start, len;
+ int bus_idx = 0;
+ static int bus_num;
+ int irq;
+
+#ifdef CONFIG_ACPI
+ for (id = dw_i2c_acpi_ids; id->id[0]; id++)
+ if (!strncmp(id->id, dev_name(&pdev->dev), strlen(id->id))) {
+ bus_idx = id->driver_data + bus_num;
+ bus_num++;
+ }
+#else
+ bus_idx = pdev->id;
+#endif
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev_err(&pdev->dev, "no mem resource?\n");
return -EINVAL;
}
+ start = mem->start;
+ len = resource_size(mem);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
return irq; /* -ENXIO */
}
- dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(dev->base))
- return PTR_ERR(dev->base);
-
- init_completion(&dev->cmd_complete);
- mutex_init(&dev->lock);
- dev->dev = &pdev->dev;
- dev->irq = irq;
- platform_set_drvdata(pdev, dev);
-
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
-
- if (IS_ERR(dev->clk))
- return PTR_ERR(dev->clk);
- clk_prepare_enable(dev->clk);
-
- dev->functionality =
- I2C_FUNC_I2C |
- I2C_FUNC_10BIT_ADDR |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
- dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
-
- /* Try first if we can configure the device from ACPI */
- r = dw_i2c_acpi_configure(pdev);
- if (r) {
- u32 param1 = i2c_dw_read_comp_param(dev);
-
- dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
- dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
- dev->adapter.nr = pdev->id;
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
}
- r = i2c_dw_init(dev);
- if (r)
- return r;
- i2c_dw_disable_int(dev);
- r = devm_request_irq(&pdev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED,
- pdev->name, dev);
- if (r) {
- dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
- return r;
+ dev = i2c_dw_setup(&pdev->dev, bus_idx, start, len, irq);
+ if (IS_ERR(dev)) {
+ release_mem_region(mem->start, resource_size(mem));
+ dev_err(&pdev->dev, "failed to setup i2c\n");
+ return -EINVAL;
}
- adap = &dev->adapter;
- i2c_set_adapdata(adap, dev);
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON;
- strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
- sizeof(adap->name));
- adap->algo = &i2c_dw_algo;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, dev);
- r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(&pdev->dev, "failure adding adapter\n");
- return r;
- }
- of_i2c_register_devices(adap);
- acpi_i2c_register_devices(adap);
+ acpi_i2c_register_devices(&dev->adapter);
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
- pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
return 0;
}
-static int dw_i2c_remove(struct platform_device *pdev)
+static int __exit dw_i2c_remove(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
- pm_runtime_get_sync(&pdev->dev);
-
- i2c_del_adapter(&dev->adapter);
-
- i2c_dw_disable(dev);
-
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id dw_i2c_of_match[] = {
- { .compatible = "snps,designware-i2c", },
- {},
-};
-MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
-#endif
-
-#ifdef CONFIG_PM
-static int dw_i2c_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(i_dev->clk);
-
- return 0;
-}
-
-static int dw_i2c_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
-
- clk_prepare_enable(i_dev->clk);
- i2c_dw_init(i_dev);
-
+ pm_runtime_forbid(&pdev->dev);
+ i2c_dw_free(&pdev->dev, dev);
+ platform_set_drvdata(pdev, NULL);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem)
+ release_mem_region(mem->start, resource_size(mem));
return 0;
}
-#endif
-
-static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
.driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(dw_i2c_of_match),
- .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
- .pm = &dw_i2c_dev_pm_ops,
+ .pm = &dw_i2c_plat_pm_ops,
+#ifdef CONFIG_ACPI
+ .acpi_match_table = ACPI_PTR(dw_i2c_acpi_ids),
+#endif
},
};
static int __init dw_i2c_init_driver(void)
{
+ struct pci_dev *dw_pci;
+
+ /*
+ * Try to get pci device, if exist, then exit ACPI platform
+ * register, On BYT FDK, include two enum mode: PCI, ACPI,
+ * ignore ACPI enum mode.
+ */
+ dw_pci = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0F41, NULL);
+ if (dw_pci) {
+ pr_info("DW I2C: Find I2C controller in PCI device, "
+ "exit ACPI platform register!\n");
+ return 0;
+ }
+
return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
}
-subsys_initcall(dw_i2c_init_driver);
+module_init(dw_i2c_init_driver);
static void __exit dw_i2c_exit_driver(void)
{
--- /dev/null
+/*
+ * i2c-pmic-regs.h - PMIC I2C registers
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+#ifndef __I2C_PMIC_REGS_H__
+#define __I2C_PMIC_REGS_H__
+
+#include <linux/mutex.h>
+
+/*********************************************************************
+ * Generic defines
+ *********************************************************************/
+
+#define D7 (1 << 7)
+#define D6 (1 << 6)
+#define D5 (1 << 5)
+#define D4 (1 << 4)
+#define D3 (1 << 3)
+#define D2 (1 << 2)
+#define D1 (1 << 1)
+#define D0 (1 << 0)
+
+#define PMIC_SRAM_INTR_ADDR 0xFFFFF616
+
+#define I2C_MSG_LEN 4
+
+#define I2COVRCTRL_ADDR 0x58
+#define I2COVRDADDR_ADDR 0x59
+#define I2COVROFFSET_ADDR 0x5A
+#define I2COVRWRDATA_ADDR 0x5B
+#define I2COVRRDDATA_ADDR 0x5C
+
+#define IRQLVL1_ADDR 0x01
+#define IRQLVL1_MASK_ADDR 0x0c
+#define IRQLVL1_CHRGR_MASK D5
+
+#define MCHGRIRQ1_ADDR 0x13
+#define MCHGRIRQ0_ADDR 0x12
+
+#define PMIC_I2C_INTR_MASK ((u8)(D3|D2|D1))
+#define I2COVRCTRL_I2C_RD D1
+#define I2COVRCTRL_I2C_WR D0
+#define CHGRIRQ0_ADDR 0x07
+
+#define IRQ0_I2C_BIT_POS 1
+
+struct pmic_i2c_dev {
+ int irq;
+ u32 pmic_intr_sram_addr;
+ struct i2c_adapter adapter;
+ int i2c_rw;
+ wait_queue_head_t i2c_wait;
+ struct mutex i2c_pmic_rw_lock;
+ void __iomem *pmic_intr_map;
+ struct device *dev;
+};
+
+#endif
--- /dev/null
+/*
+ * i2c-pmic.c: PMIC I2C adapter driver.
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "i2c-pmic-regs.h"
+
+#define DRIVER_NAME "i2c_pmic_adap"
+#define PMIC_I2C_ADAPTER 8
+
+enum I2C_STATUS {
+ I2C_WR = 1,
+ I2C_RD,
+ I2C_NACK = 4
+};
+
+static struct pmic_i2c_dev *pmic_dev;
+
+/* Function Definitions */
+
+/* PMIC I2C read-write completion interrupt handler */
+static irqreturn_t pmic_i2c_handler(int irq, void *data)
+{
+ u8 irq0_int;
+
+ irq0_int = ioread8(pmic_dev->pmic_intr_map);
+ irq0_int &= PMIC_I2C_INTR_MASK;
+
+ if (irq0_int) {
+ pmic_dev->i2c_rw = (irq0_int >> IRQ0_I2C_BIT_POS);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+
+static irqreturn_t pmic_thread_handler(int id, void *data)
+{
+#define IRQLVL1_MASK_ADDR 0x0c
+#define IRQLVL1_CHRGR_MASK D5
+
+ dev_dbg(pmic_dev->dev, "Clearing IRQLVL1_MASK_ADDR\n");
+
+ intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+ IRQLVL1_CHRGR_MASK);
+ wake_up(&(pmic_dev->i2c_wait));
+ return IRQ_HANDLED;
+}
+
+/* PMIC i2c read msg */
+static inline int pmic_i2c_read_xfer(struct i2c_msg msg)
+{
+ int ret;
+ u16 i;
+ u8 mask = (I2C_RD | I2C_NACK);
+ u16 regs[I2C_MSG_LEN] = {0};
+ u8 data[I2C_MSG_LEN] = {0};
+
+ for (i = 0; i < msg.len ; i++) {
+ pmic_dev->i2c_rw = 0;
+ regs[0] = I2COVRDADDR_ADDR;
+ data[0] = msg.addr;
+ regs[1] = I2COVROFFSET_ADDR;
+ data[1] = msg.buf[0] + i;
+ /* intel_scu_ipc_function works fine for even number of bytes */
+ /* Hence adding a dummy byte transfer */
+ regs[2] = I2COVROFFSET_ADDR;
+ data[2] = msg.buf[0] + i;
+ regs[3] = I2COVRCTRL_ADDR;
+ data[3] = I2COVRCTRL_I2C_RD;
+ ret = intel_scu_ipc_writev(regs, data, I2C_MSG_LEN);
+ if (unlikely(ret))
+ return ret;
+
+ ret = wait_event_timeout(pmic_dev->i2c_wait,
+ (pmic_dev->i2c_rw & mask),
+ HZ);
+
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto read_err_exit;
+ } else if (pmic_dev->i2c_rw == I2C_NACK) {
+ ret = -EIO;
+ goto read_err_exit;
+ } else {
+ ret = intel_scu_ipc_ioread8(I2COVRRDDATA_ADDR,
+ &(msg.buf[i]));
+ if (unlikely(ret)) {
+ ret = -EIO;
+ goto read_err_exit;
+ }
+ }
+ }
+ return 0;
+
+read_err_exit:
+ return ret;
+}
+
+/* PMIC i2c write msg */
+static inline int pmic_i2c_write_xfer(struct i2c_msg msg)
+{
+ int ret;
+ u16 i;
+ u8 mask = (I2C_WR | I2C_NACK);
+ u16 regs[I2C_MSG_LEN] = {0};
+ u8 data[I2C_MSG_LEN] = {0};
+
+ for (i = 1; i <= msg.len ; i++) {
+ pmic_dev->i2c_rw = 0;
+ regs[0] = I2COVRDADDR_ADDR;
+ data[0] = msg.addr;
+ regs[1] = I2COVRWRDATA_ADDR;
+ data[1] = msg.buf[i];
+ regs[2] = I2COVROFFSET_ADDR;
+ data[2] = msg.buf[0] + i - 1;
+ regs[3] = I2COVRCTRL_ADDR;
+ data[3] = I2COVRCTRL_I2C_WR;
+ ret = intel_scu_ipc_writev(regs, data, I2C_MSG_LEN);
+ if (unlikely(ret))
+ return ret;
+
+ ret = wait_event_timeout(pmic_dev->i2c_wait,
+ (pmic_dev->i2c_rw & mask),
+ HZ);
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else if (pmic_dev->i2c_rw == I2C_NACK)
+ return -EIO;
+ }
+ return 0;
+}
+
+static int (*xfer_fn[]) (struct i2c_msg) = {
+ pmic_i2c_write_xfer,
+ pmic_i2c_read_xfer
+};
+
+/* PMIC I2C Master transfer algorithm function */
+static int pmic_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ int ret = 0;
+ int i;
+ u8 index;
+
+ mutex_lock(&pmic_dev->i2c_pmic_rw_lock);
+ pm_runtime_get_sync(pmic_dev->dev);
+ for (i = 0 ; i < num ; i++) {
+ index = msgs[i].flags & I2C_M_RD;
+ ret = (xfer_fn[index])(msgs[i]);
+
+ if (ret == -EACCES)
+ dev_info(pmic_dev->dev, "Blocked Access!\n");
+
+ /* If access is restricted, return true to
+ * avoid extra error handling in client
+ */
+
+ if (ret != 0 && ret != -EACCES)
+ goto transfer_err_exit;
+ }
+
+ ret = num;
+
+transfer_err_exit:
+ mutex_unlock(&pmic_dev->i2c_pmic_rw_lock);
+ pm_runtime_put_sync(pmic_dev->dev);
+ intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+ IRQLVL1_CHRGR_MASK);
+ return ret;
+}
+
+/* PMIC I2C adapter capability function */
+static u32 pmic_master_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int pmic_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size,
+ union i2c_smbus_data *data)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+ int ret;
+
+ msg.addr = addr;
+ msg.flags = flags & I2C_M_TEN;
+ msg.buf = buf;
+ msg.buf[0] = command;
+ if (read_write == I2C_SMBUS_WRITE) {
+ msg.len = 1;
+ msg.buf[1] = data->byte;
+ } else {
+ msg.flags |= I2C_M_RD;
+ msg.len = 1;
+ }
+
+ ret = pmic_master_xfer(adap, &msg, 1);
+ if (ret == 1) {
+ if (read_write == I2C_SMBUS_READ)
+ data->byte = msg.buf[0];
+ return 0;
+ }
+ return ret;
+}
+
+
+static const struct i2c_algorithm pmic_i2c_algo = {
+ .master_xfer = pmic_master_xfer,
+ .functionality = pmic_master_func,
+ .smbus_xfer = pmic_smbus_xfer,
+};
+
+static int pmic_i2c_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adap;
+ int ret;
+
+ pmic_dev = kzalloc(sizeof(struct pmic_i2c_dev), GFP_KERNEL);
+ if (!pmic_dev)
+ return -ENOMEM;
+
+ pmic_dev->dev = &pdev->dev;
+ pmic_dev->irq = platform_get_irq(pdev, 0);
+
+
+
+ mutex_init(&pmic_dev->i2c_pmic_rw_lock);
+ init_waitqueue_head(&(pmic_dev->i2c_wait));
+
+ pmic_dev->pmic_intr_map = ioremap_nocache(PMIC_SRAM_INTR_ADDR, 8);
+ if (!pmic_dev->pmic_intr_map) {
+ dev_err(&pdev->dev, "ioremap Failed\n");
+ ret = -ENOMEM;
+ goto ioremap_failed;
+ }
+ ret = request_threaded_irq(pmic_dev->irq, pmic_i2c_handler,
+ pmic_thread_handler,
+ IRQF_SHARED|IRQF_NO_SUSPEND,
+ DRIVER_NAME, pmic_dev);
+ if (ret)
+ goto err_irq_request;
+
+ ret = intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+ IRQLVL1_CHRGR_MASK);
+ if (unlikely(ret))
+ goto unmask_irq_failed;
+ ret = intel_scu_ipc_update_register(MCHGRIRQ0_ADDR, 0x00,
+ PMIC_I2C_INTR_MASK);
+ if (unlikely(ret))
+ goto unmask_irq_failed;
+
+ /* Init runtime PM state*/
+ pm_runtime_put_noidle(pmic_dev->dev);
+
+ adap = &pmic_dev->adapter;
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ adap->algo = &pmic_i2c_algo;
+ strcpy(adap->name, "PMIC I2C Adapter");
+ adap->nr = PMIC_I2C_ADAPTER;
+ ret = i2c_add_numbered_adapter(adap);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Error adding the adapter\n");
+ goto err_adap_add;
+ }
+
+ pm_schedule_suspend(pmic_dev->dev, MSEC_PER_SEC);
+ return 0;
+
+err_adap_add:
+ free_irq(pmic_dev->irq, pmic_dev);
+unmask_irq_failed:
+err_irq_request:
+ iounmap(pmic_dev->pmic_intr_map);
+ioremap_failed:
+ kfree(pmic_dev);
+ return ret;
+}
+
+static int pmic_i2c_remove(struct platform_device *pdev)
+{
+ iounmap(pmic_dev->pmic_intr_map);
+ free_irq(pmic_dev->irq, pmic_dev);
+ pm_runtime_get_noresume(pmic_dev->dev);
+ kfree(pmic_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic_i2c_suspend(struct device *dev)
+{
+ dev_info(dev, "%s\n", __func__);
+ return 0;
+}
+
+static int pmic_i2c_resume(struct device *dev)
+{
+ dev_info(dev, "%s\n", __func__);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int pmic_i2c_runtime_suspend(struct device *dev)
+{
+ dev_info(dev, "%s\n", __func__);
+ return 0;
+}
+
+static int pmic_i2c_runtime_resume(struct device *dev)
+{
+ dev_info(dev, "%s\n", __func__);
+ return 0;
+}
+
+static int pmic_i2c_runtime_idle(struct device *dev)
+{
+ dev_info(dev, "%s\n", __func__);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops pmic_i2c_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pmic_i2c_suspend,
+ pmic_i2c_resume)
+ SET_RUNTIME_PM_OPS(pmic_i2c_runtime_suspend,
+ pmic_i2c_runtime_resume,
+ pmic_i2c_runtime_idle)
+};
+
+struct platform_driver pmic_i2c_driver = {
+ .probe = pmic_i2c_probe,
+ .remove = pmic_i2c_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &pmic_i2c_pm_ops,
+ },
+};
+
+static int pmic_i2c_init(void)
+{
+ return platform_driver_register(&pmic_i2c_driver);
+}
+
+static void pmic_i2c_exit(void)
+{
+ platform_driver_unregister(&pmic_i2c_driver);
+}
+
+static int pmic_i2c_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+ int ret = 0;
+
+ if (rpdev == NULL) {
+ pr_err("rpmsg channel not created\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&rpdev->dev, "Probed pmic_i2c rpmsg device\n");
+
+ ret = pmic_i2c_init();
+
+out:
+ return ret;
+}
+
+static void pmic_i2c_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+ pmic_i2c_exit();
+ dev_info(&rpdev->dev, "Removed pmic_i2c rpmsg device\n");
+}
+
+static void pmic_i2c_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ dev_warn(&rpdev->dev, "unexpected, message\n");
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+}
+
+static struct rpmsg_device_id pmic_i2c_rpmsg_id_table[] = {
+ { .name = "rpmsg_i2c_pmic_adap" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_i2c_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_i2c_rpmsg = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = pmic_i2c_rpmsg_id_table,
+ .probe = pmic_i2c_rpmsg_probe,
+ .callback = pmic_i2c_rpmsg_cb,
+ .remove = pmic_i2c_rpmsg_remove,
+};
+
+static int __init pmic_i2c_rpmsg_init(void)
+{
+ return register_rpmsg_driver(&pmic_i2c_rpmsg);
+}
+
+static void __exit pmic_i2c_rpmsg_exit(void)
+{
+ return unregister_rpmsg_driver(&pmic_i2c_rpmsg);
+}
+module_init(pmic_i2c_rpmsg_init);
+module_exit(pmic_i2c_rpmsg_exit);
+
+MODULE_AUTHOR("Yegnesh Iyer <yegnesh.s.iyer@intel.com>");
+MODULE_DESCRIPTION("PMIC I2C Master driver");
+MODULE_LICENSE("GPL");
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/intel_mid_pm.h>
#include <asm/cpu_device_id.h>
#include <asm/mwait.h>
#include <asm/msr.h>
.enter = NULL }
};
+static struct cpuidle_state vlv_cstates[CPUIDLE_STATE_MAX] = {
+ { /* MWAIT C1 */
+ .name = "C1-ATM",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C4 */
+ .name = "C4-ATM",
+ .desc = "MWAIT 0x30",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 100,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ { /* MWAIT C6 */
+ .name = "C6-ATM",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ .enter = &intel_idle },
+ { /* MWAIT C7-S0i1 */
+ .name = "S0i1-ATM",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ .enter = &intel_idle },
+ { /* MWAIT C8-S0i2 */
+ .name = "S0i2-ATM",
+ .desc = "MWAIT 0x62",
+ .flags = MWAIT2flg(0x62) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 2000,
+ .target_residency = 8000,
+ .enter = &intel_idle },
+ { /* MWAIT C9-S0i3 */
+ .name = "S0i3-ATM",
+ .desc = "MWAIT 0x64",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+
+#if defined(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER)
+static struct cpuidle_state mrfld_cstates[CPUIDLE_STATE_MAX] = {
+ { /* MWAIT C1 */
+ .name = "C1-ATM",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C4 */
+ .name = "C4-ATM",
+ .desc = "MWAIT 0x30",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 100,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ { /* MWAIT C6 */
+ .name = "C6-ATM",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ .enter = &intel_idle },
+ { /* MWAIT C7-S0i1 */
+ .name = "S0i1-ATM",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ .enter = &intel_idle },
+ { /* MWAIT C9-S0i3 */
+ .name = "S0i3-ATM",
+ .desc = "MWAIT 0x64",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+#else
+#define mrfld_cstates atom_cstates
+#endif
+
+#if defined(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) || \
+ defined(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER)
+
+static struct cpuidle_state mfld_cstates[CPUIDLE_STATE_MAX] = {
+ { /* MWAIT C1 */
+ .name = "ATM-C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = CSTATE_EXIT_LATENCY_C1,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "ATM-C2",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = CSTATE_EXIT_LATENCY_C2,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ { /* MWAIT C4 */
+ .name = "ATM-C4",
+ .desc = "MWAIT 0x30",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = CSTATE_EXIT_LATENCY_C4,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ { /* MWAIT C6 */
+ .name = "ATM-C6",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = CSTATE_EXIT_LATENCY_C6,
+ .power_usage = C6_POWER_USAGE,
+ .target_residency = 560,
+ .enter = &soc_s0ix_idle },
+ {
+ .name = "ATM-S0i1",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = CSTATE_EXIT_LATENCY_S0i1,
+ .power_usage = S0I1_POWER_USAGE,
+ .enter = &soc_s0ix_idle },
+ {
+ .name = "ATM-LpAudio",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = CSTATE_EXIT_LATENCY_LPMP3,
+ .power_usage = LPMP3_POWER_USAGE,
+ .enter = &soc_s0ix_idle },
+ {
+ .name = "ATM-S0i3",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = CSTATE_EXIT_LATENCY_S0i3,
+ .power_usage = S0I3_POWER_USAGE,
+ .enter = &soc_s0ix_idle },
+ {
+ .enter = NULL }
+};
+
+static int enter_s0ix_state(u32 eax, int gov_req_state, int s0ix_state,
+ struct cpuidle_device *dev, int index)
+{
+ int s0ix_entered = 0;
+ int selected_state = C6_STATE_IDX;
+
+ if (atomic_add_return(1, &nr_cpus_in_c6) == num_online_cpus() &&
+ s0ix_state) {
+ s0ix_entered = mid_s0ix_enter(s0ix_state);
+ if (!s0ix_entered) {
+ if (pmu_is_s0ix_in_progress()) {
+ atomic_dec(&nr_cpus_in_c6);
+ eax = C4_HINT;
+ }
+ pmu_set_s0ix_complete();
+ }
+ }
+ switch (s0ix_state) {
+ case MID_S0I1_STATE:
+ trace_cpu_idle(S0I1_STATE_IDX, dev->cpu);
+ break;
+ case MID_LPMP3_STATE:
+ trace_cpu_idle(LPMP3_STATE_IDX, dev->cpu);
+ break;
+ case MID_S0I3_STATE:
+ trace_cpu_idle(S0I3_STATE_IDX, dev->cpu);
+ break;
+ case MID_S3_STATE:
+ trace_cpu_idle(S0I3_STATE_IDX, dev->cpu);
+ break;
+ default:
+ trace_cpu_idle((eax >> 4) + 1, dev->cpu);
+ }
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(eax, 1);
+
+ if (likely(eax == C6_HINT))
+ atomic_dec(&nr_cpus_in_c6);
+
+ /* During s0ix exit inform scu that OS
+ * has exited. In case scu is still waiting
+ * for ack c6 trigger, it would exit out
+ * of the ack-c6 timeout loop
+ */
+ pmu_set_s0ix_complete();
+
+ /* In case of demotion to S0i1/lpmp3 update last_state */
+ if (s0ix_entered) {
+ selected_state = S0I3_STATE_IDX;
+
+ if (s0ix_state == MID_S0I1_STATE) {
+ index = S0I1_STATE_IDX;
+ selected_state = S0I1_STATE_IDX;
+ } else if (s0ix_state == MID_LPMP3_STATE) {
+ index = LPMP3_STATE_IDX;
+ selected_state = LPMP3_STATE_IDX;
+ }
+ } else if (eax == C4_HINT) {
+ index = C4_STATE_IDX;
+ selected_state = C4_STATE_IDX;
+ } else
+ index = C6_STATE_IDX;
+
+ pmu_s0ix_demotion_stat(gov_req_state, selected_state);
+
+ return index;
+}
+
+static int soc_s0ix_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+ int cpu = smp_processor_id();
+ int s0ix_state = 0;
+ unsigned int cstate;
+ int gov_req_state = (int) eax;
+
+ /* Check if s0ix is already in progress,
+ * This is required to demote C6 while S0ix
+ * is in progress
+ */
+ if (unlikely(pmu_is_s0ix_in_progress()))
+ return intel_idle(dev, drv, C4_STATE_IDX);
+
+ /* check if we need/possible to do s0ix */
+ if (eax != C6_HINT)
+ s0ix_state = get_target_platform_state(&eax);
+
+ /*
+ * leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLB's associated with the active mm.
+ */
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(cpu);
+
+ cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+ stop_critical_timings();
+
+ if (!need_resched())
+ index = enter_s0ix_state(eax, gov_req_state,
+ s0ix_state, dev, index);
+
+ start_critical_timings();
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
+ return index;
+}
+#else
+#define mfld_cstates atom_cstates
+#endif
+
+#ifdef CONFIG_ATOM_SOC_POWER
+static unsigned int get_target_residency(unsigned int cstate)
+{
+ unsigned int t_sleep = cpuidle_state_table[cstate].target_residency;
+ unsigned int prev_idx;
+
+ /* get the previous lower sleep state */
+ if ((cstate == 8) || (cstate == 9))
+ prev_idx = cstate - 2;
+ else
+ prev_idx = cstate - 1;
+
+ /* calculate target_residency only if not defined already */
+ if (!t_sleep) {
+ unsigned int p_active = cpuidle_state_table[0].power_usage;
+ unsigned int prev_state_power = cpuidle_state_table
+ [prev_idx].power_usage;
+ unsigned int curr_state_power = cpuidle_state_table
+ [cstate].power_usage;
+ unsigned int prev_state_lat = cpuidle_state_table
+ [prev_idx].exit_latency;
+ unsigned int curr_state_lat = cpuidle_state_table
+ [cstate].exit_latency;
+
+ if (curr_state_power && prev_state_power && p_active &&
+ prev_state_lat && curr_state_lat &&
+ (curr_state_lat > prev_state_lat) &&
+ (prev_state_power > curr_state_power)) {
+ t_sleep = (p_active * (curr_state_lat - prev_state_lat)
+ + (prev_state_lat * prev_state_power)
+ - (curr_state_lat * curr_state_power)) /
+ (prev_state_power - curr_state_power);
+
+ /* round-up target_residency */
+ t_sleep++;
+ }
+ }
+
+ WARN_ON(!t_sleep);
+
+ pr_debug(PREFIX "cpuidle: target_residency[%d]= %d\n", cstate, t_sleep);
+
+ return t_sleep;
+}
+#endif
+
/**
* intel_idle
* @dev: cpuidle_device
unsigned int cstate;
int cpu = smp_processor_id();
+#if (defined(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) && \
+ defined(CONFIG_PM_DEBUG))
+ {
+ /* Get Cstate based on ignore table from PMU driver */
+ unsigned int ncstate;
+ cstate =
+ (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+ ncstate = pmu_get_new_cstate(cstate, &index);
+ eax = flg2MWAIT(drv->states[index].flags);
+ }
+#endif
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
/*
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_mrfld = {
+ .state_table = mrfld_cstates,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
ICPU(0x46, idle_cpu_hsw),
+ ICPU(0x4a, idle_cpu_mrfld), /* Tangier SoC */
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
/* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
- /* if sub-state in table is not enumerated by CPUID */
- if ((mwait_substate + 1) > num_substates)
- continue;
+ /* FIXME: Do not check number of substates for any states above C6
+ * as these are not real C states supported by the CPU, they
+ * are emulated c states for s0ix support.
+ */
+ if ((cstate + 1) < 6) {
+ num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ }
+#if !defined(CONFIG_ATOM_SOC_POWER)
+ if (boot_cpu_data.x86_model != 0x37) {
+ /* if sub-state in table is not enumerated by CPUID */
+ if ((mwait_substate + 1) > num_substates)
+ continue;
+ }
+#endif
if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
/* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
- /* if sub-state in table is not enumerated by CPUID */
- if ((mwait_substate + 1) > num_substates)
- continue;
+ /* FIXME: Do not check number of substates for any states above C6
+ * as these are not real C states supported by the CPU, they
+ * are emulated c states for s0ix support.
+ */
+ if ((cstate + 1) < 6) {
+ num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ }
+#if !defined(CONFIG_ATOM_SOC_POWER)
+ if (boot_cpu_data.x86_model != 0x37) {
+ /* if sub-state in table is not enumerated by CPUID */
+ if ((mwait_substate + 1) > num_substates)
+ continue;
+ }
+#endif
dev->state_count += 1;
}
This driver can also be built as a module. If so, the module will be
called ti-adc081c.
+config TI_ADS7955_ADC
+ tristate "Texas Instruments ADS7955 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Texas Instruments ADS7955
+ 8 Channel ADC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ti-ads7955.
+
config TI_AM335X_ADC
tristate "TI's ADC driver"
depends on MFD_TI_AM335X_TSCADC
Say yes here to access the ADC part of the Nano River
Technologies Viperboard.
+config IIO_BASINCOVE_GPADC
+ tristate "IIO Basincove GPADC driver"
+ depends on IIO
+ help
+ Say yes here to build support for the IIO basincove GPADC driver.
+
endmenu
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
+obj-$(CONFIG_TI_ADS7955_ADC) += ti-ads7955.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
+obj-$(CONFIG_IIO_BASINCOVE_GPADC) += iio_basincove_gpadc.o
--- /dev/null
+/*
+ * iio_basincove_gpadc.c - Intel Merrifield Basin Cove GPADC Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/rpmsg.h>
+
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_basincove_gpadc.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/types.h>
+#include <linux/iio/consumer.h>
+
+struct gpadc_info {
+ int initialized;
+ /* This mutex protects gpadc sample/config from concurrent conflict.
+ Any function, which does the sample or config, needs to
+ hold this lock.
+ If it is locked, it also means the gpadc is in active mode.
+ */
+ struct mutex lock;
+ struct device *dev;
+ int irq;
+ u8 irq_status;
+ wait_queue_head_t wait;
+ int sample_done;
+ void __iomem *intr;
+ int channel_num;
+ struct gpadc_regmap_t *gpadc_regmaps;
+ struct gpadc_regs_t *gpadc_regs;
+};
+
+static inline int gpadc_clear_bits(u16 addr, u8 mask)
+{
+ return intel_scu_ipc_update_register(addr, 0, mask);
+}
+
+static inline int gpadc_set_bits(u16 addr, u8 mask)
+{
+ return intel_scu_ipc_update_register(addr, 0xff, mask);
+}
+
+static inline int gpadc_write(u16 addr, u8 data)
+{
+ return intel_scu_ipc_iowrite8(addr, data);
+}
+
+static inline int gpadc_read(u16 addr, u8 *data)
+{
+ return intel_scu_ipc_ioread8(addr, data);
+}
+
+static int gpadc_busy_wait(struct gpadc_regs_t *regs)
+{
+ u8 tmp;
+ int timeout = 0;
+
+ gpadc_read(regs->gpadcreq, &tmp);
+ while (tmp & regs->gpadcreq_busy && timeout < 500) {
+ gpadc_read(regs->gpadcreq, &tmp);
+ usleep_range(1800, 2000);
+ timeout++;
+ }
+
+ if (tmp & regs->gpadcreq_busy)
+ return -EBUSY;
+ else
+ return 0;
+}
+
+static void gpadc_dump(struct gpadc_info *info)
+{
+ u8 tmp;
+ struct gpadc_regs_t *regs = info->gpadc_regs;
+
+ dev_err(info->dev, "GPADC registers dump:\n");
+ gpadc_read(regs->adcirq, &tmp);
+ dev_err(info->dev, "ADCIRQ: 0x%x\n", tmp);
+ gpadc_read(regs->madcirq, &tmp);
+ dev_err(info->dev, "MADCIRQ: 0x%x\n", tmp);
+ gpadc_read(regs->gpadcreq, &tmp);
+ dev_err(info->dev, "GPADCREQ: 0x%x\n", tmp);
+ gpadc_read(regs->adc1cntl, &tmp);
+ dev_err(info->dev, "ADC1CNTL: 0x%x\n", tmp);
+}
+
+static irqreturn_t gpadc_isr(int irq, void *data)
+{
+ struct gpadc_info *info = iio_priv(data);
+
+ info->irq_status = ioread8(info->intr);
+ info->sample_done = 1;
+ wake_up(&info->wait);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t gpadc_threaded_isr(int irq, void *data)
+{
+ struct gpadc_info *info = iio_priv(data);
+ struct gpadc_regs_t *regs = info->gpadc_regs;
+
+ /* Clear IRQLVL1MASK */
+ gpadc_clear_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * iio_basincove_gpadc_sample - do gpadc sample.
+ * @indio_dev: industrial IO GPADC device handle
+ * @ch: gpadc bit set of channels to sample, for example, set ch = (1<<0)|(1<<2)
+ * means you are going to sample both channel 0 and 2 at the same time.
+ * @res:gpadc sampling result
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+
+int iio_basincove_gpadc_sample(struct iio_dev *indio_dev,
+ int ch, struct gpadc_result *res)
+{
+ struct gpadc_info *info = iio_priv(indio_dev);
+ int i, ret;
+ u8 tmp, th, tl;
+ u8 mask;
+ struct gpadc_regs_t *regs = info->gpadc_regs;
+
+ if (!info->initialized)
+ return -ENODEV;
+
+ mutex_lock(&info->lock);
+
+ mask = MBATTEMP | MSYSTEMP | MBATT | MVIBATT | MCCTICK;
+ gpadc_clear_bits(regs->madcirq, mask);
+ gpadc_clear_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+
+ tmp = regs->gpadcreq_irqen;
+
+ for (i = 0; i < info->channel_num; i++) {
+ if (ch & (1 << i))
+ tmp |= (1 << info->gpadc_regmaps[i].cntl);
+ }
+
+ info->sample_done = 0;
+
+ ret = gpadc_busy_wait(regs);
+ if (ret) {
+ dev_err(info->dev, "GPADC is busy\n");
+ goto done;
+ }
+
+ gpadc_write(regs->gpadcreq, tmp);
+
+ ret = wait_event_timeout(info->wait, info->sample_done, HZ);
+ if (ret == 0) {
+ gpadc_dump(info);
+ ret = -ETIMEDOUT;
+ dev_err(info->dev, "sample timeout, return %d\n", ret);
+ goto done;
+ } else {
+ ret = 0;
+ }
+
+ for (i = 0; i < info->channel_num; i++) {
+ if (ch & (1 << i)) {
+ gpadc_read(info->gpadc_regmaps[i].rslth, &th);
+ gpadc_read(info->gpadc_regmaps[i].rsltl, &tl);
+ res->data[i] = ((th & 0x3) << 8) + tl;
+ }
+ }
+
+done:
+ gpadc_set_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+ gpadc_set_bits(regs->madcirq, mask);
+ mutex_unlock(&info->lock);
+ return ret;
+}
+EXPORT_SYMBOL(iio_basincove_gpadc_sample);
+
+static struct gpadc_result sample_result;
+static int chs;
+
+/*
+ * sysfs "channel" store: parse a hex channel bitmask into @chs for use by
+ * a subsequent write to the "sample" attribute.  Valid masks are
+ * 0x1 .. (1 << channel_num) - 1.
+ */
+static ssize_t intel_basincove_gpadc_store_channel(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	if (sscanf(buf, "%x", &chs) != 1) {
+		dev_err(dev, "one channel argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (chs < (1 << 0) || chs >= (1 << info->channel_num)) {
+		/*
+		 * Report the real upper bound instead of the previously
+		 * hard-coded "0x1FF", which was wrong for any device whose
+		 * channel_num is not 9.
+		 */
+		dev_err(dev, "invalid channel, should be in [0x1 - 0x%x]\n",
+			(1 << info->channel_num) - 1);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+/* sysfs "channel" show: report the channel bitmask last stored in @chs. */
+static ssize_t intel_basincove_gpadc_show_channel(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", chs);
+}
+
+/*
+ * sysfs "sample" store: writing "1" performs one conversion over the
+ * channel mask previously set via the "channel" attribute, filling the
+ * module-level sample_result for the "result" attribute to display.
+ */
+static ssize_t intel_basincove_gpadc_store_sample(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int value, ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+	/* Clear stale data so unselected channels read back as zero. */
+	memset(sample_result.data, 0, sizeof(sample_result.data));
+
+	if (sscanf(buf, "%d", &value) != 1) {
+		dev_err(dev, "one argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (value == 1) {
+		ret = iio_basincove_gpadc_sample(indio_dev, chs,
+						&sample_result);
+		if (ret) {
+			dev_err(dev, "sample failed\n");
+			return ret;
+		}
+	} else {
+		dev_err(dev, "input '1' to sample\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+/*
+ * sysfs "result" show: dump the last sampled value of every channel.
+ * Uses scnprintf() so @used can never exceed PAGE_SIZE: snprintf()
+ * returns the would-be length on truncation, which would make the
+ * "PAGE_SIZE - used" argument underflow on a later iteration.
+ */
+static ssize_t intel_basincove_gpadc_show_result(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int i;
+	int used = 0;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	for (i = 0; i < info->channel_num; i++) {
+		used += scnprintf(buf + used, PAGE_SIZE - used,
+			"sample_result[%d] = %d\n", i, sample_result.data[i]);
+	}
+
+	return used;
+}
+
+
+/* Debug sysfs interface, exported under the "basincove_gpadc" group. */
+static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO,
+		intel_basincove_gpadc_show_channel,
+		intel_basincove_gpadc_store_channel);
+static DEVICE_ATTR(sample, S_IWUSR, NULL, intel_basincove_gpadc_store_sample);
+static DEVICE_ATTR(result, S_IRUGO, intel_basincove_gpadc_show_result, NULL);
+
+static struct attribute *intel_basincove_gpadc_attrs[] = {
+	&dev_attr_channel.attr,
+	&dev_attr_sample.attr,
+	&dev_attr_result.attr,
+	NULL,
+};
+static struct attribute_group intel_basincove_gpadc_attr_group = {
+	.name = "basincove_gpadc",
+	.attrs = intel_basincove_gpadc_attrs,
+};
+
+/*
+ * IIO read_raw callback: sample the single channel described by @chan
+ * and return its raw value in *val.
+ *
+ * Returns IIO_VAL_INT on success (the IIO core uses the return value to
+ * select the formatting of *val; the previous "return 0" was not a valid
+ * IIO value type), or -EINVAL on sampling failure.
+ */
+static int basincove_adc_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan,
+			int *val, int *val2, long m)
+{
+	int ret;
+	int ch = chan->channel;
+	struct gpadc_info *info = iio_priv(indio_dev);
+	struct gpadc_result res;
+
+	ret = iio_basincove_gpadc_sample(indio_dev, (1 << ch), &res);
+	if (ret) {
+		dev_err(info->dev, "sample failed\n");
+		return -EINVAL;
+	}
+
+	*val = res.data[ch];
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * Sample every channel in the NULL-terminated @chan array in a single
+ * conversion and store the raw values into @val (one entry per channel,
+ * in array order).  Returns 0 on success or a negative error code.
+ */
+static int basincove_adc_read_all_raw(struct iio_channel *chan,
+					int *val)
+{
+	int ret;
+	int i, num = 0;
+	int ch = 0;
+	int *channels;
+	struct gpadc_info *info = iio_priv(chan->indio_dev);
+	struct gpadc_result res;
+
+	/* The consumer channel array is terminated by a NULL indio_dev. */
+	while (chan[num].indio_dev)
+		num++;
+
+	/* kcalloc is the overflow-checked idiom for array allocations. */
+	channels = kcalloc(num, sizeof(*channels), GFP_KERNEL);
+	if (channels == NULL)
+		return -ENOMEM;
+
+	/* Collapse the channel list into a single request bitmask. */
+	for (i = 0; i < num; i++) {
+		channels[i] = chan[i].channel->channel;
+		ch |= (1 << channels[i]);
+	}
+
+	ret = iio_basincove_gpadc_sample(chan->indio_dev, ch, &res);
+	if (ret) {
+		dev_err(info->dev, "sample failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < num; i++)
+		val[i] = res.data[channels[i]];
+
+end:
+	kfree(channels);
+	return ret;
+}
+
+/* IIO callbacks: per-channel reads plus the batched read_all_raw path. */
+static const struct iio_info basincove_adc_info = {
+	.read_raw = &basincove_adc_read_raw,
+	.read_all_raw = &basincove_adc_read_all_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe the Basin Cove GPADC platform device: allocate the IIO device,
+ * map the ADC interrupt status register, request the threaded IRQ,
+ * register with the IIO core and expose the debug sysfs group.
+ * Resources are released in reverse order via the goto ladder.
+ */
+static int bcove_gpadc_probe(struct platform_device *pdev)
+{
+	int err;
+	struct gpadc_info *info;
+	struct iio_dev *indio_dev;
+	struct intel_basincove_gpadc_platform_data *pdata =
+			pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data supplied\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	indio_dev = iio_device_alloc(sizeof(struct gpadc_info));
+	if (indio_dev == NULL) {
+		dev_err(&pdev->dev, "allocating iio device failed\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	info = iio_priv(indio_dev);
+
+	mutex_init(&info->lock);
+	init_waitqueue_head(&info->wait);
+	info->dev = &pdev->dev;
+	info->irq = platform_get_irq(pdev, 0);
+	info->intr = ioremap_nocache(pdata->intr, 1);
+	if (!info->intr) {
+		dev_err(&pdev->dev, "ioremap of ADCIRQ failed\n");
+		err = -ENOMEM;
+		goto err_free;
+	}
+	info->channel_num = pdata->channel_num;
+	info->gpadc_regmaps = pdata->gpadc_regmaps;
+	info->gpadc_regs = pdata->gpadc_regs;
+
+	err = request_threaded_irq(info->irq, gpadc_isr, gpadc_threaded_isr,
+			IRQF_ONESHOT, "adc", indio_dev);
+	if (err) {
+		gpadc_dump(info);
+		dev_err(&pdev->dev, "unable to register irq %d\n", info->irq);
+		goto err_iounmap;
+	}
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->name = pdev->name;
+
+	indio_dev->channels = pdata->gpadc_channels;
+	indio_dev->num_channels = pdata->channel_num;
+	indio_dev->info = &basincove_adc_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	err = iio_map_array_register(indio_dev, pdata->gpadc_iio_maps);
+	if (err)
+		goto err_release_irq;
+
+	err = iio_device_register(indio_dev);
+	if (err < 0)
+		goto err_array_unregister;
+
+	err = sysfs_create_group(&pdev->dev.kobj,
+			&intel_basincove_gpadc_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to export sysfs interface, error: %d\n",
+			err);
+		goto err_iio_device_unregister;
+	}
+
+	/* Gate iio_basincove_gpadc_sample() until everything is in place. */
+	info->initialized = 1;
+
+	dev_info(&pdev->dev, "bcove adc probed\n");
+
+	return 0;
+
+err_iio_device_unregister:
+	iio_device_unregister(indio_dev);
+err_array_unregister:
+	iio_map_array_unregister(indio_dev);
+err_release_irq:
+	/*
+	 * The IRQ was requested with dev_id == indio_dev; free_irq() must be
+	 * handed the same cookie or it will not find (and release) the action.
+	 */
+	free_irq(info->irq, indio_dev);
+err_iounmap:
+	iounmap(info->intr);
+err_free:
+	iio_device_free(indio_dev);
+out:
+	return err;
+}
+
+/* Tear down everything bcove_gpadc_probe() set up, in reverse order. */
+static int bcove_gpadc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	sysfs_remove_group(&pdev->dev.kobj,
+			&intel_basincove_gpadc_attr_group);
+
+	iio_device_unregister(indio_dev);
+	iio_map_array_unregister(indio_dev);
+	/*
+	 * The IRQ was requested with dev_id == indio_dev in probe; passing
+	 * "info" here (as before) would fail to match and leak the IRQ.
+	 */
+	free_irq(info->irq, indio_dev);
+	iounmap(info->intr);
+	iio_device_free(indio_dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Suspend: take info->lock (non-blocking) so no sample can be in flight
+ * while suspended; refuse suspend with -EBUSY if a sample holds the lock.
+ * NOTE(review): the mutex is intentionally held from suspend until
+ * resume releases it below — confirm this lock/unlock split across PM
+ * callbacks is acceptable for lockdep on this kernel.
+ */
+static int bcove_gpadc_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	if (!mutex_trylock(&info->lock))
+		return -EBUSY;
+
+	return 0;
+}
+
+/* Resume: release the lock taken in bcove_gpadc_suspend(). */
+static int bcove_gpadc_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	mutex_unlock(&info->lock);
+	return 0;
+}
+#else
+#define bcove_gpadc_suspend		NULL
+#define bcove_gpadc_resume		NULL
+#endif
+
+/* Platform driver glue; matched by the "bcove_adc" platform device. */
+static const struct dev_pm_ops bcove_gpadc_driver_pm_ops = {
+	.suspend	= bcove_gpadc_suspend,
+	.resume		= bcove_gpadc_resume,
+};
+
+static struct platform_driver bcove_gpadc_driver = {
+	.driver = {
+		   .name = "bcove_adc",
+		   .owner = THIS_MODULE,
+		   .pm = &bcove_gpadc_driver_pm_ops,
+		   },
+	.probe = bcove_gpadc_probe,
+	.remove = bcove_gpadc_remove,
+};
+
+/* Registered/unregistered from the rpmsg probe/remove callbacks below,
+ * not from module_init: the driver only makes sense once the SCU rpmsg
+ * channel exists. */
+static int bcove_gpadc_module_init(void)
+{
+	return platform_driver_register(&bcove_gpadc_driver);
+}
+
+static void bcove_gpadc_module_exit(void)
+{
+	platform_driver_unregister(&bcove_gpadc_driver);
+}
+
+/*
+ * rpmsg probe: the SCU-side channel is up, so it is now safe to register
+ * the underlying platform driver.
+ */
+static int bcove_adc_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		return -ENODEV;
+	}
+
+	dev_info(&rpdev->dev, "Probed bcove_gpadc rpmsg device\n");
+
+	return bcove_gpadc_module_init();
+}
+
+/* rpmsg remove: tear down the platform driver registered in probe. */
+static void bcove_adc_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	bcove_gpadc_module_exit();
+	dev_info(&rpdev->dev, "Removed bcove_gpadc rpmsg device\n");
+}
+
+/* No inbound messages are expected on this channel; log and hex-dump any. */
+static void bcove_adc_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+/* Match table: bound when the SCU announces the "rpmsg_bcove_adc" channel. */
+static struct rpmsg_device_id bcove_adc_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_bcove_adc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, bcove_adc_rpmsg_id_table);
+
+static struct rpmsg_driver bcove_adc_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= bcove_adc_rpmsg_id_table,
+	.probe		= bcove_adc_rpmsg_probe,
+	.callback	= bcove_adc_rpmsg_cb,
+	.remove		= bcove_adc_rpmsg_remove,
+};
+
+static int __init bcove_adc_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&bcove_adc_rpmsg);
+}
+
+/* Built-in kernels register late (rootfs_initcall) so rpmsg is ready first. */
+#ifdef MODULE
+module_init(bcove_adc_rpmsg_init);
+#else
+rootfs_initcall(bcove_adc_rpmsg_init);
+#endif
+
+/* Module exit: unregister the rpmsg driver (which removes the platform
+ * driver via bcove_adc_rpmsg_remove).  unregister_rpmsg_driver() returns
+ * void, so the previous "return" of its value was meaningless and is
+ * dropped. */
+static void __exit bcove_adc_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&bcove_adc_rpmsg);
+}
+module_exit(bcove_adc_rpmsg_exit);
+
+MODULE_AUTHOR("Yang Bin<bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield Basin Cove GPADC Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * ADS7955 SPI ADC driver
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dave Hunt <dave.hunt@emutex.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+/*
+ * [FIXME]
+ * Notes: This version of the ti-ads7955 driver is written with a couple of
+ * workarounds for the functionality of the SPI driver on Edison at the time
+ * of writing.
+ * Issue 1: The CS is pushed low between every frame
+ * Issue 2: spi_message_add_tail() can only be called once in the driver.
+ * Subsequent messages are ignored.
+*/
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/platform_data/ti-ads7955.h>
+
+#define ADS7955_EXTREF true
+#define SPI_MAX_SPEED_HZ 20000000
+#define SPI_BITS_PER_WORD 16
+
+#define ADS7955_MANUAL_MODE (0x1 << 12) /* Selects Manual Mode */
+#define ADS7955_AUTO1_MODE (0x2 << 12) /* Selects Auto Mode 1 */
+#define ADS7955_AUTO2_MODE (0x3 << 12) /* Selects Auto Mode 2 */
+#define ADS7955_AUTO1_PROGRAM (0x8 << 12) /* Programming Auto Mode 1 */
+#define ADS7955_AUTO2_PROGRAM (0x9 << 12) /* Programming Auto Mode 2 */
+
+#define ADS7955_CONFIG BIT(11) /* program bits DI06-00 */
+#define ADS7955_AUTO1_RESET BIT(10) /* Reset to first channel */
+#define ADS7955_CHANNEL(x) ((x & 0xf) << 7)/* Channel select (DI10-07) */
+
+#define ADS7955_RANGE_1 0 /* Selects 2.5V input range */
+#define ADS7955_RANGE_2 BIT(6) /* Selects 5.0V input range */
+
+#define ADS7955_POWER_NORMAL 0 /* No Powerdown */
+#define ADS7955_POWER_DOWN BIT(5) /* Powerdown on last edge */
+
+#define ADS7955_GET_CONVERSION 0 /* High bits have ch index*/
+#define ADS7955_GET_GPIO BIT(4) /* High bits have GPIO bits */
+
+#define ADS7955_SET_READ (ADS7955_MANUAL_MODE | ADS7955_CONFIG | \
+ ADS7955_RANGE_2 | ADS7955_POWER_NORMAL | \
+ ADS7955_GET_CONVERSION)
+
+#define ADS7955_READ_AUTO1 (ADS7955_AUTO1_MODE | ADS7955_CONFIG | \
+ ADS7955_RANGE_2 | ADS7955_POWER_NORMAL | \
+ ADS7955_GET_CONVERSION)
+
+#define ADS7955_MAX_CHAN 8
+#define ADS7955_BITS 12
+#define ADS7955_STORAGE_BITS 16
+/*
+ * Define the Reference Voltage for the board on which this ADC is used.
+ * May change depending on jumper settings or wiring configuration.
+ */
+#define ADS7955_INTREF_mV 5000
+#define SPI_MSG_MAX_LEN 20 /* 8 channels plus timestamp */
+
+#define RES_MASK(bits) ((1 << (bits)) - 1)
+
+/* Per-device state; one instance lives in the iio_dev private area. */
+struct ads7955_state {
+	struct spi_device	*spi;
+	struct regulator	*reg;		/* external vref, if ext_ref */
+	unsigned		ext_ref;	/* non-zero: use external ref */
+	struct spi_transfer	ring_xfer[10];
+	struct spi_transfer	scan_single_xfer[3];
+	struct spi_message	ring_msg;	/* triggered-buffer message */
+	struct spi_message	scan_single_msg; /* single-shot message */
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	__u16			rx_buf[SPI_MSG_MAX_LEN] ____cacheline_aligned;
+	__u16			tx_buf[SPI_MSG_MAX_LEN];
+};
+
+/* One 12-bit unsigned voltage channel per ADC input, 16-bit storage. */
+#define ADS7955_V_CHAN(index)						\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.indexed = 1,						\
+		.channel = index,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.address = index,					\
+		.scan_index = index,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = ADS7955_BITS,			\
+			.storagebits = ADS7955_STORAGE_BITS,		\
+			.endianness = IIO_CPU,				\
+		},							\
+	}
+
+/* Eight voltage inputs plus the software timestamp scan element. */
+static const struct iio_chan_spec ads7955_channels[] = {
+	ADS7955_V_CHAN(0),
+	ADS7955_V_CHAN(1),
+	ADS7955_V_CHAN(2),
+	ADS7955_V_CHAN(3),
+	ADS7955_V_CHAN(4),
+	ADS7955_V_CHAN(5),
+	ADS7955_V_CHAN(6),
+	ADS7955_V_CHAN(7),
+	IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+/**
+ * ads7955_update_scan_mode() setup the spi transfer buffer for the scan mask
+ **/
+/**
+ * ads7955_update_scan_mode() setup the spi transfer buffer for the scan mask
+ *
+ * Programs the chip's Auto-1 channel sequence from @active_scan_mask and
+ * pre-builds the single SPI message used by the trigger handler.
+ * Returns 0 on success or the spi_sync() error code.
+ **/
+static int ads7955_update_scan_mode(struct iio_dev *indio_dev,
+	const unsigned long *active_scan_mask)
+{
+	struct ads7955_state *st = iio_priv(indio_dev);
+	int i, ret;
+	unsigned short channel_count;
+
+	/*
+	 * For programming the auto1 mode, we need to send two words, one to
+	 * specify program mode, and the other to give a bitmask of channels
+	 * to be read when reading the auto sequence.
+	 */
+	/*
+	 * [FIXME]
+	 * Workaround: Build up a custom SPI message containing all required
+	 * frames (including space for expected responses), and send as one
+	 * SPI messge. This is to get around the issue that the current SPI
+	 * driver only supports the first 'spi_message_add_tail' call.
+	 */
+	st->tx_buf[0] = ADS7955_AUTO1_PROGRAM;
+	/* Low 8 bits of the scan mask map directly to channels 0-7. */
+	st->tx_buf[1] = (unsigned short)*active_scan_mask;
+	st->tx_buf[2] = (ADS7955_SET_READ | ADS7955_POWER_DOWN);
+
+	ret = spi_sync(st->spi, &st->scan_single_msg);
+	if (ret)
+		return ret;
+
+	/*
+	 * So now we've told the hardware about the channels we want to sample,
+	 * now we set up the message sequence for when we're triggered.
+	 */
+	/*
+	 * [FIXME]
+	 * Workaround: Build up a custom SPI message containing all required
+	 * frames (including space for expected responses), and send as one
+	 * SPI messge. This is to get around the issue that the current SPI
+	 * driver only supports the first 'spi_message_add_tail' call.
+	 */
+	channel_count = 0;
+	for (i = 0; i < ADS7955_MAX_CHAN; i++) {
+		if (test_bit(i, active_scan_mask)) {
+			/* First frame also resets the Auto-1 sequencer. */
+			if (channel_count == 0)
+				st->tx_buf[channel_count] = (ADS7955_READ_AUTO1
+						| ADS7955_AUTO1_RESET);
+			else
+				st->tx_buf[channel_count] =
+						(ADS7955_READ_AUTO1);
+			channel_count++;
+		}
+	}
+
+	/* Put in some extra tx frames to allow us to get the
+	   rx frames (behind tx by two frames) */
+	st->tx_buf[channel_count++] = (ADS7955_READ_AUTO1);
+	st->tx_buf[channel_count++] = (ADS7955_READ_AUTO1 |
+			ADS7955_POWER_DOWN);
+
+	st->ring_xfer[0].tx_buf = &st->tx_buf[0];
+	st->ring_xfer[0].rx_buf = &st->rx_buf[0];
+	st->ring_xfer[0].len = channel_count * 2;	/* 16-bit frames */
+	spi_message_init(&st->ring_msg);
+	spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
+	return 0;
+}
+
+/**
+ * ads7955_trigger_handler() bh of trigger launched polling to ring buffer
+ **/
+/**
+ * ads7955_trigger_handler() bh of trigger launched polling to ring buffer
+ *
+ * Runs the pre-built ring message, appends the timestamp if enabled, and
+ * pushes one scan to the IIO buffer.
+ **/
+static irqreturn_t ads7955_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ads7955_state *st = iio_priv(indio_dev);
+	/* Data lags tx by two frames, so the scan starts at rx_buf[2]. */
+	u8 *return_data = (u8 *)&(st->rx_buf[2]);
+	s64 time_ns = 0;
+	int ret;
+
+	ret = spi_sync(st->spi, &st->ring_msg);
+	if (ret)
+		return IRQ_HANDLED;
+
+	/* Timestamp occupies the last 8 bytes of the scan, if enabled. */
+	if (indio_dev->scan_timestamp) {
+		time_ns = iio_get_time_ns();
+		memcpy(return_data +
+				indio_dev->scan_bytes - sizeof(s64),
+			&time_ns, sizeof(time_ns));
+	}
+
+	iio_push_to_buffers(indio_dev, return_data);
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Single-shot manual-mode read of channel @ch.  Three identical frames
+ * are sent because the result lags the request by two frames; the value
+ * arrives in rx_buf[2].  Returns the raw frame or a spi_sync() error.
+ */
+static int ads7955_scan_direct(struct ads7955_state *st, unsigned ch)
+{
+	int ret;
+
+	/*
+	 * [FIXME]
+	 * Workaround: Build up a custom SPI message containing all required
+	 * frames (including space for expected responses), and send as one
+	 * SPI messge. This is to get around the issue that the current SPI
+	 * driver only supports the first 'spi_message_add_tail' call.
+	 */
+	st->tx_buf[0] = (ADS7955_SET_READ | ADS7955_CHANNEL(ch));
+	st->tx_buf[1] = (ADS7955_SET_READ | ADS7955_CHANNEL(ch));
+	st->tx_buf[2] = (ADS7955_SET_READ | ADS7955_CHANNEL(ch) |
+			ADS7955_POWER_DOWN);
+
+	ret = spi_sync(st->spi, &st->scan_single_msg);
+	if (ret)
+		return ret;
+	return st->rx_buf[2];
+}
+
+
+/*
+ * Return the reference voltage in millivolts: the regulator's voltage
+ * when an external reference is configured, the board constant otherwise.
+ * Propagates a negative regulator_get_voltage() error code.
+ */
+static int ads7955_get_ref_voltage(struct ads7955_state *st)
+{
+	int uv;
+
+	if (!st->ext_ref)
+		return ADS7955_INTREF_mV;
+
+	uv = regulator_get_voltage(st->reg);
+	if (uv < 0)
+		return uv;
+
+	return uv / 1000;	/* uV -> mV */
+}
+
+/*
+ * IIO read_raw: RAW does a direct manual-mode conversion (refused with
+ * -EBUSY while the triggered buffer owns the bus); SCALE reports
+ * vref / 2^realbits as a fractional value.
+ */
+static int ads7955_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	int ret;
+	struct ads7955_state *st = iio_priv(indio_dev);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+		if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+			ret = -EBUSY;
+		else
+			ret = ads7955_scan_direct(st, chan->address);
+		mutex_unlock(&indio_dev->mlock);
+		if (ret < 0)
+			return ret;
+
+		/* Strip the frame's upper status bits, keep the 12-bit value. */
+		*val = ret & RES_MASK(ADS7955_BITS);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_VOLTAGE:
+			*val = ads7955_get_ref_voltage(st);
+			*val2 = chan->scan_type.realbits;
+			return IIO_VAL_FRACTIONAL_LOG2;
+		default:
+			return -EINVAL;
+		}
+	}
+	return -EINVAL;
+}
+
+/* IIO callbacks for the ADS7955. */
+static const struct iio_info ads7955_info = {
+	.read_raw = &ads7955_read_raw,
+	.update_scan_mode = ads7955_update_scan_mode,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * SPI probe: allocate the IIO device, optionally acquire/enable the
+ * external vref regulator, configure the SPI link, pre-build the
+ * single-shot message, set up the triggered buffer and register with
+ * the IIO core.  Errors unwind through the goto ladder in reverse order.
+ */
+static int ads7955_probe(struct spi_device *spi)
+{
+	struct ads7955_platform_data *pdata = spi->dev.platform_data;
+	struct ads7955_state *st;
+	struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+	int ret;
+
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	/* External reference is opt-in via platform data. */
+	if (pdata && pdata->ext_ref)
+		st->ext_ref = ADS7955_EXTREF;
+
+	if (st->ext_ref) {
+		st->reg = regulator_get(&spi->dev, "vref");
+		if (IS_ERR(st->reg)) {
+			ret = PTR_ERR(st->reg);
+			goto error_free;
+		}
+		ret = regulator_enable(st->reg);
+		if (ret)
+			goto error_put_reg;
+	}
+
+	spi_set_drvdata(spi, indio_dev);
+
+	st->spi = spi;
+
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = ads7955_channels;
+	indio_dev->num_channels = ARRAY_SIZE(ads7955_channels);
+	indio_dev->info = &ads7955_info;
+
+	/*
+	 * Setup default message
+	 * [FIXME]
+	 * Workaround: Send each frame as 16 bits to get over the fact that
+	 * the current SPI hardware pulls CS low between every frame.
+	 */
+	spi->bits_per_word = SPI_BITS_PER_WORD;
+	spi->max_speed_hz = SPI_MAX_SPEED_HZ;
+	spi_setup(spi);
+
+	/* Three 16-bit frames (6 bytes): request, repeat, power-down/read. */
+	st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
+	st->scan_single_xfer[0].rx_buf = &st->rx_buf[0];
+	st->scan_single_xfer[0].len = 6;
+
+	spi_message_init(&st->scan_single_msg);
+	spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg);
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+			&ads7955_trigger_handler, NULL);
+	if (ret) {
+		dev_warn(&indio_dev->dev,
+				"Failed to set up iio_triggered_buffer_setup\n");
+		goto error_disable_reg;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_cleanup_ring;
+
+	return 0;
+
+error_cleanup_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+	if (st->ext_ref)
+		regulator_disable(st->reg);
+error_put_reg:
+	if (st->ext_ref)
+		regulator_put(st->reg);
+error_free:
+	iio_device_free(indio_dev);
+
+	return ret;
+}
+
+/* Undo ads7955_probe() in reverse order. */
+static int ads7955_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ads7955_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	if (st->ext_ref) {
+		regulator_disable(st->reg);
+		regulator_put(st->reg);
+	}
+	iio_device_free(indio_dev);
+
+	return 0;
+}
+
+/* SPI device IDs and driver registration. */
+static const struct spi_device_id ads7955_id[] = {
+	{"ads7955", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ads7955_id);
+
+static struct spi_driver ads7955_driver = {
+	.driver = {
+		.name	= "ads7955",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ads7955_probe,
+	.remove		= ads7955_remove,
+	.id_table	= ads7955_id,
+};
+module_spi_driver(ads7955_driver);
+
+MODULE_AUTHOR("Dave Hunt <dave.hunt@emutex.com>");
+MODULE_DESCRIPTION("Texas Instruments ADS7955 ADC");
+MODULE_LICENSE("GPL v2");
[IIO_ALTVOLTAGE] = "altvoltage",
[IIO_CCT] = "cct",
[IIO_PRESSURE] = "pressure",
+ [IIO_RESISTANCE] = "resistance",
};
static const char * const iio_modifier_names[] = {
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
+/*
+ * Count the entries in a NULL-terminated consumer channel array
+ * (as returned by iio_channel_get_all()).  Returns the count, or
+ * -ENODEV if @chan itself is NULL.
+ */
+int iio_channel_get_num(const struct iio_channel *chan)
+{
+	int count;
+
+	if (chan == NULL)
+		return -ENODEV;
+
+	for (count = 0; chan[count].indio_dev; count++)
+		;
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_num);
+
+/*
+ * For each channel in the NULL-terminated @chan array, look up its
+ * consumer name in the global iio_map_list (matching on the channel's
+ * datasheet_name) and strcpy it into chan_name[i].
+ *
+ * NOTE(review): the caller must supply pre-allocated buffers in
+ * @chan_name large enough for the consumer names; entries with no
+ * matching map are left untouched — confirm callers handle both.
+ */
+int iio_channel_get_name(const struct iio_channel *chan, char **chan_name)
+{
+	int i = 0;
+	struct iio_map_internal *c = NULL;
+
+	if (chan == NULL)
+		return -ENODEV;
+
+	if (chan_name == NULL)
+		return -EINVAL;
+
+	while (chan[i].indio_dev) {
+		mutex_lock(&iio_map_list_lock);
+		list_for_each_entry(c, &iio_map_list, l) {
+			if (strcmp(chan[i].channel->datasheet_name,
+					c->map->adc_channel_label) != 0)
+				continue;
+			strcpy(chan_name[i], c->map->consumer_channel);
+			break;
+		}
+		mutex_unlock(&iio_map_list_lock);
+		i++;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_name);
+
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
+/*
+ * Read raw values for all channels in the NULL-terminated @chan array in
+ * one driver call.  read_all_raw is an optional iio_info callback, so it
+ * must be checked before the call: dereferencing it unconditionally (as
+ * before) would oops on every driver that does not implement it.
+ * Returns 0 on success, -ENODEV if the device or callback is gone.
+ */
+int iio_read_channel_all_raw(struct iio_channel *chan, int *val)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL ||
+	    chan->indio_dev->info->read_all_raw == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = chan->indio_dev->info->read_all_raw(chan, val);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_all_raw);
+
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int raw, int *processed, unsigned int scale)
{
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
#define MSIC_VENDOR(id) ((id >> 6) & 3)
#define MSIC_VERSION(id) (id & 0x3f)
an Intel Atom (non-netbook) mobile device containing a MIPI
P1149.7 standard implementation.
+config INTEL_PTI_STM
+	tristate "MIPI System Trace Macro (STM) for Intel"
+	default n
+	depends on INTEL_MID_PTI
+	help
+	  The STM (System Trace Macro) driver controls the routing of trace
+	  data through an Intel Tangier PTI port or through a USB xDCI
+	  interface with Debug-Class DvC.Trace support.
+
+	  It provides the ability for the PTI driver to set up the output,
+	  and for the user to change the output via sysfs and an exported
+	  header.
+
config SGI_IOC4
tristate "SGI IOC4 Base IO support"
depends on PCI
the genalloc API. It is supposed to be used for small on-chip SRAM
areas found on many SoCs.
+config EMMC_IPANIC
+ bool "Intel kernel panic diagnostics driver FOR EMMC"
+ default n
+ ---help---
+ Driver which handles kernel panics and attempts to write
+ critical debugging data to EMMC.
+
+config EMMC_IPANIC_PLABEL
+ string "Intel kernel panic driver (EMMC_IPANIC) partition label"
+ depends on EMMC_IPANIC
+ default "panic"
+ ---help---
+ Set the default mmc partition label for EMMC_IPANIC driver.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
+source "drivers/misc/bcm-lpm/Kconfig"
endmenu
obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_INTEL_MID_PTI) += pti.o
+obj-$(CONFIG_INTEL_PTI_STM) += stm.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
+obj-$(CONFIG_BCM_BT_LPM)	+= bcm-lpm/
+obj-$(CONFIG_EMMC_IPANIC) += emmc_ipanic.o
--- /dev/null
+config BCM_BT_LPM
+ tristate "Broadcom Bluetooth Low Power Mode"
+ depends on SERIAL_MFD_HSU
+ default m
+ help
+ Select this module for Broadcom Bluetooth low power management.
--- /dev/null
+obj-$(CONFIG_BCM_BT_LPM) += bcm_bt_lpm.o
--- /dev/null
+/*
+ * Bluetooth Broadcomm and low power control via GPIO
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/hrtimer.h>
+#include <linux/irq.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_hsu.h>
+
+#ifndef CONFIG_ACPI
+#include <asm/bcm_bt_lpm.h>
+#else
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+enum {
+ gpio_wake_acpi_idx,
+ gpio_enable_bt_acpi_idx,
+ host_wake_acpi_idx
+};
+#endif
+
+/* Module-wide state: one BT controller per platform is assumed. */
+static struct rfkill *bt_rfkill;
+static bool bt_enabled;			/* radio currently unblocked */
+static bool host_wake_uart_enabled;	/* UART held awake for host_wake */
+static bool wake_uart_enabled;		/* UART held awake for our wake */
+static bool int_handler_enabled;	/* host_wake IRQ installed */
+
+#define LPM_ON
+
+static void activate_irq_handler(void);
+
+struct bcm_bt_lpm {
+	unsigned int gpio_wake;		/* host -> controller wake GPIO */
+	unsigned int gpio_host_wake;	/* controller -> host wake GPIO */
+	unsigned int int_host_wake;	/* IRQ derived from gpio_host_wake */
+	unsigned int gpio_enable_bt;	/* BT power enable GPIO */
+
+	int wake;			/* last value driven on gpio_wake */
+	int host_wake;			/* last value seen on gpio_host_wake */
+
+	/* Drops gpio_wake after a quiet period to allow low power mode. */
+	struct hrtimer enter_lpm_timer;
+	ktime_t enter_lpm_delay;
+
+	struct device *tty_dev;		/* HSU tty device for PM runtime */
+
+	int port;			/* HSU UART port index */
+} bt_lpm;
+
+#ifdef LPM_ON
+/* Take a PM runtime reference on the HSU tty so the UART stays powered. */
+static void uart_enable(struct device *tty)
+{
+	pr_debug("%s: runtime get\n", __func__);
+	/* Tell PM runtime to power on the tty device and block s0i3 */
+	pm_runtime_get(tty);
+}
+
+/* Drop the PM runtime reference taken by uart_enable(). */
+static void uart_disable(struct device *tty)
+{
+	pr_debug("%s: runtime put\n", __func__);
+	/* Tell PM runtime to release tty device and allow s0i3 */
+	pm_runtime_put(tty);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * ACPI-specific probe: resolve the enable/wake/host-wake GPIOs from the
+ * BCM2E1A ACPI entry by index and read the UART port number from the
+ * "UART" method.  Fills the global bt_lpm; returns 0 or -EINVAL.
+ */
+static int bcm_bt_lpm_acpi_probe(struct platform_device *pdev)
+{
+	struct acpi_gpio_info info;
+	acpi_handle handle;
+	acpi_integer port;
+
+	/*
+	 * Handle ACPI specific initializations.
+	 */
+	dev_dbg(&pdev->dev, "BCM2E1A ACPI specific probe\n");
+
+	bt_lpm.gpio_enable_bt = acpi_get_gpio_by_index(&pdev->dev,
+					gpio_enable_bt_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_enable_bt)) {
+		pr_err("%s: gpio %d for gpio_enable_bt not valid\n", __func__,
+			bt_lpm.gpio_enable_bt);
+		return -EINVAL;
+	}
+
+#ifdef LPM_ON
+	bt_lpm.gpio_wake = acpi_get_gpio_by_index(&pdev->dev,
+					gpio_wake_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_wake)) {
+		pr_err("%s: gpio %d for gpio_wake not valid\n", __func__,
+			bt_lpm.gpio_wake);
+		return -EINVAL;
+	}
+
+	bt_lpm.gpio_host_wake = acpi_get_gpio_by_index(&pdev->dev,
+					host_wake_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_host_wake)) {
+		pr_err("%s: gpio %d for gpio_host_wake not valid\n", __func__,
+			bt_lpm.gpio_host_wake);
+		return -EINVAL;
+	}
+
+	bt_lpm.int_host_wake = gpio_to_irq(bt_lpm.gpio_host_wake);
+
+	pr_debug("%s: gpio_wake %d, gpio_host_wake %d, int_host_wake %d\n",
+		__func__,
+		bt_lpm.gpio_wake,
+		bt_lpm.gpio_host_wake,
+		bt_lpm.int_host_wake);
+#endif
+
+	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+
+	if (ACPI_FAILURE(acpi_evaluate_integer(handle, "UART", NULL, &port))) {
+		dev_err(&pdev->dev, "Error evaluating UART port number\n");
+
+		/* FIXME - Force port 0 if the information is missing from the
+		 * ACPI table.
+		 * That will be removed once the ACPI tables will all have been
+		 * updated.
+		 */
+		port = 0;
+	}
+
+	/* NOTE(review): acpi_integer is 64-bit; truncated to int here. */
+	bt_lpm.port = port;
+	pr_debug("%s: UART port %d\n", __func__, bt_lpm.port);
+
+	return 0;
+}
+#endif /* CONFIG_ACPI */
+
+/*
+ * rfkill set_block callback: drive the wake and enable GPIOs to power
+ * the controller on (blocked == false) or off, and track the state in
+ * bt_enabled.
+ */
+static int bcm43xx_bt_rfkill_set_power(void *data, bool blocked)
+{
+	/* rfkill_ops callback. Turn transmitter on when blocked is false */
+
+	if (!blocked) {
+		gpio_set_value(bt_lpm.gpio_wake, 1);
+		/*
+		 * Delay advised by BRCM is min 2.5ns; set it between
+		 * 10 and 50us for more comfort.
+		 */
+		usleep_range(10, 50);
+
+		gpio_set_value(bt_lpm.gpio_enable_bt, 1);
+		pr_debug("%s: turn BT on\n", __func__);
+	} else {
+		gpio_set_value(bt_lpm.gpio_enable_bt, 0);
+		pr_debug("%s: turn BT off\n", __func__);
+	}
+
+	bt_enabled = !blocked;
+
+	return 0;
+}
+
+/* rfkill operations: only block/unblock is supported. */
+static const struct rfkill_ops bcm43xx_bt_rfkill_ops = {
+	.set_block = bcm43xx_bt_rfkill_set_power,
+};
+
+#ifdef LPM_ON
+/*
+ * Drive gpio_wake to @wake, taking a UART PM reference before asserting
+ * wake and dropping it after de-asserting, so the UART is powered for
+ * the whole awake window.
+ */
+static void set_wake_locked(int wake)
+{
+	bt_lpm.wake = wake;
+
+	if (!wake_uart_enabled && wake) {
+		WARN_ON(!bt_lpm.tty_dev);
+		uart_enable(bt_lpm.tty_dev);
+	}
+
+	gpio_set_value(bt_lpm.gpio_wake, wake);
+
+	if (wake_uart_enabled && !wake) {
+		WARN_ON(!bt_lpm.tty_dev);
+		uart_disable(bt_lpm.tty_dev);
+	}
+	wake_uart_enabled = wake;
+}
+
+/* hrtimer callback: quiet period elapsed, allow the controller to sleep. */
+static enum hrtimer_restart enter_lpm(struct hrtimer *timer)
+{
+	pr_debug("%s\n", __func__);
+
+	set_wake_locked(0);
+
+	return HRTIMER_NORESTART;
+}
+
+
+/*
+ * React to a host_wake transition: hold a UART PM reference while the
+ * controller asserts host_wake, release it when de-asserted.  No-op if
+ * the state is unchanged.
+ */
+static void update_host_wake_locked(int host_wake)
+{
+	if (host_wake == bt_lpm.host_wake)
+		return;
+
+	bt_lpm.host_wake = host_wake;
+
+	if (host_wake) {
+		if (!host_wake_uart_enabled) {
+			WARN_ON(!bt_lpm.tty_dev);
+			uart_enable(bt_lpm.tty_dev);
+		}
+	} else {
+		if (host_wake_uart_enabled) {
+			WARN_ON(!bt_lpm.tty_dev);
+			uart_disable(bt_lpm.tty_dev);
+		}
+	}
+
+	host_wake_uart_enabled = host_wake;
+
+}
+
+/*
+ * host_wake IRQ: read the current level, flip the trigger edge so the
+ * next transition also fires (edge IRQs emulate a level IRQ here), and
+ * propagate the state.  If no tty is known yet, just record the level.
+ */
+static irqreturn_t host_wake_isr(int irq, void *dev)
+{
+	int host_wake;
+
+	host_wake = gpio_get_value(bt_lpm.gpio_host_wake);
+
+	pr_debug("%s: lpm %s\n", __func__, host_wake ? "off" : "on");
+
+	irq_set_irq_type(irq, host_wake ? IRQF_TRIGGER_FALLING :
+			IRQF_TRIGGER_RISING);
+
+	if (!bt_lpm.tty_dev) {
+		bt_lpm.host_wake = host_wake;
+		return IRQ_HANDLED;
+	}
+
+	update_host_wake_locked(host_wake);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Install the host_wake interrupt handler (rising edge initially; the
+ * ISR toggles the edge after each event).  On failure the wake GPIOs are
+ * released since LPM cannot work without the IRQ.
+ */
+static void activate_irq_handler(void)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = request_irq(bt_lpm.int_host_wake, host_wake_isr,
+				IRQF_TRIGGER_RISING, "bt_host_wake", NULL);
+
+	if (ret < 0) {
+		/* printk-style messages need the trailing newline. */
+		pr_err("Error lpm request IRQ\n");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+	}
+}
+
+
+/*
+ * HSU wake_peer hook: called before the host transmits.  Asserts wake,
+ * (re)arms the LPM timer that will drop it after the quiet delay, and
+ * lazily installs the host_wake IRQ on first use.
+ */
+static void bcm_bt_lpm_wake_peer(struct device *dev)
+{
+	bt_lpm.tty_dev = dev;
+
+	/*
+	 * the irq is enabled after the first host wake up signal.
+	 * in the original code, the irq should be in levels but, since mfld
+	 * does not support them, irq is triggering with edges.
+	 */
+
+	if (!int_handler_enabled) {
+		int_handler_enabled = true;
+		activate_irq_handler();
+	}
+
+	hrtimer_try_to_cancel(&bt_lpm.enter_lpm_timer);
+
+	set_wake_locked(1);
+
+	hrtimer_start(&bt_lpm.enter_lpm_timer, bt_lpm.enter_lpm_delay,
+		HRTIMER_MODE_REL);
+
+}
+
+/*
+ * Initialize low-power management: arm the LPM hrtimer, validate the
+ * host_wake GPIO, enable IRQ wake and register the HSU wake_peer hook.
+ * Returns 0 on success or a negative error code.
+ */
+static int bcm_bt_lpm_init(struct platform_device *pdev)
+{
+	int ret;
+	struct device *tty_dev;
+
+	hrtimer_init(&bt_lpm.enter_lpm_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL);
+	bt_lpm.enter_lpm_delay = ktime_set(1, 0);	/* 1 sec */
+	bt_lpm.enter_lpm_timer.function = enter_lpm;
+
+	bt_lpm.host_wake = 0;
+
+	/*
+	 * gpio_host_wake is unsigned, so the previous "< 0" test could
+	 * never be true; gpio_is_valid() performs the intended check.
+	 */
+	if (!gpio_is_valid(bt_lpm.gpio_host_wake)) {
+		pr_err("Error bt_lpm.gpio_host_wake\n");
+		return -ENODEV;
+	}
+
+	ret = irq_set_irq_wake(bt_lpm.int_host_wake, 1);
+	if (ret < 0) {
+		pr_err("Error lpm set irq IRQ\n");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+		return ret;
+	}
+
+	tty_dev = intel_mid_hsu_set_wake_peer(bt_lpm.port,
+			bcm_bt_lpm_wake_peer);
+	if (!tty_dev) {
+		pr_err("Error no tty dev\n");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+		return -ENODEV;
+	}
+
+	bcm_bt_lpm_wake_peer(tty_dev);
+	return 0;
+}
+#endif
+
+#ifndef CONFIG_ACPI
+/*
+ * Legacy (non-ACPI) probe helper: validate the platform data attached
+ * to the device and copy the GPIO/IRQ/port numbers into the driver
+ * state (bt_lpm).  Returns 0 or -EINVAL.
+ */
+static int bcm43xx_bluetooth_pdata_probe(struct platform_device *pdev)
+{
+ struct bcm_bt_lpm_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata == NULL) {
+ pr_err("Cannot register bcm_bt_lpm drivers, pdata is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!gpio_is_valid(pdata->gpio_enable)) {
+ pr_err("%s: gpio not valid\n", __func__);
+ return -EINVAL;
+ }
+
+#ifdef LPM_ON
+ /* The wake GPIOs are only needed when low-power mode is built in. */
+ if (!gpio_is_valid(pdata->gpio_wake) ||
+ !gpio_is_valid(pdata->gpio_host_wake)) {
+ pr_err("%s: gpio not valid\n", __func__);
+ return -EINVAL;
+ }
+#endif
+
+ bt_lpm.gpio_wake = pdata->gpio_wake;
+ bt_lpm.gpio_host_wake = pdata->gpio_host_wake;
+ bt_lpm.int_host_wake = pdata->int_host_wake;
+ bt_lpm.gpio_enable_bt = pdata->gpio_enable;
+
+ bt_lpm.port = pdata->port;
+
+ return 0;
+}
+#endif /* !CONFIG_ACPI */
+
+/*
+ * Probe: resolve platform data (ACPI or legacy pdata), claim the enable
+ * GPIO (and, with LPM_ON, the wake/host-wake GPIOs), register the rfkill
+ * switch with the radio initially off, then initialise low-power mode.
+ * Returns 0 on success or a negative errno; all partially acquired
+ * resources are released on the error paths below.
+ */
+static int bcm43xx_bluetooth_probe(struct platform_device *pdev)
+{
+	bool default_state = true;	/* off */
+	int ret = 0;
+
+	int_handler_enabled = false;
+
+#ifdef CONFIG_ACPI
+	if (ACPI_HANDLE(&pdev->dev)) {
+		/*
+		 * acpi specific probe
+		 */
+		pr_debug("%s for ACPI device %s\n", __func__,
+			 dev_name(&pdev->dev));
+		if (bcm_bt_lpm_acpi_probe(pdev) < 0)
+			ret = -EINVAL;
+	} else
+		ret = -ENODEV;
+#else
+	ret = bcm43xx_bluetooth_pdata_probe(pdev);
+#endif
+
+	if (ret < 0) {
+		pr_err("%s: Cannot register platform data\n", __func__);
+		goto err_data_probe;
+	}
+
+	ret = gpio_request(bt_lpm.gpio_enable_bt, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+		       bt_lpm.gpio_enable_bt);
+		goto err_gpio_enable_req;
+	}
+
+	ret = gpio_direction_output(bt_lpm.gpio_enable_bt, 0);
+	if (ret < 0) {
+		pr_err("%s: Unable to set int direction for gpio %d\n",
+		       __func__, bt_lpm.gpio_enable_bt);
+		goto err_gpio_enable_dir;
+	}
+
+#ifdef LPM_ON
+	ret = gpio_request(bt_lpm.gpio_host_wake, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n",
+		       __func__, bt_lpm.gpio_host_wake);
+		goto err_gpio_host_wake_req;
+	}
+
+	ret = gpio_direction_input(bt_lpm.gpio_host_wake);
+	if (ret < 0) {
+		pr_err("%s: Unable to set direction for gpio %d\n", __func__,
+		       bt_lpm.gpio_host_wake);
+		goto err_gpio_host_wake_dir;
+	}
+
+	ret = gpio_request(bt_lpm.gpio_wake, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+		       bt_lpm.gpio_wake);
+		goto err_gpio_wake_req;
+	}
+
+	ret = gpio_direction_output(bt_lpm.gpio_wake, 0);
+	if (ret < 0) {
+		pr_err("%s: Unable to set direction for gpio %d\n", __func__,
+		       bt_lpm.gpio_wake);
+		goto err_gpio_wake_dir;
+	}
+
+	pr_debug("%s: gpio_enable=%d, gpio_wake=%d, gpio_host_wake=%d\n",
+		 __func__,
+		 bt_lpm.gpio_enable_bt,
+		 bt_lpm.gpio_wake,
+		 bt_lpm.gpio_host_wake);
+#endif
+
+	bt_rfkill = rfkill_alloc("bcm43xx Bluetooth", &pdev->dev,
+				 RFKILL_TYPE_BLUETOOTH, &bcm43xx_bt_rfkill_ops,
+				 NULL);
+	if (unlikely(!bt_rfkill)) {
+		ret = -ENOMEM;
+		goto err_rfkill_alloc;
+	}
+
+	/* Radio starts powered off; rfkill reflects that state. */
+	bcm43xx_bt_rfkill_set_power(NULL, default_state);
+	rfkill_init_sw_state(bt_rfkill, default_state);
+
+	ret = rfkill_register(bt_rfkill);
+	if (unlikely(ret))
+		goto err_rfkill_register;
+
+#ifdef LPM_ON
+	ret = bcm_bt_lpm_init(pdev);
+	if (ret)
+		goto err_lpm_init;
+#endif
+
+	return ret;
+
+#ifdef LPM_ON
+	/*
+	 * Keep the label inside LPM_ON: the only goto to it is compiled
+	 * under LPM_ON, so an unconditional label would trigger an
+	 * unused-label warning when LPM_ON is not defined.
+	 */
+err_lpm_init:
+	rfkill_unregister(bt_rfkill);
+#endif
+err_rfkill_register:
+	rfkill_destroy(bt_rfkill);
+err_rfkill_alloc:
+#ifdef LPM_ON
+err_gpio_wake_dir:
+	gpio_free(bt_lpm.gpio_wake);
+err_gpio_wake_req:
+err_gpio_host_wake_dir:
+	gpio_free(bt_lpm.gpio_host_wake);
+err_gpio_host_wake_req:
+#endif
+err_gpio_enable_dir:
+	gpio_free(bt_lpm.gpio_enable_bt);
+err_gpio_enable_req:
+err_data_probe:
+	return ret;
+}
+
+/*
+ * Remove: unregister and destroy the rfkill switch, release the GPIOs.
+ * NOTE(review): the host-wake IRQ requested in activate_irq_handler()
+ * and the enter_lpm hrtimer are not torn down here - confirm whether
+ * free_irq()/hrtimer_cancel() are needed on driver removal.
+ */
+static int bcm43xx_bluetooth_remove(struct platform_device *pdev)
+{
+ rfkill_unregister(bt_rfkill);
+ rfkill_destroy(bt_rfkill);
+
+ gpio_free(bt_lpm.gpio_enable_bt);
+#ifdef LPM_ON
+ gpio_free(bt_lpm.gpio_wake);
+ gpio_free(bt_lpm.gpio_host_wake);
+#endif
+ return 0;
+}
+#ifdef LPM_ON
+/*
+ * Suspend hook: abort suspend with -EBUSY if the controller is
+ * currently signalling host-wake; otherwise leave the host-wake IRQ
+ * disabled for the duration of the suspend (re-enabled in resume).
+ */
+int bcm43xx_bluetooth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int host_wake;
+
+ pr_debug("%s\n", __func__);
+
+ if (!bt_enabled)
+ return 0;
+
+ disable_irq(bt_lpm.int_host_wake);
+ host_wake = gpio_get_value(bt_lpm.gpio_host_wake);
+ if (host_wake) {
+ /* Controller wants attention: re-arm the IRQ and veto suspend. */
+ enable_irq(bt_lpm.int_host_wake);
+ pr_err("%s suspend error, gpio %d set\n", __func__,
+ bt_lpm.gpio_host_wake);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/* Resume hook: re-enable the host-wake IRQ disabled during suspend. */
+int bcm43xx_bluetooth_resume(struct platform_device *pdev)
+{
+ pr_debug("%s\n", __func__);
+
+ if (bt_enabled)
+ enable_irq(bt_lpm.int_host_wake);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id bcm_id_table[] = {
+ /* ACPI IDs here */
+ { "BCM2E1A", 0 },
+ { "BCM2E3A", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, bcm_id_table);
+#endif
+
+/* Platform driver glue; suspend/resume only exist with LPM_ON. */
+static struct platform_driver bcm43xx_bluetooth_platform_driver = {
+ .probe = bcm43xx_bluetooth_probe,
+ .remove = bcm43xx_bluetooth_remove,
+#ifdef LPM_ON
+ .suspend = bcm43xx_bluetooth_suspend,
+ .resume = bcm43xx_bluetooth_resume,
+#endif
+ .driver = {
+ .name = "bcm_bt_lpm",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_ACPI
+ .acpi_match_table = ACPI_PTR(bcm_id_table),
+#endif
+ },
+};
+
+/* Module init: radio starts disabled; register the platform driver. */
+static int __init bcm43xx_bluetooth_init(void)
+{
+ bt_enabled = false;
+ return platform_driver_register(&bcm43xx_bluetooth_platform_driver);
+}
+
+/* Module exit: mirror of init. */
+static void __exit bcm43xx_bluetooth_exit(void)
+{
+ platform_driver_unregister(&bcm43xx_bluetooth_platform_driver);
+}
+
+
+module_init(bcm43xx_bluetooth_init);
+module_exit(bcm43xx_bluetooth_exit);
+
+MODULE_ALIAS("platform:bcm43xx");
+MODULE_DESCRIPTION("bcm43xx_bluetooth");
+MODULE_AUTHOR("Jaikumar Ganesh <jaikumar@google.com>");
+MODULE_LICENSE("GPL");
+
--- /dev/null
+/*
+ * drivers/misc/emmc_ipanic.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/mmc/host.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+#include <linux/pci.h>
+#include <linux/nmi.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/panic_gbuffer.h>
+#include "emmc_ipanic.h"
+
+#include <linux/kmsg_dump.h>
+
+/* Label of the eMMC partition used to store panic logs (module param). */
+static char *part_label = "";
+module_param(part_label, charp, 0);
+MODULE_PARM_DESC(part_label, "IPanic mmc partition device label (panic)");
+
+/* Kill switch: set on the kernel command line to skip panic dumping. */
+static u32 disable_emmc_ipanic;
+core_param(disable_emmc_ipanic, disable_emmc_ipanic, uint, 0644);
+
+static struct mmc_emergency_info emmc_info = {
+ .init = mmc_emergency_init,
+ .write = mmc_emergency_write,
+ .part_label = CONFIG_EMMC_IPANIC_PLABEL,
+};
+
+/* /proc file names, indexed by the PROC_*_INDEX constants. */
+static unsigned char *ipanic_proc_entry_name[PROC_MAX_ENTRIES] = {
+ "emmc_ipanic_header",
+ "emmc_ipanic_console",
+ "emmc_ipanic_threads",
+ "emmc_ipanic_gbuffer"
+};
+
+/* Reentrancy guard for the panic notifier. */
+static int in_panic;
+static struct emmc_ipanic_data drv_ctx;
+static struct work_struct proc_removal_work;
+/* Per-section bookkeeping, indexed by IPANIC_LOG_*. */
+static int log_offset[IPANIC_LOG_MAX];
+static int log_len[IPANIC_LOG_MAX]; /* sector count */
+static int log_size[IPANIC_LOG_MAX]; /* byte count */
+static size_t log_head[IPANIC_LOG_MAX];
+static size_t log_woff[IPANIC_LOG_MAX];
+/* Carries sub-sector leftovers between successive flash writes. */
+static unsigned char last_chunk_buf[SECTOR_SIZE];
+static int last_chunk_buf_len;
+static DEFINE_MUTEX(drv_mutex);
+/* Streaming hook invoked via emmc_ipanic_stream_emmc(). */
+static void (*func_stream_emmc) (void);
+
+static struct kmsg_dumper ipanic_dumper;
+
+/*
+ * Zero the on-flash panic header sector so the partition reads as empty.
+ *
+ * @buffer: mapped header sector, or NULL to read it here.
+ * @sect:   sector handle matching @buffer, or NULL to read it here.
+ *
+ * A sector reference taken locally is dropped before returning; a
+ * caller-supplied reference stays with the caller.  The in-core header
+ * copy (ctx->hdr) is cleared in all cases.
+ */
+static void emmc_panic_erase(unsigned char *buffer, Sector * sect)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc = ctx->emmc;
+	unsigned char *read_buf_ptr = buffer;
+	Sector new_sect;
+	int local_read = 0;
+	int rc;
+
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		return;
+	}
+
+	if (!read_buf_ptr || !sect) {
+		sect = &new_sect;
+		if (!emmc->bdev) {
+			pr_err("%s:invalid emmc block device\n", __func__);
+			goto out;
+		}
+		/* make sure the block device is open rw */
+		rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE,
+				emmc_panic_erase);
+		if (rc < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			goto out;
+		}
+
+		/*read panic header */
+		read_buf_ptr =
+		    read_dev_sector(emmc->bdev, emmc->start_block, sect);
+		if (!read_buf_ptr) {
+			pr_err("%s: read sector error(%llu)!\n",
+			       __func__, (u64) emmc->start_block);
+			goto out;
+		}
+		local_read = 1;
+	}
+
+	/*write all zero to panic header */
+	lock_page(sect->v);
+	memset(read_buf_ptr, 0, SECTOR_SIZE);
+	set_page_dirty(sect->v);
+	unlock_page(sect->v);
+	sync_blockdev(emmc->bdev);
+
+	/*
+	 * Drop the reference only when we took it ourselves above.  The
+	 * old check tested read_buf_ptr, which is always non-NULL at this
+	 * point, so a locally read sector was never released (page leak).
+	 */
+	if (local_read)
+		put_dev_sector(*sect);
+out:
+	/*
+	 * NOTE(review): ctx->hdr is cleared with SECTOR_SIZE bytes -
+	 * confirm the hdr member is at least that large.
+	 */
+	memset(&ctx->hdr, 0, SECTOR_SIZE);
+	return;
+}
+
+/*
+ * Read up to @count bytes from the panic partition at byte @offset.
+ * The transfer never crosses a sector boundary: @count is clamped to
+ * the end of the sector containing @offset, so callers loop as needed.
+ *
+ * @holder:  token passed to blkdev_get() as the exclusive holder.
+ * @to_user: when true, @buffer is a userspace pointer.
+ *
+ * Returns bytes read, 0 on setup/copy failure, or a negative errno.
+ */
+static int emmc_read(struct mmc_emergency_info *emmc, void *holder,
+		     char *buffer, off_t offset, int count, bool to_user)
+{
+	unsigned char *read_ptr;
+	unsigned int sector_no;
+	off_t sector_offset;
+	Sector sect;
+	int rc;
+
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		return 0;
+	}
+	if (!emmc->bdev) {
+		pr_err("%s:invalid emmc block device\n", __func__);
+		return 0;
+	}
+
+	sector_no = offset >> SECTOR_SIZE_SHIFT;
+	sector_offset = offset & (SECTOR_SIZE - 1);
+	if (sector_no >= emmc->block_count) {
+		pr_err("%s: reading an invalid address\n", __func__);
+		return -EINVAL;
+	}
+
+	/* make sure the block device is open rw */
+	rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE, holder);
+	if (rc < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		return 0;
+	}
+
+	read_ptr = read_dev_sector(emmc->bdev, sector_no + emmc->start_block,
+				   &sect);
+	if (!read_ptr) {
+		/*
+		 * No reference is held when read_dev_sector() fails; the
+		 * old put_dev_sector() here released a NULL page.
+		 */
+		return -EINVAL;
+	}
+	/* count and read_ptr are updated to match flash page size */
+	if (count + sector_offset > SECTOR_SIZE)
+		count = SECTOR_SIZE - sector_offset;
+
+	if (sector_offset)
+		read_ptr += sector_offset;
+
+	if (to_user) {
+		if (copy_to_user(buffer, read_ptr, count)) {
+			pr_err("%s: Failed to copy buffer to User\n", __func__);
+			/* fix: this path used to leak the sector reference */
+			put_dev_sector(sect);
+			return 0;
+		}
+	} else
+		memcpy(buffer, read_ptr, count);
+
+	put_dev_sector(sect);
+
+	return count;
+}
+
+/*
+ * /proc reader for the gbuffer section.  The on-flash data is a ring
+ * buffer snapshot: bytes [head, len) hold the oldest part and [0, head)
+ * the newest, so the file position is remapped accordingly before
+ * delegating the actual transfer to emmc_read().
+ */
+static ssize_t emmc_ipanic_gbuffer_proc_read(struct file *file,
+ char __user * buffer, size_t count,
+ loff_t * ppos)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ size_t log_len, log_head;
+ off_t log_off;
+ int rc;
+
+ if (!ctx) {
+ pr_err("%s:invalid panic handler\n", __func__);
+ return 0;
+ }
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&drv_mutex);
+
+ log_off = ctx->curr.log_offset[IPANIC_LOG_GBUFFER];
+ log_len = ctx->curr.log_length[IPANIC_LOG_GBUFFER];
+ log_head = ctx->curr.log_head[IPANIC_LOG_GBUFFER];
+
+ if (*ppos >= log_len) {
+ mutex_unlock(&drv_mutex);
+ return 0;
+ }
+
+ if (*ppos < log_len - log_head) {
+ /* No overflow (log_head == 0)
+ * or
+ * overflow 2nd part buf (log_head = log_woff)
+ * |-------w--------|
+ * off^
+ * |--------|
+ */
+ log_off += log_head;
+ log_len -= log_head;
+ } else {
+ /* 1st part buf
+ * |-------w--------|
+ * off^
+ * |-------|
+ */
+ *ppos -= (log_len - log_head);
+ log_len = log_head;
+ }
+
+ if ((*ppos + count) > log_len)
+ count = log_len - *ppos;
+
+ rc = emmc_read(ctx->emmc, emmc_ipanic_gbuffer_proc_read,
+ buffer, log_off + *ppos, count, true);
+ if (rc <= 0) {
+ mutex_unlock(&drv_mutex);
+ pr_err
+ ("%s: emmc_read: invalid args: offset:0x%08llx, count:%zd",
+ __func__, (u64) (log_off + *ppos), count);
+ return rc;
+ }
+
+ *ppos += rc;
+
+ mutex_unlock(&drv_mutex);
+
+ return rc;
+}
+
+/*
+ * Common /proc reader: stream 'count' bytes of section 'log' starting
+ * at *ppos from the panic partition to userspace, short reads allowed
+ * (emmc_read() stops at sector boundaries).
+ * NOTE(review): the range check accepts log == IPANIC_LOG_MAX; confirm
+ * IPANIC_LOG_HEADER equals IPANIC_LOG_MAX, otherwise this is off-by-one.
+ */
+static ssize_t emmc_ipanic_proc_read_by_log(struct file *file,
+ char __user * buffer, size_t count,
+ loff_t * ppos, int log)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ size_t file_length;
+ off_t file_offset;
+ int rc;
+
+ if (!ctx) {
+ pr_err("%s:invalid panic handler\n", __func__);
+ return 0;
+ }
+
+ if (!count)
+ return 0;
+
+ if (log < 0 || log > IPANIC_LOG_MAX) {
+ pr_err("%s: Bad log number (%d)\n", __func__, log);
+ return -EINVAL;
+ }
+
+ mutex_lock(&drv_mutex);
+
+ if (log == IPANIC_LOG_HEADER) {
+ /* Header text lives inside sector 0 after the binary fields. */
+ file_length = ctx->hdr.log_size;
+ file_offset = offsetof(struct panic_header, panic);
+ } else {
+ file_length = ctx->curr.log_length[log];
+ file_offset = ctx->curr.log_offset[log];
+ }
+
+ if (*ppos >= file_length) {
+ mutex_unlock(&drv_mutex);
+ return 0;
+ }
+
+ if ((*ppos + count) > file_length)
+ count = file_length - *ppos;
+
+ rc = emmc_read(ctx->emmc, emmc_ipanic_proc_read_by_log,
+ buffer, file_offset + *ppos, count, true);
+ if (rc <= 0) {
+ mutex_unlock(&drv_mutex);
+ pr_err
+ ("%s: emmc_read: invalid args: offset:0x%08llx, count:%zd",
+ __func__, (u64) (file_offset + *ppos), count);
+ return rc;
+ }
+
+ *ppos += rc;
+
+ mutex_unlock(&drv_mutex);
+
+ return rc;
+}
+
+/* Thin per-section wrappers around emmc_ipanic_proc_read_by_log(). */
+static ssize_t emmc_ipanic_proc_read_hdr(struct file *file,
+ char __user * buffer, size_t count,
+ loff_t * ppos)
+{
+ return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos,
+ IPANIC_LOG_HEADER);
+}
+
+static ssize_t emmc_ipanic_proc_read0(struct file *file, char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos,
+ IPANIC_LOG_CONSOLE);
+}
+
+static ssize_t emmc_ipanic_proc_read1(struct file *file, char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos,
+ IPANIC_LOG_THREADS);
+}
+
+/*
+ * Workqueue handler (scheduled from the /proc write hook): erase the
+ * on-flash panic header and remove all ipanic /proc entries.
+ */
+static void emmc_ipanic_remove_proc_work(struct work_struct *work)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ int log;
+
+ mutex_lock(&drv_mutex);
+ emmc_panic_erase(NULL, NULL);
+
+ for (log = 0; log < PROC_MAX_ENTRIES; log++) {
+ if (ctx->ipanic_proc_entry[log]) {
+ remove_proc_entry(ctx->ipanic_proc_entry_name
+ [log], NULL);
+ ctx->ipanic_proc_entry[log] = NULL;
+ }
+ }
+ mutex_unlock(&drv_mutex);
+}
+
+/*
+ * Writing anything to an ipanic /proc file clears the stored panic data
+ * (done from a workqueue since erase sleeps); the data is discarded.
+ */
+static ssize_t emmc_ipanic_proc_write(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ schedule_work(&proc_removal_work);
+ return count;
+}
+
+/* In section order inside panic partition : */
+static const struct file_operations ipanic_emmc_read_header_fops = {
+ .read = emmc_ipanic_proc_read_hdr,
+ .write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc0_fops = {
+ .read = emmc_ipanic_proc_read0,
+ .write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc1_fops = {
+ .read = emmc_ipanic_proc_read1,
+ .write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc_gbuffer_fops = {
+ .read = emmc_ipanic_gbuffer_proc_read,
+ .write = emmc_ipanic_proc_write
+};
+
+/*
+ * Called once the panic partition appears: validate the stored panic
+ * header and publish /proc entries for every non-empty log section.
+ * The partition is erased again when the header is invalid or no proc
+ * entry could be created.
+ */
+static void emmc_panic_notify_add(void)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc;
+	unsigned char *read_buf_ptr;
+	Sector sect;
+	int rc, idx_log, idx_proc;
+	int proc_entry_created = 0;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return;
+	}
+
+	emmc = ctx->emmc;
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		goto out_err;
+	}
+
+	if (!emmc->bdev) {
+		pr_err("%s:invalid emmc block device\n", __func__);
+		goto out_err;
+	}
+
+	/* make sure the block device is open rw */
+	rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE,
+			emmc_panic_notify_add);
+	if (rc < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		goto out_err;
+	}
+
+	/* read panic header */
+	read_buf_ptr = read_dev_sector(emmc->bdev, emmc->start_block, &sect);
+	if (!read_buf_ptr) {
+		pr_err("%s: read sector error(%llu)!\n", __func__,
+		       (u64) emmc->start_block);
+		return;
+	}
+
+	memcpy(&ctx->hdr, read_buf_ptr, sizeof(struct panic_header));
+
+	if (ctx->hdr.magic != PANIC_MAGIC) {
+		pr_info("%s: bad magic %x, no data available\n",
+			__func__, ctx->hdr.magic);
+		emmc_panic_erase(read_buf_ptr, &sect);
+		goto put_sector;
+	}
+
+	pr_info("%s: Data available in panic partition\n", __func__);
+
+	if (ctx->hdr.version != PHDR_VERSION) {
+		pr_err("%s: Version mismatch (%d != %d)\n",
+		       __func__, ctx->hdr.version, PHDR_VERSION);
+		emmc_panic_erase(read_buf_ptr, &sect);
+		goto put_sector;
+	}
+
+	/* Create proc entry for the panic header */
+	ctx->ipanic_proc_entry[PROC_HEADER_INDEX] =
+	    proc_create(ctx->ipanic_proc_entry_name
+			[PROC_HEADER_INDEX], S_IFREG | S_IRUGO, NULL,
+			&ipanic_emmc_read_header_fops);
+
+	if (!ctx->ipanic_proc_entry[PROC_HEADER_INDEX])
+		pr_err("%s: failed creating proc file\n", __func__);
+	else {
+		proc_entry_created = 1;
+		pr_info("%s: proc entry created: %s\n", __func__,
+			ctx->ipanic_proc_entry_name[PROC_HEADER_INDEX]);
+	}
+
+	/*
+	 * Done with the header sector: release its reference before
+	 * reusing 'sect' (the old code overwrote it and leaked the page).
+	 */
+	put_dev_sector(sect);
+
+	/* read log_info to retrieve block numbers and offsets */
+	read_buf_ptr =
+	    read_dev_sector(emmc->bdev, emmc->start_block + 1, &sect);
+	if (!read_buf_ptr) {
+		pr_err("%s: read sector error(%llu)!\n", __func__,
+		       (u64) emmc->start_block + 1);
+		return;
+	}
+
+	memcpy(&ctx->curr, read_buf_ptr, sizeof(struct log_info));
+
+	/* Log files other than header */
+	for (idx_log = 0; idx_log < IPANIC_LOG_MAX; idx_log++) {
+
+		pr_info("%s: log file %u(%u, %u)\n", __func__, idx_log,
+			ctx->curr.log_offset[idx_log],
+			ctx->curr.log_length[idx_log]);
+
+		/* Skip empty file. */
+		if (ctx->curr.log_length[idx_log] == 0) {
+			pr_info("%s: empty log file %u\n", __func__, idx_log);
+			continue;
+		}
+
+		/* Create proc entry for console, threads and gbuffer log. */
+		if (idx_log == IPANIC_LOG_CONSOLE) {
+			idx_proc = PROC_CONSOLE_INDEX;
+			ctx->ipanic_proc_entry[PROC_CONSOLE_INDEX] =
+			    proc_create(ctx->ipanic_proc_entry_name
+					[PROC_CONSOLE_INDEX], S_IFREG | S_IRUGO,
+					NULL, &ipanic_emmc0_fops);
+		} else if (idx_log == IPANIC_LOG_THREADS) {
+			idx_proc = PROC_THREADS_INDEX;
+			ctx->ipanic_proc_entry[PROC_THREADS_INDEX] =
+			    proc_create(ctx->ipanic_proc_entry_name
+					[PROC_THREADS_INDEX], S_IFREG | S_IRUGO,
+					NULL, &ipanic_emmc1_fops);
+		} else if (idx_log == IPANIC_LOG_GBUFFER) {
+			idx_proc = PROC_GBUFFER_INDEX;
+			ctx->ipanic_proc_entry[PROC_GBUFFER_INDEX] =
+			    proc_create(ctx->ipanic_proc_entry_name
+					[PROC_GBUFFER_INDEX], S_IFREG | S_IRUGO,
+					NULL, &ipanic_emmc_gbuffer_fops);
+		} else {
+			/* No proc entry for this index */
+			idx_proc = 0;
+			continue;
+		}
+		if (!ctx->ipanic_proc_entry[idx_proc])
+			pr_err("%s: failed creating proc file\n", __func__);
+		else {
+			proc_entry_created = 1;
+			pr_info("%s: proc entry created: %s\n",
+				__func__,
+				ctx->ipanic_proc_entry_name[idx_proc]);
+		}
+	}
+
+	if (!proc_entry_created)
+		emmc_panic_erase(read_buf_ptr, &sect);
+
+put_sector:
+	put_dev_sector(sect);
+	return;
+out_err:
+	ctx->emmc = NULL;
+}
+
+/*
+ * Drop the reference on the partition device taken at discovery time
+ * and forget the emmc binding (the partition went away).
+ */
+static void emmc_panic_notify_remove(void)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+
+ if (ctx->emmc && ctx->emmc->part_dev) {
+ put_device(ctx->emmc->part_dev);
+ ctx->emmc->bdev = NULL;
+ }
+
+ ctx->emmc = NULL;
+}
+
+/*
+ * Write one SECTOR_SIZE page at absolute sector 'to' through the
+ * emergency write hook.  Returns SECTOR_SIZE on success, 0 when the
+ * partition is full, or the negative/positive error from the hook.
+ */
+static int emmc_ipanic_writeflashpage(struct mmc_emergency_info *emmc,
+ loff_t to, const u_char * buf)
+{
+ int rc;
+ size_t wlen = SECTOR_SIZE;
+
+ if (to >= emmc->start_block + emmc->block_count) {
+ pr_emerg("%s: panic partition is full.\n", __func__);
+ return 0;
+ }
+
+ rc = emmc->write((char *)buf, (unsigned int)to);
+ if (rc) {
+ pr_emerg("%s: Error writing data to flash (%d)\n",
+ __func__, rc);
+ return rc;
+ }
+
+ return wlen;
+}
+
+/*
+ * Writes the contents of the console to the specified offset in flash.
+ * Returns number of bytes written
+ */
+/*
+ * Drain the kmsg dumper into flash, one full sector at a time, starting
+ * at absolute sector 'off'.  A trailing partial sector is parked in
+ * last_chunk_buf for a later flush.  Returns the number of sectors
+ * written and stores the byte count in *actual_size.
+ */
+static int emmc_ipanic_write_console(struct mmc_emergency_info *emmc,
+ unsigned int off, int *actual_size)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ int saved_oip, rc, block_shift = 0, bounce_idx = 0;
+ size_t line_len = 0;
+ bool ret;
+
+ static unsigned char line[SECTOR_SIZE];
+
+ *actual_size = 0;
+ while (1) {
+ /* Suppress console recursion while pulling kmsg lines. */
+ saved_oip = oops_in_progress;
+ oops_in_progress = 1;
+ bounce_idx = 0;
+
+ /* Start each sector with whatever the previous pass left over. */
+ if (last_chunk_buf_len) {
+ memcpy(ctx->bounce, last_chunk_buf, last_chunk_buf_len);
+ bounce_idx += last_chunk_buf_len;
+ last_chunk_buf_len = 0;
+ }
+
+ do {
+ ret = kmsg_dump_get_line(&ipanic_dumper, false,
+ line, SECTOR_SIZE, &line_len);
+
+ if (ret) {
+ if (bounce_idx + line_len < SECTOR_SIZE) {
+ memcpy(ctx->bounce + bounce_idx,
+ line, line_len);
+ bounce_idx += line_len;
+ } else {
+ /* Sector full: split the line, carry the tail. */
+ int len = SECTOR_SIZE - bounce_idx;
+ memcpy(ctx->bounce + bounce_idx,
+ line, len);
+ bounce_idx = SECTOR_SIZE;
+ memcpy(last_chunk_buf,
+ line + len, line_len - len);
+ last_chunk_buf_len = line_len - len;
+ }
+ }
+ } while (ret && (bounce_idx != SECTOR_SIZE));
+
+ oops_in_progress = saved_oip;
+
+ /* If it is the last chunk, just copy it to last chunk
+ * buffer and exit loop.
+ */
+ if (!ret) {
+ /* Leave the last chunk for next writing */
+ memcpy(last_chunk_buf, ctx->bounce, bounce_idx);
+ last_chunk_buf_len = bounce_idx;
+ break;
+ }
+
+ rc = emmc_ipanic_writeflashpage(emmc, off + block_shift,
+ ctx->bounce);
+ if (rc <= 0) {
+ pr_emerg("%s: Flash write failed (%d)\n", __func__, rc);
+ return block_shift;
+ }
+
+ block_shift++;
+ *actual_size += SECTOR_SIZE;
+ }
+
+ return block_shift;
+}
+
+/*
+ * Flush the carried sub-sector remainder (last_chunk_buf) to sector
+ * 'to', zero-padded to a full sector, and account for it in the
+ * caller's byte/sector counters.  No-op when nothing is pending.
+ */
+static void emmc_ipanic_flush_lastchunk_emmc(loff_t to,
+ int *size_written,
+ int *sector_written)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ struct mmc_emergency_info *emmc = ctx->emmc;
+ int rc = 0;
+
+ if (last_chunk_buf_len) {
+ memset(last_chunk_buf + last_chunk_buf_len, 0,
+ SECTOR_SIZE - last_chunk_buf_len);
+
+ rc = emmc_ipanic_writeflashpage(emmc, to, last_chunk_buf);
+ if (rc <= 0) {
+ pr_emerg("emmc_ipanic: write last chunk failed (%d)\n",
+ rc);
+ return;
+ }
+
+ *size_written += last_chunk_buf_len;
+ (*sector_written)++;
+ last_chunk_buf_len = 0;
+ }
+ return;
+}
+
+/*
+ * Streaming hook used while show_state_filter() dumps thread
+ * backtraces: append whatever accumulated in the log buffer to the
+ * THREADS section, then clear the buffer for the next batch.
+ */
+static void emmc_ipanic_write_thread_func(void)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ struct mmc_emergency_info *emmc = ctx->emmc;
+ int size_written;
+ int thread_sector_count;
+
+ thread_sector_count =
+ emmc_ipanic_write_console(emmc,
+ log_offset[IPANIC_LOG_THREADS] +
+ log_len[IPANIC_LOG_THREADS],
+ &size_written);
+ if (thread_sector_count < 0) {
+ pr_emerg("Error writing threads to panic log! (%d)\n",
+ log_len[IPANIC_LOG_THREADS]);
+ return;
+ }
+ log_size[IPANIC_LOG_THREADS] += size_written;
+ log_len[IPANIC_LOG_THREADS] += thread_sector_count;
+
+ /*reset the log buffer */
+ log_buf_clear();
+ kmsg_dump_rewind(&ipanic_dumper);
+}
+
+/*
+ * Dump the console log (kmsg) into section 'log', which starts at the
+ * third sector of the partition.
+ * NOTE(review): emmc_ipanic_write_console() stores a non-negative byte
+ * count in log_size, so the log_size < 0 check looks dead - confirm.
+ */
+static void emmc_ipanic_write_logbuf(struct mmc_emergency_info *emmc, int log)
+{
+ /*
+ * Write the log data from the third block :
+ * - the first block is reserved for panic header
+ * - the second one is reserved for offset information
+ */
+ log_offset[log] = emmc->start_block + 2;
+ log_len[log] = emmc_ipanic_write_console(emmc, log_offset[log],
+ &log_size[log]);
+ if (log_size[log] < 0) {
+ pr_emerg("Error writing console to panic log! (%d)\n",
+ log_len[log]);
+ log_size[log] = 0;
+ log_len[log] = 0;
+ }
+ /* flush last chunk buffer for console */
+ emmc_ipanic_flush_lastchunk_emmc(log_offset[log] +
+ log_len[log],
+ &log_size[log], &log_len[log]);
+}
+
+/*
+ * Dump all thread backtraces into section 'log' (placed right after the
+ * previous section).  show_state_filter() emits into the log buffer and
+ * periodically streams it out via func_stream_emmc.
+ */
+static void emmc_ipanic_write_calltrace(struct mmc_emergency_info *emmc,
+ int log)
+{
+ log_offset[log] = log_offset[log - 1] + log_len[log - 1];
+ /*
+ * config func_stream_emmc to emmc_ipanic_write_thread_func to
+ * stream thread call trace.
+ */
+ log_buf_clear();
+ kmsg_dump_rewind(&ipanic_dumper);
+ func_stream_emmc = emmc_ipanic_write_thread_func;
+ show_state_filter(0);
+
+ /* flush last chunk buffer */
+ emmc_ipanic_flush_lastchunk_emmc(log_offset[log] +
+ log_len[log],
+ &log_size[log], &log_len[log]);
+}
+
+/*
+ * Copy the registered generic buffer to flash starting at sector 'off'.
+ * A full buffer (head set) is written in its entirety, otherwise only
+ * the bytes up to the write offset.  The final partial sector is parked
+ * in last_chunk_buf.  Returns sectors written; *actual_size gets bytes.
+ */
+static int emmc_ipanic_write_gbuffer_data(struct mmc_emergency_info *emmc,
+ struct g_buffer_header *gbuffer,
+ unsigned int off, int *actual_size)
+{
+ int rc, block_shift = 0;
+ size_t log_off = 0;
+ size_t log_size;
+ unsigned char *buf = gbuffer->base;
+
+ if (gbuffer->head)
+ /* has overflow */
+ log_size = gbuffer->size;
+ else
+ /* no overflow */
+ log_size = gbuffer->woff;
+
+ while (log_off < log_size) {
+ size_t size_copy = log_size - log_off;
+ if (size_copy < SECTOR_SIZE) {
+ /*
+ * flash page not complete, flushed with
+ * emmc_ipanic_flush_lastchunk_emmc
+ */
+ memcpy(last_chunk_buf, buf + log_off, size_copy);
+ last_chunk_buf_len = size_copy;
+ break;
+ }
+ rc = emmc_ipanic_writeflashpage(emmc, off + block_shift,
+ buf + log_off);
+ if (rc <= 0) {
+ pr_emerg("%s: Flash write failed (%d)\n", __func__, rc);
+ return 0;
+ }
+ log_off += rc;
+ block_shift++;
+ }
+ *actual_size = log_off;
+
+ return block_shift;
+}
+
+/* Snapshot of the buffer registered via panic_set_gbuffer(). */
+static struct g_buffer_header gbuffer = {
+ .base = NULL,
+};
+
+/*
+ * Dump the registered gbuffer into section 'log' (placed after the
+ * previous section) and record its ring-buffer head/write offset so
+ * the /proc reader can unfold it later.
+ */
+static void emmc_ipanic_write_gbuffer(struct mmc_emergency_info *emmc, int log)
+{
+ struct g_buffer_header *m_gbuffer = &gbuffer;
+
+ log_offset[log] = log_offset[log - 1] + log_len[log - 1];
+
+ pr_info("write gbuffer data\n");
+ if (!m_gbuffer->base) {
+ pr_err("Ipanic error, no gbuffer data\n");
+ return;
+ }
+
+ log_len[log] = emmc_ipanic_write_gbuffer_data(emmc, m_gbuffer,
+ log_offset[log],
+ &log_size[log]);
+ if (log_len[log] < 0) {
+ pr_emerg("Error writing gbuffer to panic log! (%d)\n",
+ log_len[log]);
+ log_size[log] = 0;
+ log_len[log] = 0;
+ }
+ /* flush last chunk buffer */
+ emmc_ipanic_flush_lastchunk_emmc(log_offset[log] + log_len[log],
+ &log_size[log], &log_len[log]);
+ log_head[log] = m_gbuffer->head;
+ log_woff[log] = m_gbuffer->woff;
+ pr_info("write gbuffer data END\n");
+}
+
+/*
+ * Exported in <linux/panic_gbuffer.h>.
+ * Register the generic buffer to be streamed into the panic partition.
+ * Only one buffer may be registered for the lifetime of the driver;
+ * later calls are rejected with an error message.
+ */
+void panic_set_gbuffer(struct g_buffer_header *buf)
+{
+	if (gbuffer.base) {
+		/* fix: message previously lacked its trailing newline */
+		pr_err("%s: gbuffer already set to 0x%p, can not set again\n",
+		       __func__, gbuffer.base);
+		return;
+	}
+
+	gbuffer.base = buf->base;
+	gbuffer.size = buf->size;
+	gbuffer.woff = buf->woff;
+	gbuffer.head = buf->head;
+}
+EXPORT_SYMBOL(panic_set_gbuffer);
+
+/*
+ * Build and write the panic header sector: magic, version, a banner
+ * line, and as much of the tail of the kmsg buffer as fits in the
+ * remainder of the sector.
+ */
+static void emmc_ipanic_write_pageheader(struct mmc_emergency_info *emmc)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ struct panic_header *hdr = (struct panic_header *)ctx->bounce;
+ int wc;
+ size_t len, total, max;
+
+ memset(ctx->bounce, 0, SECTOR_SIZE);
+ hdr->magic = PANIC_MAGIC;
+ hdr->version = PHDR_VERSION;
+
+ total = snprintf(hdr->panic, SECTOR_SIZE, "###Kernel panic###\n");
+
+ /* Fill the rest of the sector with the newest kmsg content. */
+ max = SECTOR_SIZE - offsetof(struct panic_header, panic) - total;
+ kmsg_dump_get_buffer(&ipanic_dumper, false, last_chunk_buf, max, &len);
+ kmsg_dump_rewind(&ipanic_dumper);
+
+ memcpy(hdr->panic + total, last_chunk_buf, len);
+ hdr->log_size = len + total;
+
+ /* Write header block */
+ wc = emmc_ipanic_writeflashpage(emmc, emmc->start_block, ctx->bounce);
+ if (wc <= 0) {
+ pr_emerg("emmc_ipanic: Info write failed (%d)\n", wc);
+ return;
+ }
+}
+
+/*
+ * Reset the in-memory section bookkeeping and zero the on-flash
+ * log_info sector (second sector of the partition).
+ */
+static void emmc_ipanic_clean_loginfo(struct mmc_emergency_info *emmc)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ int rc;
+
+ memset(log_offset, 0, IPANIC_LOG_MAX * sizeof(int));
+ memset(log_len, 0, IPANIC_LOG_MAX * sizeof(int));
+ memset(log_size, 0, IPANIC_LOG_MAX * sizeof(int));
+
+ memset(ctx->bounce, 0, SECTOR_SIZE);
+
+ rc = emmc_ipanic_writeflashpage(emmc, emmc->start_block + 1,
+ ctx->bounce);
+ if (rc <= 0) {
+ pr_emerg("emmc_ipanic: Header write failed (%d)\n", rc);
+ return;
+ }
+}
+
+/*
+ * Rewrite the on-flash log_info sector with the current offsets and
+ * sizes of all sections (offsets stored in bytes relative to the
+ * partition start).  Skipped when section 'newlog' produced no data.
+ */
+static void emmc_ipanic_write_loginfo(struct mmc_emergency_info *emmc,
+ int newlog)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ struct log_info *info = (struct log_info *)ctx->bounce;
+ int log = IPANIC_LOG_CONSOLE;
+ int rc;
+
+ if ((newlog < 0) || (newlog >= IPANIC_LOG_MAX))
+ return;
+
+ if (log_size[newlog] == 0)
+ return;
+
+ memset(ctx->bounce, 0, SECTOR_SIZE);
+ /*Fill up log offset and size */
+ while (log < IPANIC_LOG_MAX) {
+ /*Configurate log offset and log size */
+ info->log_offset[log] = (log_offset[log] - emmc->start_block)
+ << SECTOR_SIZE_SHIFT;
+ info->log_length[log] = log_size[log];
+ info->log_head[log] = log_head[log];
+ info->log_woff[log] = log_woff[log];
+ log++;
+ }
+ rc = emmc_ipanic_writeflashpage(emmc, emmc->start_block + 1,
+ ctx->bounce);
+ if (rc <= 0) {
+ pr_emerg("emmc_ipanic: Header write failed (%d)\n", rc);
+ return;
+ }
+}
+
+/*
+ * Panic-notifier entry point: dump console log, thread backtraces and
+ * the registered gbuffer into the panic partition.  Bails out when
+ * already running, when disabled by the kernel parameter, or when the
+ * partition still holds an unconsumed crash (non-zero header magic).
+ */
+static int emmc_ipanic(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ struct mmc_emergency_info *emmc;
+ int rc, log;
+
+ pr_emerg("panic notified\n");
+
+ if (in_panic || disable_emmc_ipanic)
+ return NOTIFY_DONE;
+
+ in_panic = 1;
+
+#ifdef CONFIG_PREEMPT
+ /* Ensure that cond_resched() won't try to preempt anybody */
+ add_preempt_count(PREEMPT_ACTIVE);
+#endif
+ touch_nmi_watchdog();
+
+ if (!ctx)
+ goto out;
+ emmc = ctx->emmc;
+ if (!emmc)
+ goto out;
+ if (ctx->hdr.magic) {
+ pr_emerg("Crash partition in use!\n");
+ goto out;
+ }
+
+ /* Switch the eMMC stack to polled, panic-safe mode. */
+ rc = emmc->init();
+ if (rc) {
+ /* String too long to fit on 1 80-char line */
+ pr_emerg("%s %s, rc=%d\n",
+ "Emmc emergency driver is",
+ "not initialized successfully!", rc);
+ goto out;
+ }
+
+ /* Prepare kmsg dumper */
+ ipanic_dumper.active = 1;
+ /* Rewind kmsg dumper */
+ kmsg_dump_rewind(&ipanic_dumper);
+
+ /* Write emmc ipanic partition header */
+ emmc_ipanic_write_pageheader(emmc);
+ /* Clean emmc ipanic sections offsets */
+ emmc_ipanic_clean_loginfo(emmc);
+
+ /*Write all buffer into emmc */
+ log = IPANIC_LOG_CONSOLE;
+ while (log < IPANIC_LOG_MAX) {
+ /* Clear temporary buffer */
+ memset(ctx->bounce, 0, SECTOR_SIZE);
+ /* Log every buffer into emmc */
+ switch (log) {
+ case IPANIC_LOG_CONSOLE:
+ emmc_ipanic_write_logbuf(emmc, log);
+ break;
+ case IPANIC_LOG_THREADS:
+ emmc_ipanic_write_calltrace(emmc, log);
+ break;
+ case IPANIC_LOG_GBUFFER:
+ emmc_ipanic_write_gbuffer(emmc, log);
+ break;
+ default:
+ break;
+ }
+ /* Update emmc ipanic sections offsets */
+ emmc_ipanic_write_loginfo(emmc, log);
+ log++;
+ }
+ pr_info("Panic log data written done!\n");
+
+ ipanic_dumper.active = 0;
+
+out:
+#ifdef CONFIG_PREEMPT
+ sub_preempt_count(PREEMPT_ACTIVE);
+#endif
+ return NOTIFY_DONE;
+}
+
+/* Registered on panic_notifier_list once the partition is found. */
+static struct notifier_block panic_blk = {
+ .notifier_call = emmc_ipanic,
+ .priority = 100,
+};
+
+/* debugfs read: run the panic dump path directly (test hook). */
+static int panic_dbg_get(void *data, u64 * val)
+{
+ emmc_ipanic(NULL, 0, NULL);
+ return 0;
+}
+
+/* debugfs write: deliberately trigger a kernel panic (test hook). */
+static int panic_dbg_set(void *data, u64 val)
+{
+ BUG();
+ return -1;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n");
+
+/* class_find_device() match: partition whose volume name equals @data. */
+static int match_dev_panic_part(struct device *dev, const void *data)
+{
+ struct hd_struct *part = dev_to_part(dev);
+ const char *name = (char *)data;
+
+ return part->info && !strcmp(name, part->info->volname);
+}
+
+/*
+ * Bus notifier: watch device add/remove events until the panic
+ * partition (matched by label) appears, then resolve its block device,
+ * publish /proc entries and hook the panic notifier.  Undoes all of
+ * that when the partition's device goes away.
+ */
+static int emmc_panic_partition_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct emmc_ipanic_data *ctx = &drv_ctx;
+ struct mmc_emergency_info *emmc;
+ struct gendisk *disk;
+
+ if (!ctx) {
+ pr_err("%s:invalid panic handler\n", __func__);
+ return 0;
+ }
+
+ emmc = ctx->emmc;
+ if (!emmc) {
+ pr_err("%s:invalid emmc information\n", __func__);
+ return 0;
+ }
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_BOUND_DRIVER:
+ /* if emmc already found, exit the function */
+ if (emmc->bdev)
+ return 0;
+
+ emmc->part_dev = class_find_device(&block_class, NULL,
+ emmc->part_label,
+ &match_dev_panic_part);
+ if (emmc->part_dev) {
+ emmc->part = dev_to_part(emmc->part_dev);
+ if (!emmc->part) {
+ pr_err("unable to get partition\n");
+ goto put_dev;
+ }
+
+ disk = part_to_disk(emmc->part);
+ if (!disk) {
+ pr_err("unable to get disk\n");
+ goto put_dev;
+ }
+
+ /* get whole disk */
+ emmc->bdev = bdget_disk(disk, 0);
+ if (!emmc->bdev) {
+ pr_err("unable to get emmc block device\n");
+ goto put_dev;
+ }
+
+ /* Sector window of the partition on the whole disk. */
+ emmc->start_block = emmc->part->start_sect;
+ emmc->block_count = emmc->part->nr_sects;
+
+ pr_info("panic partition found, label:%s, device:%s\n",
+ emmc->part_label, dev_name(emmc->part_dev));
+
+ /* notify to add the panic device */
+ emmc_panic_notify_add();
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &panic_blk);
+
+ INIT_WORK(&proc_removal_work,
+ emmc_ipanic_remove_proc_work);
+ }
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_UNBIND_DRIVER:
+ if (match_dev_panic_part(dev, emmc->part_label)) {
+ pr_info("bus notify removed device '%s', cleaning.\n",
+ dev_name(dev));
+ flush_scheduled_work();
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &panic_blk);
+ emmc_panic_notify_remove();
+ }
+ break;
+ case BUS_NOTIFY_BIND_DRIVER:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ /* Nothing to do here, but we don't want
+ * these actions to generate error messages,
+ * so we need to catch them
+ */
+ break;
+ default:
+ pr_err("Unknown action (%lu) on %s\n", action, dev_name(dev));
+ return 0;
+ }
+ return 1;
+
+put_dev:
+ put_device(emmc->part_dev);
+ return 0;
+}
+
+static struct notifier_block panic_partition_notifier = {
+ .notifier_call = emmc_panic_partition_notify,
+};
+
+void emmc_ipanic_stream_emmc(void)
+{
+ if (func_stream_emmc)
+ (*func_stream_emmc) ();
+}
+
+EXPORT_SYMBOL(emmc_ipanic_stream_emmc);
+
+static struct dentry *emmc_ipanic_d;
+static struct dentry *emmc_ipanic_disable_d;
+
+/*
+ * Module init: set up driver context, allocate the sector bounce page,
+ * register for bus notifications (to find the panic partition when it
+ * appears) and create the debugfs control files.
+ */
+static int __init emmc_ipanic_init(void)
+{
+	/* initialization of drv_ctx */
+	memset(&drv_ctx, 0, sizeof(drv_ctx));
+	drv_ctx.emmc = &emmc_info;
+
+	/*
+	 * Bounded copy: part_label is externally supplied and may exceed
+	 * the destination buffer (was an unbounded strcpy).
+	 */
+	if (*part_label)
+		strlcpy(emmc_info.part_label, part_label,
+			sizeof(emmc_info.part_label));
+
+	drv_ctx.ipanic_proc_entry_name = ipanic_proc_entry_name;
+
+	/* Bounce page is written at panic time; fail early if we cannot
+	 * get one instead of dereferencing NULL during a panic.
+	 */
+	drv_ctx.bounce = (void *)__get_free_page(GFP_KERNEL);
+	if (!drv_ctx.bounce)
+		return -ENOMEM;
+
+	bus_register_notifier(&pci_bus_type, &panic_partition_notifier);
+
+	emmc_ipanic_d = debugfs_create_file("emmc_ipanic", 0644, NULL, NULL,
+					    &panic_dbg_fops);
+	emmc_ipanic_disable_d = debugfs_create_u32("disable_emmc_ipanic", 0644,
+						   NULL, &disable_emmc_ipanic);
+
+	pr_info("init success\n");
+
+	return 0;
+}
+
+/*
+ * Module exit: tear down debugfs entries, notifier registrations and
+ * the /proc entries, mirroring emmc_ipanic_init() and the partition
+ * notifier's add path.
+ */
+static void __exit emmc_ipanic_exit(void)
+{
+	debugfs_remove(emmc_ipanic_d);
+	debugfs_remove(emmc_ipanic_disable_d);
+	bus_unregister_notifier(&pci_bus_type, &panic_partition_notifier);
+	flush_scheduled_work();
+	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_blk);
+	emmc_panic_notify_remove();
+}
+
+module_init(emmc_ipanic_init);
+module_exit(emmc_ipanic_exit);
--- /dev/null
+/*
+ * drivers/misc/emmc_ipanic.h
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _LINUX_EMMC_IPANIC_H
+#define _LINUX_EMMC_IPANIC_H
+
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/version.h>
+
+extern void log_buf_clear(void);
+
+#define SECTOR_SIZE_SHIFT (9)
+
+#define PROC_HEADER_INDEX 0
+#define PROC_CONSOLE_INDEX 1
+#define PROC_THREADS_INDEX 2
+#define PROC_GBUFFER_INDEX 3
+#define PROC_MAX_ENTRIES 4
+
+#define IPANIC_LOG_CONSOLE 0
+#define IPANIC_LOG_THREADS 1
+#define IPANIC_LOG_GBUFFER 2
+#define IPANIC_LOG_MAX 3
+#define IPANIC_LOG_HEADER IPANIC_LOG_MAX
+
+
+/*
+ * Per-device state describing the eMMC panic partition and the
+ * low-level access callbacks used at panic time.
+ */
+struct mmc_emergency_info {
+#define DISK_NAME_LENGTH 20	/* NOTE(review): appears unused here — confirm */
+	/* emmc panic partition label */
+	char part_label[PARTITION_META_INFO_VOLNAMELTH];
+
+	struct block_device *bdev;
+	struct device *part_dev;
+	struct hd_struct *part;
+
+	/*panic partition start block */
+	sector_t start_block;
+	/*panic partition block count */
+	sector_t block_count;
+
+	/* device-specific init/write/read hooks; buffer + length in bytes */
+	int (*init) (void);
+	int (*write) (char *, unsigned int);
+	int (*read) (char *, unsigned int);
+};
+
+struct panic_header {
+ u32 magic;
+#define PANIC_MAGIC 0xdeadf00d
+
+ u32 version;
+#define PHDR_VERSION 0x01
+ u32 log_size;
+
+ char panic[SECTOR_SIZE];
+};
+
+struct log_info {
+ u32 log_offset[IPANIC_LOG_MAX];
+ u32 log_length[IPANIC_LOG_MAX];
+
+ /* For logcat and generic buffer log status */
+ size_t log_head[IPANIC_LOG_MAX];
+ size_t log_woff[IPANIC_LOG_MAX];
+};
+
+struct emmc_ipanic_data {
+ struct mmc_emergency_info *emmc;
+ struct panic_header hdr;
+ struct log_info curr;
+ void *bounce;
+ struct proc_dir_entry *ipanic_proc_entry[PROC_MAX_ENTRIES];
+ unsigned char **ipanic_proc_entry_name;
+};
+
+#endif /* _LINUX_EMMC_IPANIC_H */
* compact JTAG, standard.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <asm/intel_scu_ipc.h>
+
+#ifdef CONFIG_INTEL_PTI_STM
+#include "stm.h"
+#endif
+
#define DRIVERNAME "pti"
#define PCINAME "pciPTI"
#define TTYNAME "ttyPTI"
#define APERTURE_14 0x3800000 /* offset to first OS write addr */
#define APERTURE_LEN 0x400000 /* address length */
+#define SMIP_PTI_OFFSET 0x30C /* offset to PTI config in MIP header */
+#define SMIP_PTI_EN (1<<7) /* PTI enable bit in PTI configuration */
+
+#define PTI_PNW_PCI_ID 0x082B
+#define PTI_CLV_PCI_ID 0x0900
+#define PTI_TNG_PCI_ID 0x119F
+
+#define INTEL_PTI_PCI_DEVICE(dev, info) { \
+ .vendor = PCI_VENDOR_ID_INTEL, \
+ .device = dev, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+struct pti_device_info {
+ u8 pci_bar;
+ u8 scu_secure_mode:1;
+ u8 has_d8_d16_support:1;
+};
+
+static const struct pti_device_info intel_pti_pnw_info = {
+ .pci_bar = 1,
+ .scu_secure_mode = 0,
+ .has_d8_d16_support = 0,
+};
+
+static const struct pti_device_info intel_pti_clv_info = {
+ .pci_bar = 1,
+ .scu_secure_mode = 1,
+ .has_d8_d16_support = 0,
+};
+
+static const struct pti_device_info intel_pti_tng_info = {
+ .pci_bar = 2,
+ .scu_secure_mode = 0,
+ .has_d8_d16_support = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+ INTEL_PTI_PCI_DEVICE(PTI_PNW_PCI_ID, &intel_pti_pnw_info),
+ INTEL_PTI_PCI_DEVICE(PTI_CLV_PCI_ID, &intel_pti_clv_info),
+ INTEL_PTI_PCI_DEVICE(PTI_TNG_PCI_ID, &intel_pti_tng_info),
+ {0}
+};
+
+#define GET_PCI_BAR(pti_dev) (pti_dev->pti_dev_info->pci_bar)
+#define HAS_SCU_SECURE_MODE(pti_dev) (pti_dev->pti_dev_info->scu_secure_mode)
+#define HAS_D8_D16_SUPPORT(pti_dev) (pti_dev->pti_dev_info->has_d8_d16_support)
+
struct pti_tty {
struct pti_masterchannel *mc;
};
u8 ia_app[MAX_APP_IDS];
u8 ia_os[MAX_OS_IDS];
u8 ia_modem[MAX_MODEM_IDS];
+ struct pti_device_info *pti_dev_info;
+#ifdef CONFIG_INTEL_PTI_STM
+ struct stm_dev stm;
+#endif
};
+static unsigned int stm_enabled;
+module_param(stm_enabled, uint, 0600);
+MODULE_PARM_DESC(stm_enabled, "set to 1 to enable stm");
+
/*
* This protects access to ia_app, ia_os, and ia_modem,
* which keeps track of channels allocated in
*/
static DEFINE_MUTEX(alloclock);
-static const struct pci_device_id pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)},
- {0}
-};
-
static struct tty_driver *pti_tty_driver;
static struct pti_dev *drv_data;
{
unsigned int a;
int retval = -EINVAL;
- int pci_bar = 1;
dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
__func__, __LINE__, pdev->vendor, pdev->device);
__func__, __LINE__);
goto err_disable_pci;
}
- drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
- retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ drv_data->pti_dev_info = (struct pti_device_info *)ent->driver_data;
+
+ drv_data->pti_addr = pci_resource_start(pdev, GET_PCI_BAR(drv_data));
+
+ retval = pci_request_region(pdev, GET_PCI_BAR(drv_data),
+ dev_name(&pdev->dev));
if (retval != 0) {
dev_err(&pdev->dev,
"%s(%d): pci_request_region() returned error %d\n",
goto err_rel_reg;
}
+#ifdef CONFIG_INTEL_PTI_STM
+ /* Initialize STM resources */
+ if ((stm_enabled) && (stm_dev_init(pdev, &drv_data->stm) != 0)) {
+ retval = -ENOMEM;
+ goto err_rel_reg;
+ }
+#endif
+
pci_set_drvdata(pdev, drv_data);
for (a = 0; a < PTITTY_MINOR_NUM; a++) {
return 0;
err_rel_reg:
- pci_release_region(pdev, pci_bar);
+ pci_release_region(pdev, GET_PCI_BAR(drv_data));
err_free_dd:
kfree(drv_data);
err_disable_pci:
tty_port_destroy(&drv_data->port[a]);
}
+#ifdef CONFIG_INTEL_PTI_STM
+ if (stm_enabled)
+ stm_dev_clean(pdev, &drv_data->stm);
+#endif
iounmap(drv_data->pti_ioaddr);
+ pci_release_region(pdev, GET_PCI_BAR(drv_data));
pci_set_drvdata(pdev, NULL);
kfree(drv_data);
- pci_release_region(pdev, 1);
pci_disable_device(pdev);
misc_deregister(&pti_char_driver);
--- /dev/null
+/*
+ * stm.c - MIPI STM Debug Unit driver
+ *
+ * Copyright (C) Intel 2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The STM (System Trace Macro) unit driver configures trace output
+ * to the Intel Tangier PTI port and DWC3 USB xHCI controller
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard and USB Debug-Class
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out it's contents for debugging a mobile system.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sdm.h>
+
+#include "stm.h"
+#include <asm/intel_soc_debug.h>
+#include "../usb/dwc3/core.h"
+
+/* STM Registers */
+#define STM_CTRL 0x0000
+#define STM_USB3DBGGTHR 0x0008
+#define STM_MASMSK 0x0010
+#define STM_CHMSK 0x0080
+#define STM_AGTBAR0 0x00C0
+#define STM_AGTBAR1 0x0140
+#define STM_AGTBAR2 0x01C0
+#define STM_AGTBAR3 0x0240
+#define STM_AGTBAR4 0x02C0
+#define STM_AGTBAR5 0x0340
+#define STM_AGTBAR6 0x03C0
+#define STM_AGTBAR7 0x0440
+#define STM_AGTBAR8 0x04C0
+#define STM_AGTBAR9 0x0540
+#define STM_AGTBAR10 0x05C0
+#define STM_AGTBAR11 0x0640
+
+/*
+ * STM registers
+ */
+#define STM_REG_BASE 0x0 /* registers base offset */
+#define STM_REG_LEN 0x20 /* address length */
+/*
+ * TRB buffers
+ */
+#define STM_TRB_BASE 0x400 /* TRB base offset */
+#define STM_TRB_LEN 0x100 /* address length */
+#define STM_TRB_NUM 16 /* number of TRBs */
+
+/*
+ * This protects R/W to stm registers
+ */
+static DEFINE_MUTEX(stmlock);
+
+static struct stm_dev *_dev_stm;
+
+/* MMIO accessors for the STM register window (offset in bytes). */
+static inline u32 stm_readl(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+static inline void stm_writel(void __iomem *base, u32 offset, u32 value)
+{
+	writel(value, base + offset);
+}
+
+/**
+ * stm_kernel_set_out()-
+ * Kernel API function used to
+ * set STM output configuration to PTI or USB.
+ *
+ * @bus_type:
+ *	0 = PTI 4-bits legacy end user
+ *	1 = PTI 4-bits NiDnT
+ *	2 = PTI 16-bits
+ *	3 = PTI 12-bits
+ *	4 = PTI 8-bits
+ *	15 = USB Debug-Class (DvC.Trace)
+ *
+ * Returns 1 on success (including an unrecognized bus_type, which is
+ * silently ignored), 0 when the STM device is not initialized.
+ */
+int stm_kernel_set_out(int bus_type)
+{
+
+	struct stm_dev *drv_stm = _dev_stm;
+
+	/*
+	 * since this function is exported, this is treated like an
+	 * API function, thus, all parameters should
+	 * be checked for validity.
+	 */
+	if (drv_stm == NULL)
+		return 0;
+
+	mutex_lock(&stmlock);
+
+	/* read-modify-write of the STM control register */
+	drv_stm->stm_ctrl_hwreg.reg_word =
+		stm_readl(drv_stm->stm_ioaddr, (u32)STM_CTRL);
+
+	switch (bus_type) {
+	case STM_PTI_4BIT_LEGACY:
+	case STM_PTI_4BIT_NIDNT:
+	case STM_PTI_16BIT:
+	case STM_PTI_12BIT:
+	case STM_PTI_8BIT:
+		drv_stm->stm_ctrl_hwreg.pti_out_en = true;
+		drv_stm->stm_ctrl_hwreg.usb_debug_en = false;
+		drv_stm->stm_ctrl_hwreg.pti_out_mode_sel = bus_type;
+		stm_writel(drv_stm->stm_ioaddr, (u32)STM_CTRL,
+			   drv_stm->stm_ctrl_hwreg.reg_word);
+		break;
+	case STM_USB:
+		drv_stm->stm_ctrl_hwreg.pti_out_en = false;
+		drv_stm->stm_ctrl_hwreg.usb_debug_en = true;
+		stm_writel(drv_stm->stm_ioaddr, (u32)STM_CTRL,
+			   drv_stm->stm_ctrl_hwreg.reg_word);
+		break;
+	default:
+		/* N/A */
+		break;
+	}
+	mutex_unlock(&stmlock);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(stm_kernel_set_out);
+
+/**
+ * stm_kernel_get_out()-
+ * Kernel API function used to get the current
+ * STM output configuration: PTI or USB.
+ *
+ * Returns the active bus type (same encoding as stm_kernel_set_out()),
+ * or -EOPNOTSUPP when the STM device is not initialized or neither
+ * output is enabled.
+ */
+int stm_kernel_get_out(void)
+{
+	struct stm_dev *drv_stm = _dev_stm;
+	int ret = -EOPNOTSUPP;
+
+	if (drv_stm == NULL)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&stmlock);
+
+	drv_stm->stm_ctrl_hwreg.reg_word =
+		stm_readl(drv_stm->stm_ioaddr, (u32)STM_CTRL);
+
+	if (!drv_stm->stm_ctrl_hwreg.usb_debug_en) {
+		if (drv_stm->stm_ctrl_hwreg.pti_out_en)
+			ret = (int)drv_stm->stm_ctrl_hwreg.pti_out_mode_sel;
+	} else {
+		ret = (int)STM_USB;
+	}
+	mutex_unlock(&stmlock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(stm_kernel_get_out);
+
+/**
+ * stm_set_out() - 'out' parameter set function from 'STM' module
+ *
+ * called when writing to 'out' parameter from 'STM' module in sysfs
+ *
+ * NOTE(review): propagates stm_kernel_set_out()'s 1/0 return rather
+ * than the usual 0/-errno convention for param setters — confirm.
+ */
+static int stm_set_out(const char *val, struct kernel_param *kp)
+{
+	int bus_type_value;
+	int ret = -EINVAL;
+
+	if (sscanf(val, "%2d", &bus_type_value) != 1)
+		return ret;
+
+	return stm_kernel_set_out(bus_type_value);
+}
+
+/**
+ * stm_get_out() - 'out' parameter get function from 'STM' module
+ *
+ * called when reading 'out' parameter from 'STM' module in sysfs.
+ * Yields an empty string when the STM device is unavailable.
+ */
+static int stm_get_out(char *buffer, struct kernel_param *kp)
+{
+	int i;
+
+	i = stm_kernel_get_out();
+	if (i == -EOPNOTSUPP) {
+		buffer[0] = '\0';
+		return 0;
+	}
+
+	return sprintf(buffer, "%2d", i);
+}
+
+/**
+ * stm_init() - initialize stmsub3dbgthr register
+ *
+ * @return - 0 on Success
+ *
+ * NOTE(review): the register is read and the value immediately
+ * overwritten with 0xFF; the read looks dead unless it has a hardware
+ * side effect — confirm before removing.
+ */
+static int stm_init(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_usb3_ctrl *usb3dbg;
+
+	if (!stm)
+		return -ENODEV;
+
+	usb3dbg = &stm->stm_usb3_hwreg;
+	usb3dbg->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_USB3DBGGTHR);
+
+	usb3dbg->reg_word = 0xFF;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_USB3DBGGTHR, usb3dbg->reg_word);
+
+	return 0;
+}
+
+/**
+ * stm_alloc_static_trb_pool() - set stm trb pool dma_addr and return
+ * trb_pool
+ *
+ * @dma_addr - trb pool dma physical address to set
+ * @return - trb pool address ioremaped pointer, or NULL when the STM
+ *           device is not initialized
+ */
+static void *stm_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base;
+	return stm->trb_ioaddr;
+}
+
+/* The TRB pool lives in hardware MMIO space; there is nothing to free. */
+static void ebc_io_free_static_trb_pool(void)
+{
+	/* Nothing to do, HW TRB */
+}
+
+/*
+ * EBC xfer_start callback: route STM trace output to DvC.Trace by
+ * enabling USB debug and disabling the PTI output in STM_CTRL.
+ * Returns 0 on success, -ENODEV when STM is not initialized.
+ */
+static int stm_xfer_start(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_ctrl *stm_ctrl;
+
+	if (!stm)
+		return -ENODEV;
+
+	stm_ctrl = &stm->stm_ctrl_hwreg;
+	stm_ctrl->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_CTRL);
+
+	stm_ctrl->usb_debug_en = true;
+	stm_ctrl->pti_out_en = false;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_CTRL, stm_ctrl->reg_word);
+	/* fix garbled log line: newline was in the middle of the message */
+	pr_info("%s: switch STM output to DvC.Trace\n", __func__);
+
+	return 0;
+}
+
+/*
+ * EBC xfer_stop callback: route STM trace output back to the PTI port
+ * (default) by disabling USB debug and enabling PTI in STM_CTRL.
+ * Returns 0 on success, -ENODEV when STM is not initialized.
+ */
+static int stm_xfer_stop(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_ctrl *stm_ctrl;
+
+	if (!stm)
+		return -ENODEV;
+
+	stm_ctrl = &stm->stm_ctrl_hwreg;
+	stm_ctrl->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_CTRL);
+
+	stm_ctrl->usb_debug_en = false;
+	stm_ctrl->pti_out_en = true;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_CTRL, stm_ctrl->reg_word);
+	/* fix garbled log line: newline was in the middle of the message */
+	pr_info("%s: switch STM to 4bits MIPI PTI (default)\n", __func__);
+
+	return 0;
+}
+
+static struct ebc_io stm_ebc_io_ops = {
+ .name = "stmbuf4kB",
+ .epname = "ep1in",
+ .epnum = 3,
+ .is_ondemand = 1,
+ .static_trb_pool_size = 4,
+ .init = stm_init,
+ .alloc_static_trb_pool = stm_alloc_static_trb_pool,
+ .free_static_trb_pool = ebc_io_free_static_trb_pool,
+ .xfer_start = stm_xfer_start,
+ .xfer_stop = stm_xfer_stop,
+};
+
+#define EXI_IN_TRB_POOL_OFFSET (4*16)	/* 4 TRBs of 16 bytes reserved for STM */
+/* Hand out the EXI-inbound slice of the hardware TRB area. */
+static void *exi_inbound_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base + EXI_IN_TRB_POOL_OFFSET;
+	return stm->trb_ioaddr + EXI_IN_TRB_POOL_OFFSET;
+}
+
+static struct ebc_io exi_in_ebc_io_ops = {
+ .name = "exi-inbound",
+ .epname = "ep8in",
+ .epnum = 17,
+ .is_ondemand = 0,
+ .static_trb_pool_size = 4,
+ .alloc_static_trb_pool = exi_inbound_alloc_static_trb_pool,
+ .free_static_trb_pool = ebc_io_free_static_trb_pool,
+};
+
+#define EXI_OUT_TRB_POOL_OFFSET (8*16)	/* follows the EXI-inbound slice */
+/* Hand out the EXI-outbound slice of the hardware TRB area. */
+static void *exi_outbound_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base + EXI_OUT_TRB_POOL_OFFSET;
+	return stm->trb_ioaddr + EXI_OUT_TRB_POOL_OFFSET;
+}
+
+static struct ebc_io exi_out_ebc_io_ops = {
+ .name = "exi-outbound",
+ .epname = "ep8out",
+ .epnum = 16,
+ .is_ondemand = 0,
+ .static_trb_pool_size = 2,
+ .alloc_static_trb_pool = exi_outbound_alloc_static_trb_pool,
+ .free_static_trb_pool = ebc_io_free_static_trb_pool,
+};
+
+/* Report whether the STM device was successfully initialized.
+ * Declared (void): an empty parameter list is an unprototyped
+ * K&R declaration in C.
+ */
+int stm_is_enabled(void)
+{
+	return (_dev_stm != NULL);
+}
+EXPORT_SYMBOL_GPL(stm_is_enabled);
+
+/**
+ * stm_dev_init()- Used to setup STM resources on the pci bus.
+ *
+ * @pdev- pci_dev struct values for pti device.
+ * @stm- stm_dev struct managing stm resources
+ *
+ * Returns:
+ *	0 for success
+ *	otherwise, error
+ */
+int stm_dev_init(struct pci_dev *pdev,
+		 struct stm_dev *stm)
+{
+	int retval = 0;
+	int pci_bar = 0;
+
+	if (!cpu_has_debug_feature(DEBUG_FEATURE_PTI))
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "%s %s(%d): STM PCI ID %04x:%04x\n", __FILE__,
+		__func__, __LINE__, pdev->vendor, pdev->device);
+
+	stm->stm_addr = pci_resource_start(pdev, pci_bar);
+
+	retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+	if (retval != 0) {
+		dev_err(&pdev->dev,
+			"%s(%d): pci_request_region() returned error %d\n",
+			__func__, __LINE__, retval);
+		return retval;
+	}
+	/* stm_addr is unsigned long: %lx (was %x, a format mismatch) */
+	pr_info("stm addr 0x%lx\n", stm->stm_addr);
+
+	/* Map the STM control registers */
+	stm->stm_reg_base = stm->stm_addr+STM_REG_BASE;
+	stm->stm_ioaddr = ioremap_nocache((u32)stm->stm_reg_base,
+					  STM_REG_LEN);
+	if (!stm->stm_ioaddr) {
+		retval = -ENOMEM;
+		goto out_release_region;
+	}
+
+	/* Map the hardware TRB buffer area */
+	stm->stm_trb_base = stm->stm_addr+STM_TRB_BASE;
+	stm->trb_ioaddr = ioremap_nocache((u32)stm->stm_trb_base,
+					  STM_TRB_LEN);
+	if (!stm->trb_ioaddr) {
+		retval = -ENOMEM;
+		goto out_iounmap_stm_ioaddr;
+	}
+
+	/* Cache the current hardware register state */
+	stm->stm_ctrl_hwreg.reg_word = stm_readl(stm->stm_ioaddr,
+						 (u32)STM_CTRL);
+	stm->stm_usb3_hwreg.reg_word = stm_readl(stm->stm_ioaddr,
+						 (u32)STM_USB3DBGGTHR);
+
+	_dev_stm = stm;
+
+	dwc3_register_io_ebc(&stm_ebc_io_ops);
+	dwc3_register_io_ebc(&exi_in_ebc_io_ops);
+	dwc3_register_io_ebc(&exi_out_ebc_io_ops);
+
+	pr_info("successfully registered ebc io ops\n");
+
+	return retval;
+
+out_iounmap_stm_ioaddr:
+	/* pair with ioremap_nocache() (was pci_iounmap()) */
+	iounmap(stm->stm_ioaddr);
+
+out_release_region:
+	pci_release_region(pdev, pci_bar);
+
+	_dev_stm = NULL;
+	return retval;
+
+}
+EXPORT_SYMBOL_GPL(stm_dev_init);
+
+/**
+ * stm_dev_clean()- Driver exit method to free STM resources from
+ * PCI bus.
+ * @pdev: variable containing pci info of STM.
+ * @dev_stm: stm_dev resources to clean.
+ *
+ * NOTE(review): the mappings were created with ioremap_nocache() in
+ * stm_dev_init(); pci_iounmap() works for MMIO but iounmap() is the
+ * strictly matching call — confirm.
+ */
+void stm_dev_clean(struct pci_dev *pdev,
+		   struct stm_dev *dev_stm)
+{
+	int pci_bar = 0;
+
+	/* If STM driver was not initialized properly,
+	 * there is nothing to do.
+	 */
+	if (_dev_stm == NULL)
+		return;
+
+	dwc3_unregister_io_ebc(&stm_ebc_io_ops);
+	dwc3_unregister_io_ebc(&exi_in_ebc_io_ops);
+	dwc3_unregister_io_ebc(&exi_out_ebc_io_ops);
+
+	if (dev_stm != NULL) {
+		pci_iounmap(pdev, dev_stm->stm_ioaddr);
+		pci_iounmap(pdev, dev_stm->trb_ioaddr);
+	}
+
+	pci_release_region(pdev, pci_bar);
+
+	_dev_stm = NULL;
+}
+EXPORT_SYMBOL_GPL(stm_dev_clean);
+
+module_param_call(stm_out, stm_set_out, stm_get_out, NULL, 0644);
+MODULE_PARM_DESC(stm_out, "configure System Trace Macro output");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florent Pirou");
+MODULE_DESCRIPTION("STM Driver");
--- /dev/null
+/*
+ * stm.h
+ *
+ * Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The STM (System Trace Macro) unit driver configures trace output
+ * to the Intel Tangier PTI port and DWC3 USB xHCI controller
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard and USB Debug-Class
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out it's contents for debugging a mobile system.
+ */
+
+#ifndef _STM_H
+#define _STM_H
+
+#include <linux/pci.h>
+
+/* STM_CTRL register bitmap */
+/**
+ * struct stm_ctrl - STM control block
+ * @usb_debug_en : STM needs to redirect the trace packet to the USB3
+ * @pti_io_idle_threshold : threshold for disabling the IO clock.
+ * @pkt_transfer_size : asserts the *buff_avail signal after it has
+ * 1 or 2 KB of data in buffer
+ * @dis_dcu7_use : disables the usage of DCU7 instead of PTI_Disable
+ * @en_sw_ms : enables software master usage
+ * @mst_id_en : enables the PTI unit to suppress sending the Master Command
+ * @d64_cmd_en : PTI unit to use the D64 commands
+ * @pti_out_mode_sel
+ * 0 = PTI 4-bits legacy end user
+ * 1 = PTI 4-bits NiDnT
+ * 2 = PTI 16-bits
+ * 3 = PTI 12-bits
+ * 4 = PTI 8-bits
+ * @pti_out_en : PTI output enable muxselects that propagate
+ * to the FLIS to be enabled
+ * @lossy_mode_enable : Output Agent will continue to accept writes,
+ * even if the queues are full. The data will be dropped and the
+ * dropped packet indicator will be incremented
+ * @time_stamp_enable : Enable time stamping the final packet in trace record.
+ */
+struct stm_ctrl {
+ union {
+ struct {
+ u32 time_stamp_enable:1;
+ u32 lossy_mode_enable:1;
+ u32 pti_out_en:1;
+ u32 reserved:1;
+ u32 pti_out_mode_sel:4;
+ u32 d64_cmd_en:1;
+ u32 mst_id_en:1;
+ u32 en_sw_ms:1;
+ u32 dis_dcu7_use:1;
+ u32 pkt_transfer_size:1;
+ u32 pti_io_idle_threshold:5;
+ u32 usb_debug_en:1;
+ u32 reserved31_19:13;
+ };
+ u32 reg_word;
+ };
+} __packed;
+
+/**
+ * struct stm_usb3_ctrl - STM buffer USB3 hardware EBC
+ * @region_closure_threshold : This is the threshold for closing
+ * the 1KB region in the debug trace buffer. STM will wait for the
+ * configured time as specified in this field and then closes the region.
+ * The unit of this field is in 64 us. Eg when this field value is set
+ * to 0xffff, then it indicates 2 ms
+ * @empty_packets_threshold : When STM does not have data to send,
+ * it can send empty packets to keep the USB3 alive. This is useful
+ * in case of ISOC traffic, because in this mode the wake up latency
+ * is high. STM will send the configured number of empty packets as
+ * specified in this field.
+ */
+struct stm_usb3_ctrl {
+ union {
+ struct {
+ u32 region_closure_threshold:15;
+ u32 empty_packets_threshold:6;
+ u32 reserved31_21:11;
+ };
+ u32 reg_word;
+ };
+} __packed;
+
+struct stm_dev {
+ unsigned long stm_addr;
+ unsigned long stm_reg_base;
+ unsigned long stm_trb_base;
+ void __iomem *stm_ioaddr;
+ void __iomem *trb_ioaddr;
+ struct stm_ctrl stm_ctrl_hwreg;
+ struct stm_usb3_ctrl stm_usb3_hwreg;
+};
+
+int stm_dev_init(struct pci_dev *pdev, struct stm_dev *dev_stm);
+void stm_dev_clean(struct pci_dev *pdev, struct stm_dev *dev_stm);
+
+#endif /* _STM_H */
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
+#define MMC_BLK_SUSPENDED (1 << 3) /* MMC block device suspended */
unsigned int usage;
unsigned int read_only;
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
-
+#define MMC_BLK_RPMB BIT(4)
+#define MMC_BLK_USER BIT(5)
/*
* Only set in main mmc_blk_data associated
* with mmc_card with mmc_set_drvdata, and keeps
packed->blocks = 0;
}
+static int mmc_rpmb_req_process(struct mmc_blk_data *,
+ struct mmc_ioc_rpmb_req *);
+
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
return err;
}
+/*
+ * MMC_IOC_RPMB_REQ ioctl handler: copy the request frame from user
+ * space, process it against the RPMB partition and copy the updated
+ * frame back. Requires CAP_SYS_RAWIO. Returns 0 or a negative errno.
+ */
+static int mmc_blk_ioctl_rpmb_req(struct block_device *bdev,
+		struct mmc_ioc_rpmb_req __user *ptr)
+{
+	struct mmc_ioc_rpmb_req req;
+	struct mmc_blk_data *md = NULL;
+	int err = 0;
+
+	/* The caller must have CAP_SYS_RAWIO */
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	memset(&req, 0, sizeof(req));
+
+	if (copy_from_user(&req, ptr, sizeof(req)))
+		return -EFAULT;
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		pr_err("%s: NO eMMC block data. Try it later\n",
+			__func__);
+		return -ENODEV;
+	}
+	/* handle RPMB request event */
+	err = mmc_rpmb_req_process(md, &req);
+	if (err)
+		goto out;
+
+	/* feedback the updated request frame to user space */
+	if (copy_to_user(ptr, &req, sizeof(req)))
+		err = -EFAULT;
+out:
+	/*
+	 * Always drop the reference taken by mmc_blk_get(); the
+	 * copy_to_user() failure path previously leaked it.
+	 */
+	mmc_blk_put(md);
+	return err;
+}
+
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int ret = -EINVAL;
if (cmd == MMC_IOC_CMD)
ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+ else if (cmd == MMC_IOC_RPMB_REQ)
+ ret = mmc_blk_ioctl_rpmb_req(bdev,
+ (struct mmc_ioc_rpmb_req __user *)arg);
return ret;
}
md->reset_done &= ~type;
}
+/*
+ * Process one RPMB request frame: validate the block device and card,
+ * prepare the frame, switch to the RPMB partition and run the
+ * partition operation. Returns 0 on success or a negative errno.
+ */
+static int mmc_rpmb_req_process(struct mmc_blk_data *md,
+		struct mmc_ioc_rpmb_req *req)
+{
+	struct mmc_core_rpmb_req rpmb_req;
+	struct mmc_card *card = NULL;
+	int ret;
+
+	if (!md || !req)
+		return -EINVAL;
+
+	/* RPMB access needs CMD23 and the RPMB partition mapping */
+	if (!(md->flags & MMC_BLK_CMD23) ||
+			(md->part_type != EXT_CSD_PART_CONFIG_ACC_RPMB))
+		return -EOPNOTSUPP;
+
+	card = md->queue.card;
+	if (!card || !mmc_card_mmc(card) || !card->ext_csd.rpmb_size)
+		return -ENODEV;
+
+	memset(&rpmb_req, 0, sizeof(struct mmc_core_rpmb_req));
+	rpmb_req.req = req;
+	/* check request */
+	ret = mmc_rpmb_pre_frame(&rpmb_req, card);
+	if (ret) {
+		pr_err("%s: prepare frame failed\n", mmc_hostname(card->host));
+		return ret;
+	}
+
+	mmc_claim_host(card->host);
+
+	if (md->flags & MMC_BLK_SUSPENDED) {
+		pr_warn("%s: MMC block device is already suspended\n",
+			mmc_hostname(card->host));
+		ret = -EPERM;
+		goto out;
+	}
+	/*
+	 * before start, let's change to RPMB partition first
+	 */
+	ret = mmc_blk_part_switch(card, md);
+	if (ret) {
+		int reset_err;
+
+		pr_err("%s: Invalid RPMB partition switch (%d)!\n",
+			mmc_hostname(card->host), ret);
+		/*
+		 * In case partition is not in user data area, make
+		 * a force partition switch.
+		 * we need reset eMMC card at here
+		 *
+		 * The RPMB request itself was NOT processed, so keep
+		 * returning the switch error even when the reset
+		 * succeeds (previously this path returned 0, falsely
+		 * reporting success to the caller).
+		 */
+		reset_err = mmc_blk_reset(md, card->host, MMC_BLK_RPMB);
+		if (!reset_err)
+			mmc_blk_reset_success(md, MMC_BLK_RPMB);
+		else
+			pr_err("%s: eMMC card reset failed (%d)\n",
+				mmc_hostname(card->host), reset_err);
+		goto out;
+	}
+
+	ret = mmc_rpmb_partition_ops(&rpmb_req, card);
+	if (ret)
+		pr_err("%s: failed (%d) to handle RPMB request type (%d)!\n",
+			mmc_hostname(card->host), ret, req->type);
+out:
+	mmc_release_host(card->host);
+	mmc_rpmb_post_frame(&rpmb_req);
+	return ret;
+}
+
+/* Return non-zero when the queue belongs to the RPMB partition, so the
+ * block layer can reject regular requests against it.
+ */
+int mmc_access_rpmb(struct mmc_queue *mq)
+{
+	struct mmc_blk_data *md = mq->data;
+	/*
+	 * If this is a RPMB partition access, return true
+	 */
+	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(mmc_access_rpmb);
+
+/*
+ * Kernel API: process an RPMB request on behalf of another driver.
+ * @emmc is the struct device embedded in the eMMC gendisk; @req is the
+ * request frame. Returns 0 on success or a negative errno.
+ */
+int mmc_rpmb_req_handle(struct device *emmc, struct mmc_ioc_rpmb_req *req)
+{
+	int ret = 0;
+	struct gendisk *disk = NULL;
+	struct mmc_blk_data *md = NULL;
+
+	if (!emmc || !req)
+		return -EINVAL;
+
+	disk = dev_to_disk(emmc);
+	if (!disk) {
+		pr_err("%s: NO eMMC disk found. Try it later\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	/* takes a reference on the block data; dropped below */
+	md = mmc_blk_get(disk);
+	if (!md) {
+		pr_err("%s: NO eMMC block data. Try it later\n",
+			__func__);
+		return -ENODEV;
+	}
+	ret = mmc_rpmb_req_process(md, req);
+	mmc_blk_put(md);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_req_handle);
+
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
ret = mmc_blk_part_switch(card, md);
if (ret) {
- if (req) {
- blk_end_request_all(req, -EIO);
+ pr_err("%s: switch part failed. Try to reset eMMC\n",
+ mmc_hostname(card->host));
+ if (mmc_blk_reset(md, card->host, MMC_BLK_USER)) {
+ if (req)
+ blk_end_request_all(req, -EIO);
+ ret = 0;
+ goto out;
}
- ret = 0;
- goto out;
+ pr_info("%s: Reset eMMC success\n", mmc_hostname(card->host));
+ mmc_blk_reset_success(md, MMC_BLK_USER);
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
mmc_queue_suspend(&md->queue);
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_suspend(&part_md->queue);
+ if (part_md->part_type ==
+ EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ /*
+ * RPMB partition is accessed by API directly.
+ * Driver need to set a flag when suspending
+ * MMC block device to notify API that the
+ * accessing of RPMB partition needs to be
+ * stopped
+ */
+ mmc_claim_host(card->host);
+ part_md->flags |= MMC_BLK_SUSPENDED;
+ mmc_release_host(card->host);
+ }
}
}
return 0;
mmc_queue_resume(&md->queue);
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_resume(&part_md->queue);
+ if (part_md->part_type ==
+ EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ /*
+ * RPMB partition is accessed by API directly.
+ * Driver need to clear MMC_BLK_SUSPENDED flag
+ * to make sure the next RPMB partition access
+ * request won't be blocked
+ */
+ mmc_claim_host(card->host);
+ part_md->flags &= ~MMC_BLK_SUSPENDED;
+ mmc_release_host(card->host);
+ }
}
}
return 0;
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include "queue.h"
#define MMC_QUEUE_BOUNCESZ 65536
return BLKPREP_KILL;
}
- if (mq && mmc_card_removed(mq->card))
+ if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
req->cmd_flags |= REQ_DONTPREP;
extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
extern void mmc_packed_clean(struct mmc_queue *);
+extern int mmc_access_rpmb(struct mmc_queue *);
#endif
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- quirks.o slot-gpio.o
+ quirks.o slot-gpio.o mmc_panic_ops.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
mmc_card_ddr_mode(card) ? "DDR " : "",
type);
} else {
- pr_info("%s: new %s%s%s%s%s card at address %04x\n",
+ pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_highspeed(card) ? "high speed " : ""),
(mmc_card_hs200(card) ? "HS200 " : ""),
+ (mmc_card_hs400(card) ? "HS400 " : ""),
mmc_card_ddr_mode(card) ? "DDR " : "",
uhs_bus_speed_mode, type, card->rca);
}
mmc_set_ios(host);
/* Wait for at least 1 ms according to spec */
- mmc_delay(1);
+ if (host->ops->busy_wait)
+ host->ops->busy_wait(host, 1000);
+ else
+ mmc_delay(1);
/*
* Failure to switch is indicated by the card holding
mmc_host_clk_hold(host);
+ if (host->ops->set_dev_power)
+ host->ops->set_dev_power(host, true);
+
/* If ocr is set, we use it */
if (host->ocr)
bit = ffs(host->ocr) - 1;
* This delay should be sufficient to allow the power supply
* to reach the minimum voltage.
*/
- mmc_delay(10);
+ usleep_range(10000, 11000);
host->ios.clock = host->f_init;
* This delay must be at least 74 clock sizes, or 1 ms, or the
* time required to reach a stable voltage.
*/
- mmc_delay(10);
+ usleep_range(5000, 6000);
mmc_host_clk_release(host);
}
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
+ if (host->ops->set_dev_power)
+ host->ops->set_dev_power(host, false);
+
/*
* Some configurations, such as the 802.11 SDIO card in the OLPC
* XO-1.5, require a short delay after poweroff before the card
card->erase_shift = ffs(card->ssr.au) - 1;
} else if (card->ext_csd.hc_erase_size) {
card->pref_erase = card->ext_csd.hc_erase_size;
- } else {
+ } else if (card->erase_size) {
sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
if (sz < 128)
card->pref_erase = 512 * 1024 / 512;
if (sz)
card->pref_erase += card->erase_size - sz;
}
- }
+ } else
+ card->pref_erase = 0;
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
mmc_release_host(host);
out:
- if (host->caps & MMC_CAP_NEEDS_POLL)
+ mmc_emergency_setup(host);
+ if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
else
mmc_power_up(host);
mmc_detect_change(host, 0);
+ if (host->caps2 & MMC_CAP2_INIT_CARD_SYNC)
+ flush_work_sync(&host->detect.work);
}
void mmc_stop_host(struct mmc_host *host)
case MMC_TIMING_MMC_HS200:
str = "mmc high-speed SDR200";
break;
+ case MMC_TIMING_MMC_HS400:
+ str = "mmc high-speed DDR200";
+ break;
default:
str = "invalid";
break;
static void mmc_select_card_type(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+ u8 card_type = card->ext_csd.raw_card_type &
+ EXT_CSD_CARD_TYPE_MASK_FULL;
u32 caps = host->caps, caps2 = host->caps2;
unsigned int hs_max_dtr = 0;
card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
hs_max_dtr = MMC_HS200_MAX_DTR;
+ if ((caps2 & MMC_CAP2_HS400_1_8V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) ||
+ (caps2 & MMC_CAP2_HS400_1_2V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_2V))
+ hs_max_dtr = MMC_HS400_MAX_DTR;
+
card->ext_csd.hs_max_dtr = hs_max_dtr;
card->ext_csd.card_type = card_type;
}
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
+ card->ext_csd.rpmb_size = 128 *
+ card->ext_csd.raw_rpmb_size_mult;
+ card->ext_csd.rpmb_size <<= 2; /* Unit: half sector */
if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
EXT_CSD_PART_CONFIG_ACC_RPMB,
card->ext_csd.data_sector_size = 512;
}
+ /*
+	 * If legacy reliable write is used, then the block count must not
+	 * be bigger than the reliable write sectors
+ */
+ if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+ if (card->ext_csd.rel_sectors < RPMB_AVALIABLE_SECTORS)
+ card->rpmb_max_req = card->ext_csd.rel_sectors;
+ else
+ card->rpmb_max_req = RPMB_AVALIABLE_SECTORS;
+ } else
+ card->rpmb_max_req = RPMB_AVALIABLE_SECTORS;
out:
return err;
}
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
+MMC_DEV_ATTR(rpmb_size, "%d\n", card->ext_csd.rpmb_size);
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_enhanced_area_size.attr,
&dev_attr_raw_rpmb_size_mult.attr,
&dev_attr_rel_sectors.attr,
+ &dev_attr_rpmb_size.attr,
NULL,
};
index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
EXT_CSD_PWR_CL_52_195 :
EXT_CSD_PWR_CL_DDR_52_195;
- else if (host->ios.clock <= 200000000)
- index = EXT_CSD_PWR_CL_200_195;
+ else if (host->ios.clock <= 200000000) {
+ if (mmc_card_hs400(card))
+ index = EXT_CSD_PWR_CL_200_DDR_195;
+ else
+ index = EXT_CSD_PWR_CL_200_195;
+ }
break;
case MMC_VDD_27_28:
case MMC_VDD_28_29:
}
/*
+ * Support HS400:
+ * This function should be called after HS200 tuning.
+ */
+static int mmc_select_hs400_start(struct mmc_card *card)
+{
+ int err = -EINVAL;
+ struct mmc_host *host;
+ static unsigned ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
+ static unsigned bus_width = MMC_BUS_WIDTH_8;
+
+ BUG_ON(!card);
+
+ host = card->host;
+ /* HS400 mode only supports 8bit bus.*/
+ if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+ pr_err("HS400: MMC host does not support 8bit bus, error!\n");
+ goto err;
+ }
+
+ /* Must set HS_TIMING to 1 after tuning completion. */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 1, 0);
+ if (!err) {
+ /* Set timing to DDR50 first */
+ mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
+ /* Then, set clock to 50MHz */
+ mmc_set_clock(host, MMC_HIGH_DDR_MAX_DTR);
+ } else {
+ goto err;
+ }
+
+ /*
+ * Host is capable of 8bit transfer, switch
+ * the device to work in 8bit transfer mode.
+ * On success set 8bit bus width on the host.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bit,
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ goto err;
+
+ /* Bus test */
+ mmc_set_bus_width(card->host, bus_width);
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ err = mmc_compare_ext_csds(card, bus_width);
+ else
+ err = mmc_bus_test(card, bus_width);
+ if (err)
+ goto err;
+
+err:
+ return err;
+}
+
+static int mmc_select_hs400_end(struct mmc_card *card, unsigned int max_dtr)
+{
+ int err = -EINVAL;
+
+ /* Switch timing to HS400 now. */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 3, 0);
+ if (!err) {
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS400);
+ /*
+		 * After enabling HS400 mode, we should restore
+ * frequency to 200MHz.
+ */
+ mmc_set_clock(card->host, max_dtr);
+ }
+ return err;
+}
+
+/*
* Selects the desired buswidth and switch to the HS200 mode
* if bus width set without error
*/
host = card->host;
- if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
- host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
+ host->caps2 & MMC_CAP2_HS200_1_2V_SDR) ||
+ (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_HS400_1_2V &&
+ host->caps2 & MMC_CAP2_HS400_1_2V_DDR))
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
- if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
- host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+ if (err && ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+ host->caps2 & MMC_CAP2_HS200_1_8V_SDR) ||
+ (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_HS400_1_8V &&
+ host->caps2 & MMC_CAP2_HS400_1_8V_DDR)))
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If fails try again during next card power cycle */
*/
if (card->ext_csd.hs_max_dtr != 0) {
err = 0;
+ /* Support HS400: set to HS200 before tuning complete. */
if (card->ext_csd.hs_max_dtr > 52000000 &&
- host->caps2 & MMC_CAP2_HS200)
+ (host->caps2 & MMC_CAP2_HS200 ||
+ host->caps2 & MMC_CAP2_HS400))
err = mmc_select_hs200(card);
else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
err = 0;
} else {
if (card->ext_csd.hs_max_dtr > 52000000 &&
+ host->caps2 & MMC_CAP2_HS400 &&
+ (card->ext_csd.card_type &
+ EXT_CSD_CARD_TYPE_HS400_1_8V ||
+ card->ext_csd.card_type &
+ EXT_CSD_CARD_TYPE_HS400_1_2V)) {
+ mmc_card_set_hs400(card);
+ mmc_set_timing(card->host,
+ MMC_TIMING_MMC_HS200);
+ } else if (card->ext_csd.hs_max_dtr > 52000000 &&
host->caps2 & MMC_CAP2_HS200) {
mmc_card_set_hs200(card);
mmc_set_timing(card->host,
*/
max_dtr = (unsigned int)-1;
- if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
+ if (mmc_card_highspeed(card) ||
+ mmc_card_hs200(card) ||
+ mmc_card_hs400(card)) {
if (max_dtr > card->ext_csd.hs_max_dtr)
max_dtr = card->ext_csd.hs_max_dtr;
if (mmc_card_highspeed(card) && (max_dtr > 52000000))
}
/*
- * Indicate HS200 SDR mode (if supported).
+ * Indicate HS200 SDR mode or HS400 DDR mode (if supported).
*/
- if (mmc_card_hs200(card)) {
+ if (mmc_card_hs200(card) || mmc_card_hs400(card)) {
u32 ext_csd_bits;
u32 bus_width = card->host->ios.bus_width;
* 3. set the clock to > 52Mhz <=200MHz and
* 4. execute tuning for HS200
*/
- if ((host->caps2 & MMC_CAP2_HS200) &&
+ /* Support HS400: tuning under HS200 mode. */
+ if ((host->caps2 & MMC_CAP2_HS200 ||
+ host->caps2 & MMC_CAP2_HS400) &&
card->host->ops->execute_tuning) {
mmc_host_clk_hold(card->host);
err = card->host->ops->execute_tuning(card->host,
goto err;
}
- ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ /* Support HS400 */
+ if (mmc_card_hs400(card)) {
+ /*
+ * Per spec 5.0, follow below sequence to enable HS400:
+ * 1. Set HS_TIMING to 1 after HS200 tuning.
+ * 2. Set frequency below 52MHz.
+ * 3. Set bus width to DDR 8bit.
+ * 4. Set HS_TIMING to 3 as HS400.
+ */
+ err = mmc_select_hs400_start(card);
+ if (err) {
+ pr_warn("%s: hs400_start err=0x%x.\n",
+ mmc_hostname(card->host), err);
+ goto free_card;
+ }
+ ext_csd_bits = EXT_CSD_DDR_BUS_WIDTH_8;
+ } else {
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+ }
err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
if (err)
- pr_warning("%s: power class selection to bus width %d"
+ pr_warn("%s: power class selection to bus width %d"
" failed\n", mmc_hostname(card->host),
1 << bus_width);
+ if (mmc_card_hs400(card)) {
+ err = mmc_select_hs400_end(card, max_dtr);
+ if (err) {
+ pr_warn("%s: hs400_end err=0x%x.\n",
+ mmc_hostname(card->host), err);
+ goto free_card;
+ }
+ }
}
/*
* Activate wide bus and DDR (if supported).
*/
- if (!mmc_card_hs200(card) &&
+ if ((!mmc_card_hs200(card) && !mmc_card_hs400(card)) &&
(card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
static unsigned ext_csd_bits[][2] = {
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- for (i = 100; i; i--) {
+ for (i = 200; i; i--) {
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
break;
err = -ETIMEDOUT;
- mmc_delay(10);
+ usleep_range(5000, 5500);
}
if (rocr && !mmc_host_is_spi(host))
return 0;
}
+
+static int mmc_rpmb_send_command(struct mmc_card *card, u8 *buf, __u16 blks,
+ __u16 type, u8 req_type)
+{
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_command sbc = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+ u8 *transfer_buf = NULL;
+
+ mrq.sbc = &sbc;
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = NULL;
+ transfer_buf = kzalloc(512 * blks, GFP_KERNEL);
+ if (!transfer_buf)
+ return -ENOMEM;
+
+ /*
+ * set CMD23
+ */
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ sbc.arg = blks;
+ if ((req_type == RPMB_REQ) && (type == RPMB_WRITE_DATA ||
+ type == RPMB_PROGRAM_KEY))
+ sbc.arg |= 1 << 31;
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ /*
+ * set CMD25/18
+ */
+ sg_init_one(&sg, transfer_buf, 512 * blks);
+ if (req_type == RPMB_REQ) {
+ cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ sg_copy_from_buffer(&sg, 1, buf, 512 * blks);
+ data.flags |= MMC_DATA_WRITE;
+ } else {
+ cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+ data.flags |= MMC_DATA_READ;
+ }
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ data.blksz = 512;
+ data.blocks = blks;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (req_type != RPMB_REQ)
+ sg_copy_to_buffer(&sg, 1, buf, 512 * blks);
+
+ kfree(transfer_buf);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+ return 0;
+}
+
+void mmc_rpmb_post_frame(struct mmc_core_rpmb_req *rpmb_req)
+{
+ int i;
+ struct mmc_ioc_rpmb_req *p_req;
+ __u8 *buf_frame;
+
+ if (!rpmb_req || !rpmb_req->ready)
+ return;
+
+ p_req = rpmb_req->req;
+ buf_frame = rpmb_req->frame;
+
+ if (!p_req || !buf_frame)
+ return;
+ /*
+	 * According to the request check rules, these are the
+	 * post-processing rules
+ * All will return result.
+ * GET_WRITE_COUNTER:
+ * must: write counter, nonce
+ * optional: MAC
+ * WRITE_DATA:
+ * must: MAC, write counter
+ * READ_DATA:
+ * must: nonce, data
+ * optional: MAC
+ * PROGRAM_KEY:
+ * must: Nothing
+ *
+ * Except READ_DATA, all of these operations only need to parse
+ * one frame. READ_DATA needs blks frames to get DATA
+ */
+
+ memcpy(p_req->result, buf_frame + RPMB_RES_BEG, 2);
+ *p_req->result = be16_to_cpup(p_req->result);
+
+ if (p_req->type == RPMB_PROGRAM_KEY)
+ goto out;
+
+ if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+ p_req->type == RPMB_WRITE_DATA) {
+ memcpy(p_req->wc, buf_frame + RPMB_WCOUNTER_BEG, 4);
+ *p_req->wc = be32_to_cpup(p_req->wc);
+ }
+
+ if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+ p_req->type == RPMB_READ_DATA) {
+ /* nonce copy */
+ memcpy(p_req->nonce, buf_frame + RPMB_NONCE_BEG, 16);
+ }
+ /*
+ * Take MAC within the last package
+ */
+ if (p_req->type == RPMB_READ_DATA) {
+ __u8 *data = p_req->data;
+ for (i = 0; i < p_req->blk_cnt; i++) {
+ memcpy(data, buf_frame + i * 512 + RPMB_DATA_BEG, 256);
+ data += 256;
+ }
+ /*
+ * MAC stored in the last package
+ */
+ if (p_req->mac)
+ memcpy(p_req->mac, buf_frame + i * 512 + RPMB_MAC_BEG,
+ 32);
+ } else if (p_req->mac)
+ memcpy(p_req->mac, buf_frame + RPMB_MAC_BEG, 32);
+out:
+ kfree(buf_frame);
+ rpmb_req->frame = NULL;
+ return;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_post_frame);
+
+static int mmc_rpmb_request_check(struct mmc_card *card,
+ struct mmc_ioc_rpmb_req *p_req)
+{
+ /*
+	 * Some parameters are mandatory for each operation. Different
+	 * operations expect different parameters. The code below
+	 * checks for this.
+ *
+ * All operations will need result.
+ * GET_WRITE_COUNTER:
+ * must: write counter, nonce
+ * optional: MAC
+ * WRITE_DATA:
+ * must: MAC, data, write counter
+ * READ_DATA:
+ * must: nonce, data
+ * optional: MAC
+ * PROGRAM_KEY:
+ * must: MAC
+ *
+ * So here, we only check the 'must' paramters
+ */
+ if (!p_req->result) {
+ pr_err("%s: Type %d has NULL pointer for result\n",
+ mmc_hostname(card->host), p_req->type);
+ return -EINVAL;
+ }
+
+ if (p_req->type == RPMB_GET_WRITE_COUNTER) {
+ if (!p_req->nonce || !p_req->wc) {
+ pr_err("%s: Type %d has NULL pointer for nonce/wc\n",
+ mmc_hostname(card->host), p_req->type);
+ return -EINVAL;
+ }
+ /*
+ * used to allocate frame
+ */
+ p_req->blk_cnt = 1;
+ } else if (p_req->type == RPMB_WRITE_DATA ||
+ p_req->type == RPMB_READ_DATA) {
+ if ((__u32)(p_req->addr + p_req->blk_cnt) >
+ card->ext_csd.rpmb_size) {
+ pr_err("%s Type %d: beyond the RPMB partition rang addr %d, blk_cnt %d, rpmb_size %d\n",
+ mmc_hostname(card->host),
+ p_req->type,
+ p_req->addr,
+ p_req->blk_cnt,
+ card->ext_csd.rpmb_size);
+ return -EINVAL;
+ }
+ if (p_req->blk_cnt == 0) {
+ pr_err("%s: Type %d has zero block count\n",
+ mmc_hostname(card->host),
+ p_req->blk_cnt);
+ return -EINVAL;
+ } else if (p_req->blk_cnt > card->rpmb_max_req) {
+ pr_err("%s: Type %d has invalid block count, cannot large than %d\n",
+ mmc_hostname(card->host),
+ p_req->blk_cnt,
+ card->rpmb_max_req);
+ return -EINVAL;
+ }
+ if (!p_req->data) {
+ pr_err("%s: Type %d has NULL pointer for data\n",
+ mmc_hostname(card->host), p_req->type);
+ return -EINVAL;
+ }
+ if (p_req->type == RPMB_WRITE_DATA) {
+ if (!p_req->wc || !p_req->mac) {
+ pr_err("%s: Type %d has NULL pointer for write counter/MAC\n",
+ mmc_hostname(card->host),
+ p_req->type);
+ return -EINVAL;
+ }
+ } else {
+ if (!p_req->nonce) {
+ pr_err("%s: Type %d has NULL pointer for nonce\n",
+ mmc_hostname(card->host),
+ p_req->type);
+ return -EINVAL;
+ }
+ }
+ } else if (p_req->type == RPMB_PROGRAM_KEY) {
+ if (!p_req->mac) {
+ pr_err("%s: Type %d has NULL pointer for MAC\n",
+ mmc_hostname(card->host), p_req->type);
+ return -EINVAL;
+ }
+ /*
+ * used to allocate frame
+ */
+ p_req->blk_cnt = 1;
+ } else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/*
+ * prepare the request of RPMB frame
+ * RPMB frame is MSB first
+ * convert needed bytes
+ * return how many frames will be prepared
+ */
+int mmc_rpmb_pre_frame(struct mmc_core_rpmb_req *rpmb_req,
+ struct mmc_card *card)
+{
+ int i, ret;
+ struct mmc_ioc_rpmb_req *p_req;
+ __u8 *buf_frame;
+ __u16 blk_cnt, addr, type;
+ __u32 w_counter;
+
+ if (!rpmb_req || !card)
+ return -EINVAL;
+
+ p_req = rpmb_req->req;
+ if (!p_req) {
+ pr_err("%s: mmc_ioc_rpmb_req is NULL. Wrong parameter\n",
+ mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
+ /*
+ * make sure these two items are clear
+ */
+ rpmb_req->ready = 0;
+ rpmb_req->frame = NULL;
+
+ ret = mmc_rpmb_request_check(card, p_req);
+ if (ret)
+ return ret;
+
+ buf_frame = kzalloc(512 * p_req->blk_cnt, GFP_KERNEL);
+ if (!buf_frame) {
+ pr_err("%s: cannot allocate frame for type %d\n",
+ mmc_hostname(card->host), p_req->type);
+ return -ENOMEM;
+ }
+
+ type = cpu_to_be16p(&p_req->type);
+ if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+ p_req->type == RPMB_READ_DATA) {
+ /*
+ * One package prepared
+ * This request needs Nonce and type
+ * If is data read, then also need addr
+ */
+ memcpy(buf_frame + RPMB_TYPE_BEG, &type, 2);
+ if (p_req->type == RPMB_READ_DATA) {
+ addr = cpu_to_be16p(&p_req->addr);
+ memcpy(buf_frame + RPMB_ADDR_BEG, &addr, 2);
+ }
+ /* convert Nonce code */
+ memcpy(buf_frame + RPMB_NONCE_BEG, p_req->nonce, 16);
+ } else if (p_req->type == RPMB_WRITE_DATA) {
+ __u8 *data = p_req->data;
+ /*
+ * multiple package prepared
+		 * This request needs blk_cnt, addr, write_counter,
+ * data and mac
+ */
+ blk_cnt = cpu_to_be16p(&p_req->blk_cnt);
+ addr = cpu_to_be16p(&p_req->addr);
+ w_counter = cpu_to_be32p(p_req->wc);
+ for (i = 0; i < p_req->blk_cnt; i++) {
+ memcpy(buf_frame + i * 512 + RPMB_TYPE_BEG,
+ &type, 2);
+ memcpy(buf_frame + i * 512 + RPMB_BLKS_BEG,
+ &blk_cnt, 2);
+ memcpy(buf_frame + i * 512 + RPMB_ADDR_BEG,
+ &addr, 2);
+ memcpy(buf_frame + i * 512 + RPMB_WCOUNTER_BEG,
+ &w_counter, 4);
+ memcpy(buf_frame + i * 512 + RPMB_DATA_BEG,
+ data, 256);
+ data += 256;
+ }
+ /* convert MAC code */
+ memcpy(buf_frame + 512 * (i - 1) + RPMB_MAC_BEG,
+ p_req->mac, 32);
+ } else if (p_req->type == RPMB_PROGRAM_KEY) {
+ /*
+ * One package prepared
+ * This request only need mac
+ */
+ memcpy(buf_frame + RPMB_TYPE_BEG, &type, 2);
+ /* convert MAC code */
+ memcpy(buf_frame + RPMB_MAC_BEG,
+ p_req->mac, 32);
+ } else {
+ pr_err("%s: We shouldn't be here\n", mmc_hostname(card->host));
+ kfree(buf_frame);
+ return -EINVAL;
+ }
+ rpmb_req->ready = 1;
+ rpmb_req->frame = buf_frame;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_pre_frame);
+
+int mmc_rpmb_partition_ops(struct mmc_core_rpmb_req *rpmb_req,
+ struct mmc_card *card)
+{
+ int err = 0;
+ struct mmc_ioc_rpmb_req *p_req;
+ __u16 type, blks;
+ __u8 *buf_frame;
+
+ if (!rpmb_req || !card)
+ return -EINVAL;
+
+ p_req = rpmb_req->req;
+ buf_frame = rpmb_req->frame;
+
+ if (!p_req || !rpmb_req->ready || !buf_frame) {
+ pr_err("%s: mmc_ioc_rpmb_req is not prepared\n",
+ mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
+ type = p_req->type;
+ blks = p_req->blk_cnt;
+
+ /*
+ * STEP 1: send request to RPMB partition
+ */
+ if (type == RPMB_WRITE_DATA)
+ err = mmc_rpmb_send_command(card, buf_frame, blks,
+ type, RPMB_REQ);
+ else
+ err = mmc_rpmb_send_command(card, buf_frame, 1, type, RPMB_REQ);
+
+ if (err) {
+ pr_err("%s: request write counter failed (%d)\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ memset(buf_frame, 0, 512 * blks);
+ /*
+ * STEP 2: check write result
+ * Only for WRITE_DATA or Program key
+ */
+ if (type == RPMB_WRITE_DATA ||
+ type == RPMB_PROGRAM_KEY) {
+ buf_frame[RPMB_TYPE_BEG + 1] = RPMB_RESULT_READ;
+ err = mmc_rpmb_send_command(card, buf_frame, 1,
+ RPMB_RESULT_READ, RPMB_REQ);
+ if (err) {
+ pr_err("%s: request write counter failed (%d)\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+ }
+
+ /*
+ * STEP 3: get response from RPMB partition
+ */
+
+ if (type == RPMB_READ_DATA)
+ err = mmc_rpmb_send_command(card, buf_frame,
+ blks, type, RPMB_RESP);
+ else
+ err = mmc_rpmb_send_command(card, buf_frame,
+ 1, type, RPMB_RESP);
+ if (err) {
+ pr_err("%s: response write counter failed (%d)\n",
+ mmc_hostname(card->host), err);
+ }
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_partition_ops);
--- /dev/null
+/*
+ * linux/drivers/mmc/core/mmc_panic_ops.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * Author: chuanxiao.dong@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include "core.h"
+#include "bus.h"
+#include "host.h"
+
+#include "mmc_ops.h"
+
+
+static struct mmc_panic_host *panic_host;
+
+static int mmc_emergency_prepare(void)
+{
+ struct mmc_host *mmc = panic_host->mmc;
+
+ if (mmc == NULL) {
+ pr_err("%s: panic host was not setup\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+	 * Once a panic has happened, we monopolize the host controller,
+	 * so claim the host without ever releasing it.
+ */
+ mmc->claimed = 1;
+ mmc->claimer = current;
+ mmc->claim_cnt += 1;
+#ifdef CONFIG_MMC_CLKGATE
+ /*
+ * disable the clock gating
+ */
+ mmc->clk_gated = false;
+ mmc->clk_requests++;
+ mmc->ios.clock = mmc->clk_old;
+#endif
+ return 0;
+}
+
+static void mmc_emergency_ready(void)
+{
+ panic_host->panic_ready = 1;
+}
+
+/*
+ * Return the card size in sectors.
+ *
+ * return value:
+ * the sector number
+ */
+static unsigned int mmc_get_capacity(struct mmc_card *card)
+{
+ if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+ return card->ext_csd.sectors;
+ else
+ return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
+static void mmc_emergency_send_req(struct mmc_request *mrq)
+{
+ struct mmc_panic_host *host = panic_host;
+
+ mrq->cmd->error = 0;
+ mrq->cmd->mrq = mrq;
+
+ if (mrq->data) {
+ BUG_ON(mrq->data->blksz > host->max_blk_size);
+ BUG_ON(mrq->data->blocks > host->max_blk_count);
+ BUG_ON(mrq->data->blocks * mrq->data->blksz >
+ host->max_req_size);
+
+ mrq->cmd->data = mrq->data;
+ mrq->data->error = 0;
+ mrq->data->mrq = mrq;
+ if (mrq->stop) {
+ mrq->data->stop = mrq->stop;
+ mrq->stop->error = 0;
+ mrq->stop->mrq = mrq;
+ }
+ }
+
+ /*
+ * Send the request to host
+ *
+	 * If request handling is successful, return.
+	 * If request handling fails and retries remain, resend the request.
+	 * While retrying, if request handling still fails, the core layer
+	 * keeps retrying until cmd->retries reaches 0.
+	 *
+	 * This way, retries are invisible to the host driver and are
+	 * controlled entirely by the core driver.
+ */
+ host->panic_ops->request(host, mrq);
+
+ while ((mrq->cmd->error || (mrq->data && (mrq->data->error ||
+ (mrq->data->stop && mrq->data->stop->error)))) &&
+ mrq->cmd->retries > 0) {
+ /* clear errors */
+ mrq->cmd->error = 0;
+ if (mrq->data) {
+ mrq->data->error = 0;
+ if (mrq->stop)
+ mrq->stop->error = 0;
+ }
+ host->panic_ops->request(host, mrq);
+ mrq->cmd->retries--;
+ }
+}
+
+static int mmc_emergency_send_cmd(struct mmc_command *cmd, int retries)
+{
+ struct mmc_request mrq;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ memset(cmd->resp, 0, sizeof(cmd->resp));
+ cmd->retries = retries;
+
+ mrq.cmd = cmd;
+ cmd->data = NULL;
+
+ mmc_emergency_send_req(&mrq);
+
+ return cmd->error;
+}
+
+static int __mmc_emergency_write(unsigned int blk_id)
+{
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ cmd.opcode = MMC_WRITE_BLOCK;
+ cmd.arg = blk_id;
+ if (!panic_host->blkaddr)
+ cmd.arg <<= 9;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ /*
+ * Fix these values;
+ */
+ data.blksz = 512;
+ data.blocks = 1;
+ data.dmabuf = panic_host->dmabuf;
+
+ mmc_emergency_send_req(&mrq);
+
+ return cmd.error;
+}
+
+
+static int mmc_emergency_go_idle(struct mmc_panic_host *host)
+{
+ int err;
+ struct mmc_command cmd;
+
+ /*
+ * Non-SPI hosts need to prevent chipselect going active during
+ * GO_IDLE; that would put chips into SPI mode. Remind them of
+ * that in case of hardware that won't pull up DAT3/nCS otherwise.
+ *
+ * SPI hosts ignore ios.chip_select; it's managed according to
+	 * rules that must accommodate non-MMC slaves which this layer
+ * won't even know about.
+ */
+ if (!mmc_host_is_spi(host)) {
+ host->ios.chip_select = MMC_CS_HIGH;
+ host->panic_ops->set_ios(host);
+ mdelay(1);
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_GO_IDLE_STATE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+ err = mmc_emergency_send_cmd(&cmd, 0);
+
+ mdelay(1);
+
+ if (!mmc_host_is_spi(host)) {
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->panic_ops->set_ios(host);
+ mdelay(1);
+ }
+
+ return err;
+}
+static int mmc_emergency_send_op_cond(struct mmc_panic_host *host,
+ u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd;
+ int i, err = 0;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SEND_OP_COND;
+ cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
+
+ for (i = 100; i; i--) {
+ err = mmc_emergency_send_cmd(&cmd, 0);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ /* otherwise wait until reset completes */
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd.resp[0] & R1_SPI_IDLE))
+ break;
+ } else {
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+ }
+
+ err = -ETIMEDOUT;
+
+ /*
+ * If command 1 is failed, wait 10ms and then
+ * have a retry. Card may need time to prepare
+ * for the next command 1
+ */
+ mdelay(10);
+ }
+
+ if (rocr && !mmc_host_is_spi(host))
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+static int mmc_emergency_all_send_cid(u32 *cid)
+{
+ int err;
+ struct mmc_command cmd;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_ALL_SEND_CID;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
+
+ err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ memcpy(cid, cmd.resp, sizeof(u32) * 4);
+
+ return 0;
+}
+
+static int mmc_emergency_set_relative_addr(struct mmc_card *card)
+{
+ int err;
+ struct mmc_command cmd;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SET_RELATIVE_ADDR;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mmc_emergency_select_card(struct mmc_card *card)
+{
+ int err;
+ struct mmc_command cmd;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SELECT_CARD;
+
+ if (card) {
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+ }
+
+ err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mmc_emergency_send_status(struct mmc_panic_host *host, u32 *status)
+{
+ struct mmc_card *card = host->card;
+ int err;
+ struct mmc_command cmd;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ /* NOTE: callers are required to understand the difference
+ * between "native" and SPI format status words!
+ */
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
+static int mmc_emergency_switch(struct mmc_panic_host *host,
+ u8 set, u8 index, u8 value)
+{
+ struct mmc_card *card = host->card;
+ int err;
+ struct mmc_command cmd;
+ u32 status;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (index << 16) |
+ (value << 8) |
+ set;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ /* Must check status to be sure of no errors */
+ do {
+ err = mmc_emergency_send_status(host, &status);
+ if (err)
+ return err;
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ break;
+ if (mmc_host_is_spi(host))
+ break;
+ } while (R1_CURRENT_STATE(status) == 7);
+
+ if (mmc_host_is_spi(host)) {
+ if (status & R1_SPI_ILLEGAL_COMMAND)
+ return -EBADMSG;
+ } else {
+ if (status & 0xFDFFA000)
+ pr_warn("%s: unexpected status %#x after switch",
+ mmc_hostname(card->host), status);
+ if (status & R1_SWITCH_ERROR)
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+static int mmc_emergency_spi_set_crc(struct mmc_panic_host *host, int use)
+{
+ return -1;
+}
+
+static int mmc_emergency_send_cid(struct mmc_panic_host *host, u32 *cid)
+{
+ return -1;
+}
+/*
+ * reinit card:
+ * should also consider about the SPI host
+ */
+static int mmc_emergency_reinit_card(void)
+{
+ struct mmc_panic_host *host = panic_host;
+ struct mmc_card *card = host->card;
+ u32 ocr = host->ocr;
+ int err, ddr = 0;
+ u32 cid[4];
+ unsigned int max_dtr;
+
+ /*
+	 * Lower the clock to the initialization clock rate
+ */
+ if (mmc_host_is_spi(host)) {
+ host->ios.chip_select = MMC_CS_HIGH;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ } else {
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ }
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ /*
+	 * As the eMMC spec says, the card init frequency cannot be higher
+	 * than 400KHz, and a well-behaved card should support a 400KHz
+	 * clock during the initialization process.
+ */
+ host->ios.clock = 400000;
+ host->panic_ops->set_ios(host);
+
+ /*
+ * Since we're changing the OCR value, we seem to
+ * need to tell some cards to go back to the idle
+ * state. We wait 1ms to give cards time to
+ * respond.
+ */
+ mmc_emergency_go_idle(host);
+
+ /* The extra bit indicates that we support high capacity */
+ err = mmc_emergency_send_op_cond(host, ocr | (1 << 30), NULL);
+ if (err)
+ goto err;
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_emergency_spi_set_crc(host, 1);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Fetch CID from card.
+ */
+ if (mmc_host_is_spi(host))
+ err = mmc_emergency_send_cid(host, cid);
+ else
+ err = mmc_emergency_all_send_cid(cid);
+ if (err)
+ goto err;
+
+ if (memcmp(cid, card->raw_cid, sizeof(cid)) != 0) {
+ err = -ENOENT;
+ goto err;
+ }
+
+ /*
+ * For native busses: set card RCA and quit open drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_emergency_set_relative_addr(card);
+ if (err)
+ goto err;
+
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->panic_ops->set_ios(host);
+ }
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_emergency_select_card(card);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Activate high speed (if supported)
+ */
+ if ((card->ext_csd.hs_max_dtr != 0) &&
+ (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
+ err = mmc_emergency_switch(host, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 1);
+ if (err && err != -EBADMSG)
+ goto err;
+
+ if (err) {
+ pr_warn("%s: switch to highspeed failed\n",
+ __func__);
+ err = 0;
+ } else {
+ mmc_card_set_highspeed(card);
+ host->ios.timing = MMC_TIMING_MMC_HS;
+ host->panic_ops->set_ios(host);
+ }
+ }
+
+ /*
+ * Compute bus speed.
+ */
+ max_dtr = (unsigned int)-1;
+
+ if (mmc_card_highspeed(card)) {
+ if (max_dtr > card->ext_csd.hs_max_dtr)
+ max_dtr = card->ext_csd.hs_max_dtr;
+ } else if (max_dtr > card->csd.max_dtr) {
+ max_dtr = card->csd.max_dtr;
+ }
+
+ host->ios.clock = max_dtr;
+ host->panic_ops->set_ios(host);
+
+ /*
+ * Activate wide bus.
+ * By default use SDR mode for panic write
+ */
+ if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
+ (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
+ unsigned ext_csd_bit, bus_width;
+
+ if (host->caps & MMC_CAP_8_BIT_DATA) {
+ ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
+ bus_width = MMC_BUS_WIDTH_8;
+ } else {
+ ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
+ bus_width = MMC_BUS_WIDTH_4;
+ }
+
+ err = mmc_emergency_switch(host, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH, ext_csd_bit);
+
+ if (err && err != -EBADMSG)
+ goto err;
+
+ if (err) {
+ pr_warn("%s: switch to bus %dbit failed\n",
+ __func__, 1 << bus_width);
+ err = 0;
+ } else {
+ ddr = MMC_SDR_MODE;
+ host->ios.bus_width = bus_width;
+ host->panic_ops->set_ios(host);
+ }
+ }
+
+ return 0;
+err:
+ return err;
+}
+
+/*
+ * mmc_emergency_write - write 512Bytes to card in panic mode
+ * @data: data pointer which should pointed to an area no more than
+ * 512Bytes
+ * @blk_id: the block id need to write this 512B data
+ *
+ * This function is supplied to ipanic driver to write 512B data
+ * in panic mode. Please also make sure the data size should not be
+ * larger than 512B, otherwise data lossing.
+ */
+int mmc_emergency_write(char *data, unsigned int blk_id)
+{
+	struct mmc_panic_host *host = panic_host;
+	int ret;
+	if (host == NULL) {
+		pr_err("%s: no device for panic record\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!host->panic_ready) {
+		pr_err("%s: device is not ready for panic record\n", __func__);
+		return -EPERM;
+	}
+
+	if (!data) {
+		pr_err("%s: invalided writing data\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Valid sector indices are 0 .. totalsecs - 1.  The previous check
+	 * "blk_id > host->totalsecs || blk_id < 0" was wrong twice over:
+	 * blk_id is unsigned, so "< 0" was always false, and "> totalsecs"
+	 * let the one-past-the-end sector through.
+	 */
+	if (blk_id >= host->totalsecs) {
+		pr_err("%s: invalided writing blk_id\n", __func__);
+		return -EINVAL;
+	}
+	/*
+	 * everything is OK. So, let's start panic record.
+	 *
+	 * Copy the message data to logbuf
+	 */
+	memcpy(host->logbuf, data, SECTOR_SIZE);
+
+	/* hold Dekker mutex first */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex) {
+		ret = host->panic_ops->hold_mutex(host);
+		if (ret) {
+			pr_err("%s: hold Dekker mutex failed\n", __func__);
+			return ret;
+		}
+	}
+
+	ret = __mmc_emergency_write(blk_id);
+
+	/* release Dekker mutex */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex)
+		host->panic_ops->release_mutex(host);
+
+	return ret;
+}
+EXPORT_SYMBOL(mmc_emergency_write);
+
+/*
+ * mmc_emergency_init: init host controller and emmc card
+ * when kernel panic occures
+ *
+ * return value:
+ * 0 - init successfully
+ * negative value - Failed during init
+ * -ENODEV - emmc card was removed by driver
+ */
+int mmc_emergency_init(void)
+{
+	struct mmc_panic_host *host = panic_host;
+	int ret;
+	if (host == NULL) {
+		pr_err("%s: no device for panic record\n", __func__);
+		return -ENODEV;
+	}
+
+	/* quiesce the normal MMC request path before panic-mode access */
+	ret = mmc_emergency_prepare();
+	if (ret) {
+		pr_err("%s: prepare host controller failed\n", __func__);
+		return ret;
+	}
+
+	/* without panic_ops we cannot drive the controller at all */
+	if (!host->panic_ops) {
+		pr_err("%s: no panic_ops for panic host\n", __func__);
+		return -EPERM;
+	}
+
+	/*
+	 * prepare host controller
+	 */
+	if (host->panic_ops->prepare)
+		host->panic_ops->prepare(host);
+
+	/*
+	 * during init eMMC card, don't want to be interrupted by SCU FW
+	 */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex) {
+		ret = host->panic_ops->hold_mutex(host);
+		if (ret) {
+			pr_err("%s: hold Dekker mutex failed\n", __func__);
+			return ret;
+		}
+	} else if (host->panic_ops->power_on)
+		/* don't have Dekker mutex, just power on host controller */
+		host->panic_ops->power_on(host);
+
+	/*
+	 * reset card since we are not sure whether card is in a good status
+	 *
+	 * Since in panic mode, we init a old card, so all the command to be
+	 * used has no data. So we can reuse the sdhci ops
+	 */
+	ret = mmc_emergency_reinit_card();
+	if (ret) {
+		pr_info("%s: reinit card failed\n", __func__);
+		goto out;
+	}
+
+	/*
+	 * OK. we are ready
+	 */
+	mmc_emergency_ready();
+out:
+	/* release Dekker mutex (held above only when both ops exist) */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex)
+		host->panic_ops->release_mutex(host);
+
+	return ret;
+}
+EXPORT_SYMBOL(mmc_emergency_init);
+
+/*
+ * mmc_emergency_setup - init panic_host which is used for panic writing
+ * @host: mmc host
+ *
+ * This function can sample some important value for panic_host use to init
+ * host controller and card. It only works for the driver which has already
+ * called mmc_alloc_panic_host in its probing process
+ */
+void mmc_emergency_setup(struct mmc_host *mmc)
+{
+	struct mmc_panic_host *host = panic_host;
+
+	/*
+	 * mmc host has no panic host
+	 */
+	if (!mmc->phost)
+		return;
+
+	/*
+	 * before setup panic host, make sure panic host is
+	 * allocated
+	 */
+	if (host == NULL)
+		return;
+
+	/*
+	 * panic host has already been setup
+	 */
+	if (host->mmc)
+		return;
+
+	/*
+	 * mmc host didn't init card
+	 */
+	if (!mmc->card)
+		return;
+	/*
+	 * if is SDIO card or SD card, by pass
+	 */
+	if (mmc_card_sdio(mmc->card) ||
+		mmc_card_sd(mmc->card))
+		return;
+
+	host->card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL);
+	if (!host->card) {
+		pr_err("%s: cannot alloc mmc_card for panic host\n",
+				__func__);
+		return;
+	}
+
+	/*
+	 * NOTE(review): shallow copy -- any pointers inside struct mmc_card
+	 * remain shared with the live card; confirm that is intended.
+	 */
+	memcpy(host->card, mmc->card, sizeof(struct mmc_card));
+	host->caps = mmc->caps;
+	host->mmc = mmc;
+	host->ocr = mmc->ocr;
+	host->totalsecs = mmc_get_capacity(mmc->card);
+	host->max_blk_size = mmc->max_blk_size;
+	host->max_blk_count = mmc->max_blk_count;
+	host->max_req_size = mmc->max_req_size;
+	if (mmc_card_blockaddr(mmc->card))
+		host->blkaddr = 1;
+	/*
+	 * sample ios values
+	 */
+	memcpy(&host->ios, &mmc->ios, sizeof(struct mmc_ios));
+#ifdef CONFIG_MMC_CLKGATE
+	/* clock gating may have zeroed ios.clock; record pre-gating rate */
+	if (mmc->ios.clock == 0)
+		host->ios.clock = mmc->clk_old;
+#endif
+	if (host->panic_ops && host->panic_ops->setup)
+		host->panic_ops->setup(host);
+
+	return;
+}
+EXPORT_SYMBOL(mmc_emergency_setup);
+/*
+ * mmc_alloc_panic_host - used for host layer driver to alloc mmc_panic_host.
+ * @host: mmc host
+ * @ops: this is a pointer which points to mmc_host_panic_ops. This ops should
+ * be defined in host layer driver
+ *
+ * This function need to know the mmc_host_panic_ops, host layer driver should
+ * call this function during probing.
+ *
+ */
+void mmc_alloc_panic_host(struct mmc_host *host,
+		const struct mmc_host_panic_ops *ops)
+{
+	if (panic_host) {
+		pr_info("%s: already allocate panic host\n", __func__);
+		return;
+	}
+
+	panic_host = kzalloc(sizeof(struct mmc_panic_host), GFP_KERNEL);
+	if (!panic_host) {
+		pr_err("%s %s: panic structure allocate error\n",
+				__func__, mmc_hostname(host));
+		return;
+	}
+	/*
+	 * allocate log buffer and DMA buffer
+	 * log buffer size is 512
+	 */
+	panic_host->logbuf = kzalloc(SECTOR_SIZE, GFP_KERNEL);
+	if (!panic_host->logbuf) {
+		pr_err("%s %s: log buf allocate error\n",
+				__func__, mmc_hostname(host));
+		goto free_panic_host;
+	}
+
+	panic_host->dmabuf = dma_map_single(host->parent, panic_host->logbuf,
+			SECTOR_SIZE, DMA_TO_DEVICE);
+	/*
+	 * 0 can be a valid DMA address; dma_mapping_error() is the only
+	 * correct way to detect a failed dma_map_single().
+	 */
+	if (dma_mapping_error(host->parent, panic_host->dmabuf)) {
+		pr_err("%s %s: DMA buf allocate error\n",
+				__func__, mmc_hostname(host));
+		goto free_logbuf;
+	}
+
+	panic_host->panic_ops = ops;
+	panic_host->mmc = NULL;
+	host->phost = panic_host;
+
+	return;
+
+free_logbuf:
+	kfree(panic_host->logbuf);
+free_panic_host:
+	kfree(panic_host);
+	/* reset the global so a later retry does not see a freed pointer */
+	panic_host = NULL;
+}
+EXPORT_SYMBOL(mmc_alloc_panic_host);
{ 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 };
static const unsigned int speed_unit[8] =
{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
-
-
typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
const unsigned char *, unsigned);
{ 0x22, 0, cistpl_funce },
};
+/***************************** WP B0 WA *******************************/
+
+/* replacement CIS tuple codes, indexed by tuple count during re-read */
+unsigned char wp_tpl_codes[] = {
+	0x21, 0x22, 0x20, 0x21, 0x22, 0x91, 0x15,
+};
+
+/* tuple link (body length) matching each entry in wp_tpl_codes */
+unsigned char wp_tpl_links[] = {
+	0x2, 0x4, 0x4, 0x2, 0x2a, 0x2, 0x19,
+};
+
+/* canned tuple bodies (max 42 bytes each) substituted for the broken CIS */
+unsigned char wp_tuple_data[7][42] = {
+	{
+		12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0
+	},
+	{
+		0, 0, 2, 11, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0
+	},
+	{
+		137, 0, 96, 114, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0
+	},
+	{
+		12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0
+	},
+	{
+		1, 1, 48, 0, 0, 3, 0, 2, 0, 128,
+		255, 0, 7, 0, 0, 7, 7, 255, 0, 16,
+		0, 200, 100, 0, 0, 0, 0, 0, 16, 1,
+		33, 2, 0, 0, 0, 0, 32, 4, 137, 0,
+		96, 114
+	},
+	{
+		7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0
+	},
+	{
+		8, 0, 73, 110, 116, 101, 108, 40, 82, 41,
+		32, 87, 105, 114, 101, 108, 101, 115, 115, 32,
+		67, 111, 114, 101, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0
+	},
+};
+
+/**********************************************************************/
+
static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
{
int ret;
struct sdio_func_tuple *this, **prev;
unsigned i, ptr = 0;
-
+ int count = 0;
+ bool replace = false;
/*
* Note that this works for the common CIS (function number 0) as
* well as a function's CIS * since SDIO_CCCR_CIS and SDIO_FBR_CIS
fn = 0;
ret = mmc_io_rw_direct(card, 0, 0,
- SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
+ SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i,
+ 0, &x);
if (ret)
return ret;
ptr |= x << (i * 8);
BUG_ON(*prev);
+ if (card->quirks & MMC_QUIRK_NON_STD_CIS)
+ count = (func) ? 2 : -1;
+
do {
unsigned char tpl_code, tpl_link;
+ if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+ count++;
+ if ((func && (count > 6)) || (!func && (count > 2))) {
+ pr_debug("%s: break: count %d\n",
+ __func__, count);
+ break;
+ }
+ }
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
break;
-
+ if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+ /* if the first tuple is 0 - then it's b0, so replace */
+ if ((count < 4) && (tpl_code == 0)) {
+ pr_info("%s card with non std CIS",
+ mmc_hostname(card->host));
+ /* disable UHS on buggy cards */
+ card->sw_caps.sd3_bus_mode = 0;
+ replace = true;
+ }
+ }
/* 0xff means we're done */
if (tpl_code == 0xff)
break;
/* null entries have no link field or data */
- if (tpl_code == 0x00)
- continue;
+ if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+ if ((tpl_code == 0x00) && (!replace))
+ continue;
+ } else {
+ if (tpl_code == 0x00)
+ continue;
+ }
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
if (ret)
ptr + i, 0, &this->data[i]);
if (ret)
break;
+ pr_debug("%d, ", this->data[i]);
}
if (ret) {
kfree(this);
}
/* Try to parse the CIS tuple */
- ret = cis_tpl_parse(card, func, "CIS",
- cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
- tpl_code, this->data, tpl_link);
+ if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+ if (!replace)
+ ret = cis_tpl_parse(card, func, "CIS",
+ cis_tpl_list,
+ ARRAY_SIZE(cis_tpl_list),
+ tpl_code, this->data,
+ tpl_link);
+ else
+ ret = cis_tpl_parse(card, func, "CIS",
+ cis_tpl_list,
+ ARRAY_SIZE(cis_tpl_list),
+ wp_tpl_codes[count],
+ wp_tuple_data[count],
+ wp_tpl_links[count]);
+ } else {
+ ret = cis_tpl_parse(card, func, "CIS",
+ cis_tpl_list,
+ ARRAY_SIZE(cis_tpl_list),
+ tpl_code, this->data,
+ tpl_link);
+ }
+
if (ret == -EILSEQ || ret == -ENOENT) {
/*
* The tuple is unknown or known but not parsed.
#include <linux/pm_runtime.h>
#include <linux/mmc/sdhci-pci-data.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_scu_flis.h>
+#include <asm/intel_scu_pmic.h>
+
#include "sdhci.h"
+/* Settle down values copied from broadcom reference design. */
+#define DELAY_CARD_INSERTED 200
+#define DELAY_CARD_REMOVED 50
+
/*
* PCI device IDs
*/
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
-#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
-#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
-#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
/*
* PCI registers
#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
#define MAX_SLOTS 8
+#define IPC_EMMC_MUTEX_CMD 0xEE
+
+/* CLV SD card power resource */
+
+#define VCCSDIO_ADDR 0xd5
+#define VCCSDIO_OFF 0x4
+#define VCCSDIO_NORMAL 0x7
+#define ENCTRL0_ISOLATE 0x55555557
+#define ENCTRL1_ISOLATE 0x5555
+#define STORAGESTIO_FLISNUM 0x8
+#define ENCTRL0_OFF 0x10
+#define ENCTRL1_OFF 0x11
struct sdhci_pci_chip;
struct sdhci_pci_slot;
int rst_n_gpio;
int cd_gpio;
int cd_irq;
+ bool dev_power;
+ struct mutex power_lock;
+ bool dma_enabled;
};
struct sdhci_pci_chip {
unsigned int quirks;
unsigned int quirks2;
bool allow_runtime_pm;
+ unsigned int autosuspend_delay;
const struct sdhci_pci_fixes *fixes;
int num_slots; /* Slots on controller */
struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
+
+ unsigned int enctrl0_orig;
+ unsigned int enctrl1_orig;
};
#endif
+#define MFD_SDHCI_DEKKER_BASE 0xffff7fb0
+/*
+ * Map the shared-SRAM Dekker mutex used to arbitrate eMMC ownership
+ * between the IA core and the SCU.  The SRAM address is queried from
+ * the SCU over IPC; on failure a hard-coded default is used.
+ */
+static void mfd_emmc_mutex_register(struct sdhci_pci_slot *slot)
+{
+	u32 mutex_var_addr;
+#ifdef CONFIG_INTEL_SCU_IPC
+	int err;
+
+	err = rpmsg_send_generic_command(IPC_EMMC_MUTEX_CMD, 0,
+			NULL, 0, &mutex_var_addr, 1);
+	if (err) {
+		dev_err(&slot->chip->pdev->dev, "IPC error: %d\n", err);
+		dev_info(&slot->chip->pdev->dev, "Specify mutex address\n");
+		/*
+		 * Since we failed to get mutex sram address, specify it
+		 */
+		mutex_var_addr = MFD_SDHCI_DEKKER_BASE;
+	}
+#else
+	mutex_var_addr = MFD_SDHCI_DEKKER_BASE;
+#endif
+
+	/* 3 housekeeping mutex variables, 12 bytes length */
+	slot->host->sram_addr = ioremap_nocache(mutex_var_addr,
+			3 * sizeof(u32));
+	if (!slot->host->sram_addr)
+		dev_err(&slot->chip->pdev->dev, "ioremap failed!\n");
+	else {
+		dev_info(&slot->chip->pdev->dev, "mapped addr: %p\n",
+				slot->host->sram_addr);
+		dev_info(&slot->chip->pdev->dev,
+			"current eMMC owner: %d, IA req: %d, SCU req: %d\n",
+				readl(slot->host->sram_addr +
+					DEKKER_EMMC_OWNER_OFFSET),
+				readl(slot->host->sram_addr +
+					DEKKER_IA_REQ_OFFSET),
+				readl(slot->host->sram_addr +
+					DEKKER_SCU_REQ_OFFSET));
+	}
+	spin_lock_init(&slot->host->dekker_lock);
+}
+
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
+ switch (slot->chip->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
+ mfd_emmc_mutex_register(slot);
+ sdhci_alloc_panic_host(slot->host);
+ slot->host->mmc->caps2 |= MMC_CAP2_INIT_CARD_SYNC;
+ break;
+ case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
+ break;
+ }
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC |
MMC_CAP2_HC_ERASE_SZ;
return 0;
}
+/* Undo mfd_emmc_probe_slot: unmap the Dekker-mutex SRAM for eMMC0 */
+static void mfd_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+	switch (slot->chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
+		if (slot->host->sram_addr)
+			iounmap(slot->host->sram_addr);
+		break;
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
+		break;
+	}
+}
+
static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
return 0;
}
+#ifdef CONFIG_INTEL_SCU_FLIS
+/*
+ * Save the current power and shim status, if they are on, turn them off.
+ */
+static int ctp_sd_card_power_save(struct sdhci_pci_slot *slot)
+{
+	int err;
+	u16 addr;
+	u8 data;
+	struct sdhci_pci_chip *chip;
+
+	/* nothing to do if the slot power is already off */
+	if (!slot->dev_power)
+		return 0;
+
+	chip = slot->chip;
+	/* snapshot current shim (FLIS) settings so restore can replay them */
+	err = intel_scu_ipc_read_shim(&chip->enctrl0_orig,
+			STORAGESTIO_FLISNUM, ENCTRL0_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL0 read failed, err %d\n",
+				chip->pdev->device, err);
+		chip->enctrl0_orig = ENCTRL0_ISOLATE;
+		chip->enctrl1_orig = ENCTRL1_ISOLATE;
+		/*
+		 * stop to shut down VCCSDIO, since we cannot recover
+		 * it.
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+	err = intel_scu_ipc_read_shim(&chip->enctrl1_orig,
+			STORAGESTIO_FLISNUM, ENCTRL1_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL1 read failed, err %d\n",
+				chip->pdev->device, err);
+		chip->enctrl0_orig = ENCTRL0_ISOLATE;
+		chip->enctrl1_orig = ENCTRL1_ISOLATE;
+		/*
+		 * stop to shut down VCCSDIO, since we cannot recover
+		 * it.
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+
+	/* isolate shim */
+	err = intel_scu_ipc_write_shim(ENCTRL0_ISOLATE,
+			STORAGESTIO_FLISNUM, ENCTRL0_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL0 ISOLATE failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * stop to shut down VCCSDIO. Without isolate shim, the power
+		 * may have leak if turn off VCCSDIO.
+		 * during S3 resuming, shim and VCCSDIO will be recofigured
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+
+	err = intel_scu_ipc_write_shim(ENCTRL1_ISOLATE,
+			STORAGESTIO_FLISNUM, ENCTRL1_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL1 ISOLATE failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * stop to shut down VCCSDIO. Without isolate shim, the power
+		 * may have leak if turn off VCCSDIO.
+		 * during S3 resuming, shim and VCCSDIO will be recofigured
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+
+	/* only after isolation is it safe to cut VCCSDIO at the PMIC */
+	addr = VCCSDIO_ADDR;
+	data = VCCSDIO_OFF;
+	err = intel_scu_ipc_writev(&addr, &data, 1);
+	if (err) {
+		pr_err("SDHCI device %04X: VCCSDIO turn off failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * during S3 resuming, VCCSDIO will be recofigured
+		 * this should not block system entering S3.
+		 */
+	}
+
+	slot->dev_power = false;
+	return 0;
+}
+
+/*
+ * Restore the power and shim if they are original on.
+ */
+static int ctp_sd_card_power_restore(struct sdhci_pci_slot *slot)
+{
+	int err;
+	u16 addr;
+	u8 data;
+	struct sdhci_pci_chip *chip;
+
+	/* nothing to do if the slot power is already on */
+	if (slot->dev_power)
+		return 0;
+
+	chip = slot->chip;
+
+	/* power VCCSDIO back up at the PMIC before touching the shim */
+	addr = VCCSDIO_ADDR;
+	data = VCCSDIO_NORMAL;
+	err = intel_scu_ipc_writev(&addr, &data, 1);
+	if (err) {
+		pr_err("SDHCI device %04X: VCCSDIO turn on failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * VCCSDIO trun on failed. This may impact the SD card
+		 * init and the read/write functions. We just report a
+		 * warning, and go on to have a try. Anyway, SD driver
+		 * can encounter error if powering up is failed
+		 */
+		WARN_ON(err);
+	}
+
+	if (chip->enctrl0_orig == ENCTRL0_ISOLATE &&
+		chip->enctrl1_orig == ENCTRL1_ISOLATE)
+		/* means we needn't to reconfigure the shim */
+		return 0;
+
+	/* reconnect shim */
+	err = intel_scu_ipc_write_shim(chip->enctrl0_orig,
+			STORAGESTIO_FLISNUM, ENCTRL0_OFF);
+
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL0 CONNECT shim failed, err %d\n",
+				chip->pdev->device, err);
+		/* keep on setting enctrl1, but report a waring */
+		WARN_ON(err);
+	}
+
+	err = intel_scu_ipc_write_shim(chip->enctrl1_orig,
+			STORAGESTIO_FLISNUM, ENCTRL1_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL1 CONNECT shim failed, err %d\n",
+				chip->pdev->device, err);
+		/* leave this error to driver, but report a warning */
+		WARN_ON(err);
+	}
+
+	slot->dev_power = true;
+	return 0;
+}
+#else
+static int ctp_sd_card_power_save(struct sdhci_pci_slot *slot)
+{
+ return 0;
+}
+static int ctp_sd_card_power_restore(struct sdhci_pci_slot *slot)
+{
+ return 0;
+}
+#endif
+
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
.probe_slot = mrst_hc_probe_slot,
.probe = mrst_hc_probe,
};
+/*
+ * Probe hook for the Clovertrail SD slot: read back the PMIC VCCSDIO
+ * register to learn the initial power state of the card rail.
+ */
+static int ctp_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+#ifdef CONFIG_INTEL_SCU_IPC
+	int err;
+	u16 addr;
+#endif
+	/* default: without SCU IPC the rail is assumed off */
+	u8 data = VCCSDIO_OFF;
+
+	if (!slot || !slot->chip || !slot->chip->pdev)
+		return -ENODEV;
+
+	/* only the CLV SD slot has this PMIC-controlled power rail */
+	if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_CLV_SDIO0)
+		return 0;
+
+	mutex_init(&slot->power_lock);
+
+	slot->host->flags |= SDHCI_POWER_CTRL_DEV;
+
+#ifdef CONFIG_INTEL_SCU_IPC
+	addr = VCCSDIO_ADDR;
+	err = intel_scu_ipc_readv(&addr, &data, 1);
+	if (err) {
+		/* suppose dev_power is true */
+		slot->dev_power = true;
+		return 0;
+	}
+#endif
+	if (data == VCCSDIO_NORMAL)
+		slot->dev_power = true;
+	else
+		slot->dev_power = false;
+
+	return 0;
+}
+
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
+ .probe_slot = ctp_sd_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
.probe_slot = mfd_emmc_probe_slot,
+ .remove_slot = mfd_emmc_remove_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+
return 0;
}
static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
+ switch (slot->chip->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT_SDIO:
+ /* add a delay after runtime resuming back from D0i3 */
+ slot->chip->pdev->d3_delay = 10;
+ /* reduce the auto suspend delay for SDIO to be 500ms */
+ slot->chip->autosuspend_delay = 500;
+ break;
+ }
+
return 0;
}
-static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+#define TNG_IOAPIC_IDX 0xfec00000
+/* Map the Tangier IOAPIC redirection-table registers for later use */
+static void mrfl_ioapic_rte_reg_addr_map(struct sdhci_pci_slot *slot)
+{
+	slot->host->rte_addr = ioremap_nocache(TNG_IOAPIC_IDX, 256);
+	if (!slot->host->rte_addr)
+		dev_err(&slot->chip->pdev->dev, "rte_addr ioremap fail!\n");
+	else
+		dev_info(&slot->chip->pdev->dev, "rte_addr mapped addr: %p\n",
+			slot->host->rte_addr);
+}
+
+/* Define Host controllers for Intel Merrifield platform */
+#define INTEL_MRFL_EMMC_0 0
+#define INTEL_MRFL_EMMC_1 1
+#define INTEL_MRFL_SD 2
+#define INTEL_MRFL_SDIO 3
+
+static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	switch (PCI_FUNC(slot->chip->pdev->devfn)) {
+	case INTEL_MRFL_EMMC_0:
+		sdhci_alloc_panic_host(slot->host);
+		slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA |
+					 MMC_CAP_NONREMOVABLE |
+					 MMC_CAP_1_8V_DDR;
+		slot->host->mmc->caps2 |= MMC_CAP2_POLL_R1B_BUSY |
+					  MMC_CAP2_INIT_CARD_SYNC |
+					  MMC_CAP2_CACHE_CTRL;
+		if (slot->chip->pdev->revision == 0x1) { /* B0 stepping */
+			slot->host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+			/* WA for async abort silicon issue */
+			slot->host->quirks2 |= SDHCI_QUIRK2_2MS_DELAY |
+					       SDHCI_QUIRK2_WAIT_FOR_IDLE |
+					       SDHCI_QUIRK2_TUNING_POLL;
+		}
+		mrfl_ioapic_rte_reg_addr_map(slot);
+		break;
+	case INTEL_MRFL_SD:
+		slot->host->quirks2 |= SDHCI_QUIRK2_WAIT_FOR_IDLE;
+		/* Force 3.3V signal voltage */
+		slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+		slot->host->mmc->caps2 |= MMC_CAP2_FIXED_NCRC;
+		break;
+	case INTEL_MRFL_SDIO:
+		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+		break;
+	}
+
+	/*
+	 * slot->data may legitimately be NULL (no platform data registered);
+	 * guard it before dereferencing, as the Moorefield probe callbacks
+	 * in this file already do.
+	 */
+	if (slot->data) {
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HIGH_SPEED) {
+			slot->host->quirks2 |= SDHCI_QUIRK2_DISABLE_HIGH_SPEED;
+			slot->host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+		}
+
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_EMMC_BOOT_PART)
+			slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW) {
+			dev_info(&slot->chip->pdev->dev, "Disable MMC Func %d.\n",
+				PCI_FUNC(slot->chip->pdev->devfn));
+			ret = -ENODEV;
+		}
+	}
+
+	return ret;
+}
+
+/* Undo intel_mrfl_mmc_probe_slot: release the IOAPIC RTE mapping on eMMC0 */
+static void intel_mrfl_mmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+	if (PCI_FUNC(slot->chip->pdev->devfn) == INTEL_MRFL_EMMC_0)
+		if (slot->host->rte_addr)
+			iounmap(slot->host->rte_addr);
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+ SDHCI_QUIRK2_HIGH_SPEED_SET_LATE |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
- .probe_slot = byt_emmc_probe_slot,
+ .probe_slot = intel_mrfl_mmc_probe_slot,
+ .remove_slot = intel_mrfl_mmc_remove_slot,
};
-static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
- .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+/* Probe hook for the Moorefield eMMC slot: set caps and apply quirks */
+static int intel_moor_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA |
+				 MMC_CAP_NONREMOVABLE |
+				 MMC_CAP_1_8V_DDR;
+
+	sdhci_alloc_panic_host(slot->host);
+
+	slot->host->mmc->caps2 |= MMC_CAP2_POLL_R1B_BUSY |
+				  MMC_CAP2_INIT_CARD_SYNC;
+
+	/* Enable HS200 and HS400 */
+	slot->host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+
+	if (slot->chip->pdev->revision == 0x1) { /* B0 stepping */
+		slot->host->mmc->caps2 |= MMC_CAP2_HS400_1_8V_DDR;
+	}
+
+	/* platform quirks may strip the high-speed modes enabled above */
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HIGH_SPEED) {
+			slot->host->quirks2 |= SDHCI_QUIRK2_DISABLE_HIGH_SPEED;
+			slot->host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+			slot->host->mmc->caps2 &= ~MMC_CAP2_HS200_1_8V_SDR;
+			if (slot->chip->pdev->revision == 0x1) {
+				slot->host->mmc->caps2 &=
+					~MMC_CAP2_HS400_1_8V_DDR;
+			}
+		}
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_EMMC_BOOT_PART)
+			slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+
+	return 0;
+}
+
+static void intel_moor_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+/* Probe hook for the Moorefield SD slot; -ENODEV if the HW is fused off */
+static int intel_moor_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW)
+			ret = -ENODEV;
+
+	return ret;
+}
+
+static void intel_moor_sd_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+/* Probe hook for the Moorefield SDIO slot; -ENODEV if the HW is fused off */
+static int intel_moor_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW)
+			ret = -ENODEV;
+
+	return ret;
+}
+
+static void intel_moor_sdio_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_moor_emmc = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+ SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
.allow_runtime_pm = true,
- .probe_slot = byt_sdio_probe_slot,
+ .probe_slot = intel_moor_emmc_probe_slot,
+ .remove_slot = intel_moor_emmc_remove_slot,
};
-static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+static const struct sdhci_pci_fixes sdhci_intel_moor_sd = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+ SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
+ .allow_runtime_pm = true,
+ .probe_slot = intel_moor_sd_probe_slot,
+ .remove_slot = intel_moor_sd_remove_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_moor_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+ SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
+ .allow_runtime_pm = true,
+ .probe_slot = intel_moor_sdio_probe_slot,
+ .remove_slot = intel_moor_sdio_remove_slot,
};
/* O2Micro extra registers */
{
.vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BYT_EMMC,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BYT_SD,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRFL_MMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
},
{
* *
\*****************************************************************************/
+/*
+ * driver_for_each_device() callback: for every slot of one sdhci-pci
+ * device, retry acquiring its regulator; on success kick card detection.
+ * Always returns 0 so iteration continues over the remaining devices.
+ */
+static int try_request_regulator(struct device *dev, void *data)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct sdhci_pci_chip *chip;
+	struct sdhci_pci_slot *slot;
+	struct sdhci_host *host;
+	int i;
+
+	chip = pci_get_drvdata(pdev);
+	if (!chip)
+		return 0;
+
+	for (i = 0; i < chip->num_slots; i++) {
+		slot = chip->slots[i];
+		if (!slot)
+			continue;
+		host = slot->host;
+		if (!host)
+			continue;
+		if (sdhci_try_get_regulator(host) == 0)
+			mmc_detect_change(host->mmc, 0);
+	}
+	return 0;
+}
+
+static struct pci_driver sdhci_driver;
+
+/**
+ * sdhci_pci_request_regulators - retry requesting regulators of
+ * all sdhci-pci devices
+ *
+ * One some platforms, the regulators associated to the mmc are available
+ * late in the boot.
+ * sdhci_pci_request_regulators() is called by platform code to retry
+ * getting the regulators associated to pci sdhcis
+ */
+
+int sdhci_pci_request_regulators(void)
+{
+	/* driver not yet registered */
+	/* NOTE(review): peeks at driver core private member ->p; fragile */
+	if (!sdhci_driver.driver.p)
+		return 0;
+	return driver_for_each_device(&sdhci_driver.driver,
+			NULL, NULL, try_request_regulator);
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_request_regulators);
+
static int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
int ret;
slot = sdhci_priv(host);
+ if (slot->dma_enabled)
+ return 0;
+
pdev = slot->chip->pdev;
if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
pci_set_master(pdev);
+ slot->dma_enabled = true;
+
return 0;
}
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
int rst_n_gpio = slot->rst_n_gpio;
+ u8 pwr;
+
+ if (gpio_is_valid(rst_n_gpio)) {
+ gpio_set_value_cansleep(rst_n_gpio, 0);
+ /* For eMMC, minimum is 1us but give it 10us for good measure */
+ udelay(10);
+ gpio_set_value_cansleep(rst_n_gpio, 1);
+ /*
+ * For eMMC, minimum is 200us,
+ * but give it 300us for good measure
+ */
+ usleep_range(300, 1000);
+ } else if (slot->host->mmc->caps & MMC_CAP_HW_RESET) {
+ /* first set bit4 of power control register */
+ pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ pwr |= SDHCI_HW_RESET;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ /* keep the same delay for safe */
+ usleep_range(300, 1000);
+ /* then clear bit4 of power control register */
+ pwr &= ~SDHCI_HW_RESET;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ /* keep the same delay for safe */
+ usleep_range(300, 1000);
+ }
+}
- if (!gpio_is_valid(rst_n_gpio))
+/*
+ * Power the host controller back up (panic path):  prefer the platform
+ * power_up callback, fall back to standard PCI D0 transition, then
+ * sanity-check the controller by reading a register.
+ */
+static int sdhci_pci_power_up_host(struct sdhci_host *host)
+{
+	int ret = -ENOSYS;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (slot->data && slot->data->power_up)
+		ret = slot->data->power_up(host);
+	else {
+		/*
+		 * use standard PCI power up function
+		 */
+		ret = pci_set_power_state(slot->chip->pdev, PCI_D0);
+		/* give the controller time to settle after D0 entry */
+		mdelay(50);
+	}
+	/*
+	 * If there is no power_up callbacks in platform data,
+	 * return -ENOSYS;
+	 */
+	if (ret)
+		return ret;
+
+	/*
+	 * after power up host, let's have a little test
+	 */
+
+	/* all-ones readback means the controller is not responding */
+	if (sdhci_readl(host, SDHCI_HOST_VERSION) ==
+			0xffffffff) {
+		pr_err("%s: power up sdhci host failed\n",
+				__func__);
+		return -EPERM;
+	}
+
+	pr_info("%s: host controller power up is done\n", __func__);
+
+	return 0;
+}
+
+static void sdhci_pci_set_dev_power(struct sdhci_host *host, bool poweron)
+{
+ struct sdhci_pci_slot *slot;
+ struct sdhci_pci_chip *chip;
+
+ slot = sdhci_priv(host);
+ if (slot)
+ chip = slot->chip;
+ else
return;
- gpio_set_value_cansleep(rst_n_gpio, 0);
- /* For eMMC, minimum is 1us but give it 10us for good measure */
- udelay(10);
- gpio_set_value_cansleep(rst_n_gpio, 1);
- /* For eMMC, minimum is 200us but give it 300us for good measure */
- usleep_range(300, 1000);
+
+ /* only available for Intel CTP platform */
+ if (chip && chip->pdev &&
+ chip->pdev->device == PCI_DEVICE_ID_INTEL_CLV_SDIO0) {
+ mutex_lock(&slot->power_lock);
+ if (poweron)
+ ctp_sd_card_power_restore(slot);
+ else
+ ctp_sd_card_power_save(slot);
+ mutex_unlock(&slot->power_lock);
+ }
+}
+
+/*
+ * Card-detect callback: prefer the dedicated CD GPIO (active low),
+ * otherwise fall back to the controller's present-state register.
+ */
+static int sdhci_pci_get_cd(struct sdhci_host *host)
+{
+	bool present;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (gpio_is_valid(slot->cd_gpio))
+		return gpio_get_value(slot->cd_gpio) ? 0 : 1;
+
+	/* If nonremovable or polling, assume that the card is always present */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = true;
+	else
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT;
+
+	return present;
+}
+
+/*
+ * Per-device re-tuning timer exponent; 0 disables periodic re-tuning.
+ * NOTE(review): comments suggest the value is a power-of-two seconds
+ * exponent (4 -> ~8s?, 8 -> 128s) -- confirm against the SDHCI core.
+ */
+static int sdhci_pci_get_tuning_count(struct sdhci_host *host)
+{
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+	int tuning_count = 0;
+
+	switch (slot->chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_BYT_EMMC45:
+		tuning_count = 4; /* using 8 seconds, this can be tuning */
+		break;
+	case PCI_DEVICE_ID_INTEL_MRFL_MMC:
+		tuning_count = 8; /* using 128 seconds, this can be tuning */
+		break;
+	default:
+		break;
+	}
+
+	return tuning_count;
+}
+
+/*
+ * Delegate FLIS (pin buffer) validation for a clock change to the
+ * platform callback; -ENOSYS when no platform hook is provided.
+ */
+static int sdhci_gpio_buf_check(struct sdhci_host *host, unsigned int clk)
+{
+	int ret = -ENOSYS;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (slot->data && slot->data->flis_check)
+		ret = slot->data->flis_check(host, clk);
+
+	return ret;
+}
static const struct sdhci_ops sdhci_pci_ops = {
.enable_dma = sdhci_pci_enable_dma,
.platform_bus_width = sdhci_pci_bus_width,
.hw_reset = sdhci_pci_hw_reset,
+ .power_up_host = sdhci_pci_power_up_host,
+ .set_dev_power = sdhci_pci_set_dev_power,
+ .get_cd = sdhci_pci_get_cd,
+ .get_tuning_count = sdhci_pci_get_tuning_count,
+ .gpio_buf_check = sdhci_gpio_buf_check,
};
/*****************************************************************************\
sdhci_enable_irq_wakeups(slot->host);
pm_flags |= slot_pm_flags;
+ slot->dma_enabled = false;
}
if (chip->fixes && chip->fixes->suspend) {
goto err_pci_suspend;
}
- pci_save_state(pdev);
- if (pm_flags & MMC_PM_KEEP_POWER) {
- if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
- pci_pme_active(pdev, true);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- }
- pci_set_power_state(pdev, PCI_D3hot);
- } else {
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
return 0;
err_pci_suspend:
if (!chip)
return 0;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
if (chip->fixes && chip->fixes->resume) {
ret = chip->fixes->resume(chip);
if (ret)
.runtime_idle = sdhci_pci_runtime_idle,
};
+/*
+ * Virtual card-detect callback, registered through the platform data's
+ * register_embedded_control() hook in sdhci_pci_probe_slot().  Schedules
+ * an mmc rescan with different debounce delays for insertion vs. removal.
+ */
+static void sdhci_hsmmc_virtual_detect(void *dev_id, int carddetect)
+{
+	struct sdhci_host *host = dev_id;
+
+	if (carddetect)
+		mmc_detect_change(host->mmc,
+			msecs_to_jiffies(DELAY_CARD_INSERTED));
+	else
+		mmc_detect_change(host->mmc,
+			msecs_to_jiffies(DELAY_CARD_REMOVED));
+}
+
+
/*****************************************************************************\
* *
* Device probing/removal *
slot->rst_n_gpio = -EINVAL;
slot->cd_gpio = -EINVAL;
+ host->hw_name = "PCI";
+ host->ops = &sdhci_pci_ops;
+ host->quirks = chip->quirks;
+ host->quirks2 = chip->quirks2;
+
/* Retrieve platform data if there is any */
if (*sdhci_pci_get_data)
slot->data = sdhci_pci_get_data(pdev, slotno);
if (slot->data) {
+ slot->data->pdev = pdev;
if (slot->data->setup) {
ret = slot->data->setup(slot->data);
if (ret) {
}
slot->rst_n_gpio = slot->data->rst_n_gpio;
slot->cd_gpio = slot->data->cd_gpio;
+
+ if (slot->data->quirks)
+ host->quirks2 |= slot->data->quirks;
+
+ if (slot->data->register_embedded_control)
+ slot->data->register_embedded_control(host,
+ sdhci_hsmmc_virtual_detect);
}
- host->hw_name = "PCI";
- host->ops = &sdhci_pci_ops;
- host->quirks = chip->quirks;
- host->quirks2 = chip->quirks2;
host->irq = pdev->irq;
host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
host->mmc->slotno = slotno;
+
+ if (host->quirks2 & SDHCI_QUIRK2_DISABLE_MMC_CAP_NONREMOVABLE)
+ host->mmc->caps &= ~MMC_CAP_NONREMOVABLE;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
ret = sdhci_add_host(host);
sdhci_free_host(slot->host);
}
-static void sdhci_pci_runtime_pm_allow(struct device *dev)
+static void sdhci_pci_runtime_pm_allow(struct sdhci_pci_chip *chip)
{
+ struct device *dev;
+
+ if (!chip || !chip->pdev)
+ return;
+
+ dev = &chip->pdev->dev;
pm_runtime_put_noidle(dev);
pm_runtime_allow(dev);
- pm_runtime_set_autosuspend_delay(dev, 50);
+ if (chip->autosuspend_delay)
+ pm_runtime_set_autosuspend_delay(dev, chip->autosuspend_delay);
+ else
+ pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, 1);
}
if (slots == 0)
return -ENODEV;
- BUG_ON(slots > MAX_SLOTS);
+ if (slots > MAX_SLOTS) {
+ dev_err(&pdev->dev, "Invalid number of the slots. Aborting.\n");
+ return -ENODEV;
+ }
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
if (ret)
first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
- if (first_bar > 5) {
+ if (first_bar > 4) {
dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
return -ENODEV;
}
}
slots = chip->num_slots; /* Quirk may have changed this */
+ /* slots maybe changed again, so check again */
+ if (slots > MAX_SLOTS) {
+ dev_err(&pdev->dev, "Invalid number of the slots. Aborting.\n");
+ goto free;
+ }
for (i = 0; i < slots; i++) {
slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
}
if (chip->allow_runtime_pm)
- sdhci_pci_runtime_pm_allow(&pdev->dev);
+ sdhci_pci_runtime_pm_allow(chip);
return 0;
pci_disable_device(pdev);
}
+/*
+ * PCI .shutdown hook.
+ *
+ * CLV SDIO0: drop SD-card power on every slot that uses driver-side
+ * power control so the card is powered down across shutdown.
+ * MRFL MMC: pin the device active and disable runtime PM so no runtime
+ * transition races with the shutdown path.
+ */
+static void sdhci_pci_shutdown(struct pci_dev *pdev)
+{
+	struct sdhci_pci_chip *chip;
+	int i;
+
+	chip = pci_get_drvdata(pdev);
+
+	if (!chip || !chip->pdev)
+		return;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_CLV_SDIO0:
+		for (i = 0; i < chip->num_slots; i++) {
+			if (chip->slots[i]->host->flags & SDHCI_POWER_CTRL_DEV)
+				ctp_sd_card_power_save(chip->slots[i]);
+		}
+		break;
+	case PCI_DEVICE_ID_INTEL_MRFL_MMC:
+		if (chip->allow_runtime_pm) {
+			pm_runtime_get_sync(&pdev->dev);
+			pm_runtime_disable(&pdev->dev);
+			pm_runtime_put_noidle(&pdev->dev);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
static struct pci_driver sdhci_driver = {
.name = "sdhci-pci",
.id_table = pci_ids,
.probe = sdhci_pci_probe,
.remove = sdhci_pci_remove,
+ .shutdown = sdhci_pci_shutdown,
.driver = {
.pm = &sdhci_pci_pm_ops
},
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios);
#ifdef CONFIG_PM_RUNTIME
static int sdhci_runtime_pm_get(struct sdhci_host *host);
sdhci_set_card_detection(host, false);
}
+/*
+ * Busy-wait for approximately 'delay' microseconds while keeping the
+ * host interface active: each 4 us step also performs a register read
+ * so the controller is not clock-gated during the wait.
+ */
+static void sdhci_busy_wait(struct mmc_host *mmc, u32 delay)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	/* totally 'delay' us, each loop 4us */
+	u32 loop = delay / 4;
+	while (loop) {
+		/* have a delay here */
+		udelay(4);
+		/* read register to make sure host won't be clock gated */
+		sdhci_readw(host, SDHCI_HOST_VERSION);
+		loop--;
+	}
+}
+
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
unsigned long timeout;
host->clock = 0;
/* Wait max 100 ms */
- timeout = 100;
+ timeout = 10000;
/* hw clears the bit when it's done */
while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
return;
}
timeout--;
- mdelay(1);
+ udelay(10);
}
if (host->ops->platform_reset_exit)
{
u8 ctrl;
+ if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+ return;
+
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
ctrl |= SDHCI_CTRL_LED;
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
{
u8 ctrl;
+ if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+ return;
+
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
ctrl &= ~SDHCI_CTRL_LED;
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
struct sdhci_host *host = container_of(led, struct sdhci_host, led);
unsigned long flags;
+ if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+ return;
+
spin_lock_irqsave(&host->lock, flags);
if (host->runtime_suspended)
* upon error conditions.
*/
if (data->error) {
+ if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+ sdhci_busy_wait(host->mmc, 1000);
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
}
WARN_ON(host->cmd);
/* Wait max 10 ms */
- timeout = 10;
+ timeout = 1000;
mask = SDHCI_CMD_INHIBIT;
if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
return;
}
timeout--;
- mdelay(1);
+ udelay(10);
}
mod_timer(&host->timer, jiffies + 10 * HZ);
host->cmd = cmd;
+ host->r1b_busy_end = 0;
sdhci_prepare_data(host, cmd);
case SDHCI_CTRL_UHS_DDR50:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
break;
+ case SDHCI_CTRL_HS_DDR200:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
+ break;
default:
pr_warn("%s: Invalid UHS-I mode selected\n",
mmc_hostname(host->mmc));
if (clock == 0)
goto out;
+ /*
+ * Check and change Host Controller pin GPIO buffer setting
+ * according to the new clock will be used.
+ * For example, when the SD bus frequency is 50MHz or 200MHz,
+ * the controller SD bus CLK/CMD/DAT pin may need different
+ * driving strength and slew settings.
+ * So we add check here. And this API will also change the pin
+ * gpio buffer settings if needed after the check. Of course,
+ * it's platform specific behaviours.
+ * To ensure that the clock signal does not change when gpio
+ * buffer setting modified, we'd better disable SD bus clock
+ * first before changing any gpio pin buffer settings and
+ * enable the SD bus clock again after the changing.
+ */
+ if (host->ops->gpio_buf_check)
+ host->ops->gpio_buf_check(host, clock);
+
if (host->version >= SDHCI_SPEC_300) {
if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
SDHCI_CTRL_PRESET_VAL_ENABLE) {
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
- timeout = 20;
+ timeout = 2000;
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
return;
}
timeout--;
- mdelay(1);
+ udelay(10);
}
clk |= SDHCI_CLOCK_CARD_EN;
return power;
}
+/*
+ * One of the Medfield eMMC controller (PCI device id 0x0823, SDIO3) is
+ * a shared resource used by the SCU and the IA processors. SCU primarily
+ * uses the eMMC host controller to access the eMMC device's Boot Partition,
+ * while the IA CPU uses the eMMC host controller to access the eMMC device's
+ * User Partition.
+ *
+ * After the SCU hands off the system to the IA processor, the IA processor
+ * assumes ownership to the eMMC host controller. Due to absence of any
+ * arbitration at the eMMC host controller, this could result in concurrent
+ * eMMC host accesses resulting in bus contention and garbage data ending up
+ * in either of the partitions.
+ * To circumvent this from happening, eMMC host controller locking mechanism
+ * is employed, where at any one given time, only one agent, SCU or IA, may be
+ * allowed to access the host. This is achieved by implementing Dekker's
+ * Algorithm (http://en.wikipedia.org/wiki/Dekker's_algorithm) between the
+ * two processors.
+ *
+ * Before handing off the system to the IA processor, SCU must set up three
+ * housekeeping mutex variables allocated in the shared SRAM as follows:
+ *
+ * eMMC_Owner = IA (SCU and IA processors - RW, 32bit)
+ * IA_Req = FALSE (IA -RW, SCU - RO, 32bit)
+ * SCU_Req = FALSE (IA - RO, SCU - R/W, 32bit)
+ *
+ * There is no hardware based access control to these variables and so code
+ * executing on SCU and IA processors must follow below access rules
+ * (Dekker's algorithm):
+ *
+ * -----------------------------------------
+ * SCU Processor Implementation
+ * -----------------------------------------
+ * SCU_Req = TRUE;
+ * while (IA_Req == TRUE) {
+ * if (eMMC_Owner != SCU){
+ * SCU_Req = FALSE;
+ * while (eMMC_Owner != SCU);
+ * SCU_Req = TRUE;
+ * }
+ * }
+ * // SCU now performs eMMC transactions here
+ * ...
+ * // When done, relinquish control to IA
+ * eMMC_Owner = IA;
+ * SCU_Req = FALSE;
+ *
+ * -----------------------------------------
+ * IA Processor Implementation
+ * -----------------------------------------
+ * IA_Req = TRUE;
+ * while (SCU_Req == TRUE) {
+ * if (eMMC_Owner != IA){
+ * IA_Req = FALSE;
+ * while (eMMC_Owner != IA);
+ * IA_Req = TRUE;
+ * }
+ * }
+ * //IA now performs eMMC transactions here
+ * ...
+ * //When done, relinquish control to SCU
+ * eMMC_Owner = SCU;
+ * IA_Req = FALSE;
+ *
+ * ----------------------------------------
+ *
+ * sdhci_do_acquire_ownership- implement the Dekker's algorithm on IA side
+ * This function is only used to acquire ownership, not to re-config the
+ * host controller, since in some scenarios re-configuration is not
+ * needed; this saves some unnecessary overhead.
+ * @mmc: mmc host
+ *
+ * @return return value:
+ * 0 - Acquired the ownership successfully. The last owner is IA
+ * 1 - Acquired the ownership successfully. The last owner is SCU
+ * -EBUSY - failed to acquire ownership within the timeout period
+ */
+static int sdhci_do_acquire_ownership(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	unsigned long t1, t2;
+	unsigned long flags;
+
+	host = mmc_priv(mmc);
+
+	/* No shared-SRAM mutex area: host is not shared with the SCU */
+	if (!host->sram_addr)
+		return 0;
+
+	/* if host has sram_addr, dekker_lock is initialized */
+	spin_lock_irqsave(&host->dekker_lock, flags);
+
+	host->usage_cnt++;
+
+	/* If IA has already hold the eMMC mutex, then just exit */
+	if (readl(host->sram_addr + DEKKER_IA_REQ_OFFSET)) {
+		spin_unlock_irqrestore(&host->dekker_lock, flags);
+		return 0;
+	}
+
+	DBG("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+	    readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+	    readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+	    readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	t1 = jiffies + 10 * HZ;	/* overall deadline for the handshake */
+	t2 = 500;		/* back-off polls of ~10 ms each */
+
+	while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
+		if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) !=
+				DEKKER_OWNER_IA) {
+			/* Back off: withdraw our request until SCU hands over */
+			writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+			while (t2) {
+				if (readl(host->sram_addr +
+						DEKKER_EMMC_OWNER_OFFSET) ==
+						DEKKER_OWNER_IA)
+					break;
+				spin_unlock_irqrestore(&host->dekker_lock,
+						flags);
+				usleep_range(8000, 12000);
+				spin_lock_irqsave(&host->dekker_lock, flags);
+				t2--;
+			}
+			if (t2)
+				writel(1, host->sram_addr +
+						DEKKER_IA_REQ_OFFSET);
+			else
+				goto timeout;
+		}
+		if (time_after(jiffies, t1))
+			goto timeout;
+
+		cpu_relax();
+	}
+
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+	/*
+	 * if the last owner is SCU, will do the re-config host controller
+	 * in the next
+	 */
+	return (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) ==
+			DEKKER_OWNER_IA) ? 1 : 0;
+
+timeout:
+	/*
+	 * pr_err() already prepends the KERN_ERR log level; passing KERN_ERR
+	 * in the format string would embed the raw level bytes in the output.
+	 */
+	pr_err("eMMC mutex timeout!\n"
+		"Dump Dekker's house keeping variables -"
+		"eMMC owner: %d, IA req: %d, SCU req: %d\n",
+		readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+		readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+		readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	/* Release eMMC mutex anyway */
+	writel(DEKKER_OWNER_SCU, host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+	writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+
+	return -EBUSY;
+}
+
+/*
+ * Acquire eMMC host ownership via the Dekker handshake and, whenever
+ * the handshake reports a non-zero result (previous owner was the SCU,
+ * or acquisition timed out), re-initialize the host registers since the
+ * SCU may have reprogrammed them.
+ *
+ * Return: the sdhci_do_acquire_ownership() result.
+ */
+static int sdhci_acquire_ownership(struct mmc_host *mmc)
+{
+	int ret;
+
+	ret = sdhci_do_acquire_ownership(mmc);
+	if (ret) {
+		struct sdhci_host *host;
+		host = mmc_priv(mmc);
+		/* Re-config HC in case SCU has changed HC reg already */
+		pm_runtime_get_sync(mmc->parent);
+		/*
+		 * reinit host registers.
+		 * include reset host controller all,
+		 * reconfigure clock, pwr and other registers.
+		 */
+		sdhci_init(host, 0);
+		host->clock = 0;
+		host->pwr = 0;
+		sdhci_do_set_ios(host, &host->mmc->ios);
+		pm_runtime_put(mmc->parent);
+	}
+
+	return ret;
+}
+
+/*
+ * Drop one reference on eMMC host ownership; when the last reference is
+ * released, hand the controller back to the SCU by marking it owner and
+ * clearing the IA request flag in the shared SRAM.
+ */
+static void sdhci_release_ownership(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	host = mmc_priv(mmc);
+
+	/* Host not shared with the SCU: nothing to release */
+	if (!host->sram_addr)
+		return;
+
+	spin_lock_irqsave(&host->dekker_lock, flags);
+	BUG_ON(host->usage_cnt == 0);
+	host->usage_cnt--;
+	if (host->usage_cnt == 0) {
+		writel(DEKKER_OWNER_SCU,
+		       host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+		writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+		DBG("Exit ownership-eMMC owner: %d,IA req: %d,SCU req: %d\n",
+		    readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+		    readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+		    readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+	}
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+}
+
/*****************************************************************************\
* *
* MMC callbacks *
sdhci_runtime_pm_get(host);
+ sdhci_acquire_ownership(host->mmc);
+
spin_lock_irqsave(&host->lock, flags);
+ if (host->suspended) {
+ pr_err("%s: %s: host is in suspend state\n",
+ __func__, mmc_hostname(mmc));
+ BUG_ON(1);
+ }
+
WARN_ON(host->mrq != NULL);
#ifndef SDHCI_USE_LEDS_CLASS
* tuning procedure before sending command.
*/
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
- !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+ !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
+ mrq->cmd->opcode != MMC_SEND_STATUS) {
if (mmc->card) {
+ if ((mmc->card->ext_csd.part_config & 0x07) ==
+ EXT_CSD_PART_CONFIG_ACC_RPMB)
+ goto end_tuning;
/* eMMC uses cmd21 but sd and sdio use cmd19 */
tuning_opcode =
mmc->card->type == MMC_TYPE_MMC ?
MMC_SEND_TUNING_BLOCK_HS200 :
MMC_SEND_TUNING_BLOCK;
+ host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
sdhci_execute_tuning(mmc, tuning_opcode);
spin_lock_irqsave(&host->lock, flags);
-
+end_tuning:
/* Restore original mmc_request structure */
host->mrq = mrq;
}
}
+ if (!(sdhci_readw(host, SDHCI_CLOCK_CONTROL) &
+ SDHCI_CLOCK_CARD_EN)) {
+ /*
+ * SD bus clock is stopped. no interrupts will be
+ * generate in this case.
+ */
+ pr_warn("%s:%s: SD bus clock not enabled\n",
+ __func__, mmc_hostname(mmc));
+ pr_warn("%s:%s: host->pwr 0x%x, host->clock 0x%x\n",
+ __func__, mmc_hostname(mmc),
+ host->pwr, host->clock);
+ sdhci_dumpregs(host);
+ host->mrq->cmd->error = -EIO;
+ tasklet_schedule(&host->finish_tasklet);
+ goto out;
+ }
+
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
sdhci_send_command(host, mrq->cmd);
}
+out:
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
spin_lock_irqsave(&host->lock, flags);
+ if (host->quirks2 & SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8)
+ ios->vdd = 7;
+
if (host->flags & SDHCI_DEVICE_DEAD) {
spin_unlock_irqrestore(&host->lock, flags);
if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
/* In case of UHS-I modes, set High Speed Enable */
if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+ (ios->timing == MMC_TIMING_MMC_HS400) ||
(ios->timing == MMC_TIMING_UHS_SDR50) ||
(ios->timing == MMC_TIMING_UHS_SDR104) ||
(ios->timing == MMC_TIMING_UHS_DDR50) ||
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
- if (ios->timing == MMC_TIMING_MMC_HS200)
+ if (ios->timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= SDHCI_CTRL_HS_DDR200;
+ else if (ios->timing == MMC_TIMING_MMC_HS200)
ctrl_2 |= SDHCI_CTRL_HS_SDR200;
else if (ios->timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
struct sdhci_host *host = mmc_priv(mmc);
sdhci_runtime_pm_get(host);
+ sdhci_acquire_ownership(mmc);
sdhci_do_set_ios(host, ios);
+ sdhci_release_ownership(mmc);
sdhci_runtime_pm_put(host);
}
{
struct sdhci_host *host = mmc_priv(mmc);
- if (host->ops && host->ops->hw_reset)
+ if (host->ops && host->ops->hw_reset) {
+ sdhci_runtime_pm_get(host);
+ sdhci_acquire_ownership(mmc);
host->ops->hw_reset(host);
+ sdhci_release_ownership(mmc);
+ sdhci_runtime_pm_put(host);
+ }
}
static int sdhci_get_ro(struct mmc_host *mmc)
sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
/*
+ * set the data timeout register to be max value
+ */
+ sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+
+ /*
* Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
* of loops reaches 40 times or a timeout of 150ms occurs.
*/
do {
struct mmc_command cmd = {0};
struct mmc_request mrq = {NULL};
+ unsigned int intmask;
+ unsigned long t = jiffies + msecs_to_jiffies(150);
if (!tuning_loop_counter && !timeout)
break;
sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
sdhci_send_command(host, &cmd);
+ mmiowb();
host->cmd = NULL;
host->mrq = NULL;
- spin_unlock(&host->lock);
- enable_irq(host->irq);
-
- /* Wait for Buffer Read Ready interrupt */
- wait_event_interruptible_timeout(host->buf_ready_int,
- (host->tuning_done == 1),
- msecs_to_jiffies(50));
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ /* delete the timer created by send command */
+ del_timer(&host->timer);
+
+ if (host->quirks2 & SDHCI_QUIRK2_TUNING_POLL) {
+ while (!time_after(jiffies, t)) {
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+ if (intmask & SDHCI_INT_DATA_AVAIL) {
+ host->tuning_done = 1;
+ sdhci_writel(host,
+ intmask & SDHCI_INT_DATA_AVAIL,
+ SDHCI_INT_STATUS);
+ break;
+ }
+ }
+ } else {
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+ if (intmask & SDHCI_INT_DATA_AVAIL) {
+ host->tuning_done = 1;
+ sdhci_writel(host,
+ intmask & SDHCI_INT_DATA_AVAIL,
+ SDHCI_INT_STATUS);
+ }
+ spin_unlock(&host->lock);
+ enable_irq(host->irq);
+
+ if (!host->tuning_done)
+ /* Wait for Buffer Read Ready interrupt */
+ wait_event_interruptible_timeout(
+ host->buf_ready_int,
+ (host->tuning_done == 1),
+ msecs_to_jiffies(50));
+ disable_irq(host->irq);
+ spin_lock(&host->lock);
+
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+ if (intmask & SDHCI_INT_DATA_AVAIL) {
+ host->tuning_done = 1;
+ sdhci_writel(host,
+ intmask & SDHCI_INT_DATA_AVAIL,
+ SDHCI_INT_STATUS);
+ }
+ }
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for "
host->tuning_done = 0;
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- tuning_loop_counter--;
- timeout--;
- mdelay(1);
+ if (tuning_loop_counter)
+ tuning_loop_counter--;
+ if (timeout)
+ timeout--;
+ spin_unlock(&host->lock);
+ usleep_range(900, 1100);
+ spin_lock(&host->lock);
} while (ctrl & SDHCI_CTRL_EXEC_TUNING);
/*
pr_err("%s: Resetting controller.\n",
mmc_hostname(host->mmc));
+ if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+ sdhci_busy_wait(mmc, 1000);
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
spin_unlock_irqrestore(&host->lock, flags);
}
+/* mmc_host_ops hook: forward a device power on/off request to the host driver */
+static void sdhci_set_dev_power(struct mmc_host *mmc, bool poweron)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	if (host->ops->set_dev_power)
+		host->ops->set_dev_power(host, poweron);
+}
+
+/* mmc_host_ops hook: mark the card non-standard-CIS when the host quirk says so */
+static void sdhci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->quirks2 & SDHCI_QUIRK2_NON_STD_CIS)
+		card->quirks |= MMC_QUIRK_NON_STD_CIS;
+}
+
+
static const struct mmc_host_ops sdhci_ops = {
.request = sdhci_request,
.set_ios = sdhci_set_ios,
.execute_tuning = sdhci_execute_tuning,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
+ .set_dev_power = sdhci_set_dev_power,
+ .init_card = sdhci_init_card,
+ .busy_wait = sdhci_busy_wait,
};
/*****************************************************************************\
{
struct sdhci_host *host = (struct sdhci_host*)param;
+ cancel_delayed_work(&host->mmc->detect);
+
sdhci_card_event(host->mmc);
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
static void sdhci_tasklet_finish(unsigned long param)
* upon error conditions.
*/
if (!(host->flags & SDHCI_DEVICE_DEAD) &&
- ((mrq->cmd && mrq->cmd->error) ||
+ ((mrq->cmd && mrq->cmd->error &&
+ mrq->cmd->error != -ENOMEDIUM) ||
(mrq->data && (mrq->data->error ||
(mrq->data->stop && mrq->data->stop->error))) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
+ if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+ sdhci_busy_wait(host->mmc, 1000);
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
}
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_release_ownership(host->mmc);
mmc_request_done(host->mmc, mrq);
sdhci_runtime_pm_put(host);
}
+/*
+ * Debug aid for missing-interrupt timeouts: dump the two 32-bit halves
+ * of the redirection-table entry for this host's IRQ.  The entry is read
+ * through an index/window pair: write the index (0x10 + 2*irq, then +1
+ * for the high half) to idx_addr and read the data at rte_addr + 0x10.
+ * NOTE(review): presumably idx_addr and rte_addr map an IOAPIC's
+ * index/data registers — confirm against the platform setup code.
+ */
+static void dump_rte_apic_reg(struct sdhci_host *host, void __iomem *idx_addr)
+{
+	unsigned int rte_lo, rte_hi;
+
+	writeb(0x10 + 2 * host->irq, idx_addr);
+	rte_lo = readl(host->rte_addr + 0x10);
+
+	writeb(0x10 + 2 * host->irq + 1, idx_addr);
+	rte_hi = readl(host->rte_addr + 0x10);
+
+	pr_err("%s: dump APIC RTE reg - L32: 0x%08x, H32: 0x%08x\n",
+	       mmc_hostname(host->mmc), rte_lo, rte_hi);
+}
+
static void sdhci_timeout_timer(unsigned long data)
{
struct sdhci_host *host;
"interrupt.\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
+ if (host->rte_addr)
+ dump_rte_apic_reg(host, host->rte_addr);
+
if (host->data) {
host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
DBG("Cannot wait for busy signal when also "
"doing a data transfer");
else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
- return;
+ if (!host->r1b_busy_end) {
+ host->r1b_busy_end = 1;
+ return;
+ }
/* The controller does not support the end-of-busy IRQ,
* fall through and take the SDHCI_INT_RESPONSE */
* The "data complete" interrupt is also used to
* indicate that a busy state has ended. See comment
* above in sdhci_cmd_irq().
+ *
+ * "data timeout" interrupt may also happen
*/
if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_END) {
- sdhci_finish_command(host);
+ if (host->r1b_busy_end)
+ sdhci_finish_command(host);
+ else
+ host->r1b_busy_end = 1;
+ return;
+ } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+ pr_err("%s: Got data interrupt 0x%08x for busy cmd %d\n",
+ mmc_hostname(host->mmc),
+ (unsigned)intmask,
+ host->cmd->opcode);
+ host->cmd->error = -ETIMEDOUT;
+ tasklet_schedule(&host->finish_tasklet);
return;
}
}
}
if (intmask & SDHCI_INT_CMD_MASK) {
+ /*
+ * If encounter command conflict interrupts,
+ * before clearing it, delay 64 clocks, otherwise the interrupts
+ * will be generated again.
+ * This is just experience. SDHC spec doesn't
+ * say the command conflict interrupts will be generated
+ * again without a delay before clearing them.
+ */
+ if ((intmask & SDHCI_INT_CMD_CONFLICT) ==
+ SDHCI_INT_CMD_CONFLICT) {
+ if (host->clock)
+ udelay(64 * 1000000 / host->clock);
+ else
+ udelay(500);
+ }
sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
SDHCI_INT_STATUS);
sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
return result;
}
-/*****************************************************************************\
- * *
- * Suspend/resume *
- * *
-\*****************************************************************************/
-
-#ifdef CONFIG_PM
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
-{
- u8 val;
- u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
- | SDHCI_WAKE_ON_INT;
-
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= mask ;
- /* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
- val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
- sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
-}
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
-
-void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+/************************************************************************\
+ * *
+ * APIs for panic record use *
+ * Note: *
+ * For panic use, please take care of sdhci_read/write. *
+ * *
+ * sdhci_read/write function are defined by sdhci host layer which *
+ * warpped the read/write function. *
+ * But before calling read/write, sdhci_read/write will try to see if *
+ * some host drivers defined special register reading/writing functions.*
+ * If do, that is means read/write function defined by kernel cannot be *
+ * used, have to use the special ones. *
+ * So, if host driver are using special ones, please make sure when in *
+ * panic mode, the special ones are still good to use *
+ * So, if not, read/write defined by kernel is safe for panic using *
+ * *
+ * @For MFLD sdhci host controller driver, no special reading/writing *
+ * funtion are used *
+ * *
+ \************************************************************************/
+
+static int panic_irq_done;
+
+static void sdhci_panic_irq_wait(struct sdhci_host *host);
+
+static inline void sdhci_panic_finish_req(struct sdhci_host *host)
{
- u8 val;
- u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
- | SDHCI_WAKE_ON_INT;
-
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val &= ~mask;
- sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ panic_irq_done = 1;
}
-EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
-int sdhci_suspend_host(struct sdhci_host *host)
+/*
+ * assuming only use SDMA write and data length is 512Bytes
+ */
+static void sdhci_panic_send_cmd(struct sdhci_host *host,
+ struct mmc_command *cmd)
{
- int ret;
-
- if (host->ops->platform_suspend)
- host->ops->platform_suspend(host);
+ unsigned long timeout;
+ u32 mask;
+ int flags;
- sdhci_disable_card_detection(host);
+ WARN_ON(host->cmd);
+ /* Wait max 10 ms */
+ timeout = 10;
+ mask = SDHCI_CMD_INHIBIT;
+ if ((cmd->data != 0) || (cmd->flags & MMC_RSP_BUSY))
+ mask |= SDHCI_DATA_INHIBIT;
- /* Disable tuning since we are suspending */
- if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- del_timer_sync(&host->tuning_timer);
- host->flags &= ~SDHCI_NEEDS_RETUNING;
+ while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
+ if (timeout == 0) {
+ pr_err("%s %s: Controller never released inhibit bit(s).\n",
+ __func__, mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ sdhci_panic_finish_req(host);
+ return;
+ }
+ timeout--;
+ /*
+ * seems card is not ready for the next command.
+ * We can wait for 1ms and then to have a retry
+ */
+ mdelay(1);
}
- ret = mmc_suspend_host(host->mmc);
- if (ret) {
- if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags |= SDHCI_NEEDS_RETUNING;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
+ host->cmd = cmd;
+ host->r1b_busy_end = 0;
+
+ /*
+ * set the data timeout register to be max value
+ */
+ sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+ /*
+ * prepare data
+ */
+ if (cmd->data) {
+ unsigned int mode;
+ struct mmc_data *data = cmd->data;
+ u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
+ u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
+
+ host->data = data;
+ host->data_early = 0;
+ /*
+ * update DMA address
+ */
+ sdhci_writel(host, data->dmabuf, SDHCI_DMA_ADDRESS);
+
+ if (host->version >= SDHCI_SPEC_200) {
+ u8 ctrl;
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ if ((host->flags & SDHCI_REQ_USE_DMA) &&
+ (host->flags & SDHCI_USE_ADMA))
+ ctrl |= SDHCI_CTRL_ADMA32;
+ else
+ ctrl |= SDHCI_CTRL_SDMA;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
- sdhci_enable_card_detection(host);
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
+ else
+ sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
- return ret;
- }
+ /*
+ * We do not handle DMA boundaries,
+ * so set it to max (512 KiB)
+ */
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz),
+ SDHCI_BLOCK_SIZE);
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
- free_irq(host->irq, host);
- } else {
- sdhci_enable_irq_wakeups(host);
- enable_irq_wake(host->irq);
+ /*
+ * set transfer mode
+ */
+ mode = SDHCI_TRNS_BLK_CNT_EN;
+ if (data->blocks > 1) {
+ if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+ mode |= SDHCI_TRNS_MULTI |
+ SDHCI_TRNS_AUTO_CMD12;
+ else
+ mode |= SDHCI_TRNS_MULTI;
+ }
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ mode |= SDHCI_TRNS_DMA;
+
+ sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
- return ret;
-}
-EXPORT_SYMBOL_GPL(sdhci_suspend_host);
+ sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
-int sdhci_resume_host(struct sdhci_host *host)
-{
- int ret;
+ if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+ pr_err("%s %s: Unsupported response type!\n",
+ __func__, mmc_hostname(host->mmc));
+ sdhci_panic_finish_req(host);
+ return;
+ }
+
+ if (!(cmd->flags & MMC_RSP_PRESENT))
+ flags = SDHCI_CMD_RESP_NONE;
+ else if (cmd->flags & MMC_RSP_136)
+ flags = SDHCI_CMD_RESP_LONG;
+ else if (cmd->flags & MMC_RSP_BUSY)
+ flags = SDHCI_CMD_RESP_SHORT_BUSY;
+ else
+ flags = SDHCI_CMD_RESP_SHORT;
+
+ if (cmd->flags & MMC_RSP_CRC)
+ flags |= SDHCI_CMD_CRC;
+ if (cmd->flags & MMC_RSP_OPCODE)
+ flags |= SDHCI_CMD_INDEX;
+ if (cmd->data)
+ flags |= SDHCI_CMD_DATA;
+
+ /*
+ * send command
+ */
+ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+
+ mmiowb();
+
+ /*
+ * polling interrupt
+ */
+ sdhci_panic_irq_wait(host);
+}
+
+/*
+ * Complete the data stage of a panic-mode request: record bytes_xfered
+ * (all-or-nothing — see the comment below on why the block-count
+ * register cannot be trusted), then either issue the stop command or
+ * finish the request.  No DMA unmapping is done in panic mode.
+ */
+static void sdhci_panic_finish_data(struct sdhci_host *host)
+{
+	struct mmc_data *data;
+
+	BUG_ON(!host->data);
+
+	data = host->data;
+	host->data = NULL;
+
+	/*
+	 * panic use, will not unmap anything here
+	 */
+
+	/*
+	 * The specification states that the block count register must
+	 * be updated, but it does not specify at what point in the
+	 * data flow. That makes the register entirely useless to read
+	 * back so we have to assume that nothing made it to the card
+	 * in the event of an error.
+	 */
+	if (data->error)
+		data->bytes_xfered = 0;
+	else
+		data->bytes_xfered = data->blksz * data->blocks;
+
+	if (data->stop) {
+		/*
+		 * we will not be here since we use single block
+		 * transfer when panic occurred
+		 */
+		sdhci_panic_send_cmd(host, data->stop);
+	} else
+		sdhci_panic_finish_req(host);
+}
+
+/*
+ * sdhci_panic_finish_command - read back the response of a completed
+ * command and finish the request, or the early-completed data phase.
+ */
+static void sdhci_panic_finish_command(struct sdhci_host *host)
+{
+	int i;
+
+	BUG_ON(host->cmd == NULL);
+
+	if (host->cmd->flags & MMC_RSP_PRESENT) {
+		if (host->cmd->flags & MMC_RSP_136) {
+			/* CRC is stripped so we need to do some shifting. */
+			for (i = 0; i < 4; i++) {
+				host->cmd->resp[i] = sdhci_readl(host,
+					SDHCI_RESPONSE + (3-i)*4) << 8;
+				if (i != 3)
+					host->cmd->resp[i] |=
+						sdhci_readb(host,
+						SDHCI_RESPONSE + (3-i)*4-1);
+			}
+		} else {
+			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+		}
+	}
+
+	host->cmd->error = 0;
+
+	/* finish a data phase that completed before the command did */
+	if (host->data && host->data_early)
+		sdhci_panic_finish_data(host);
+
+	if (!host->cmd->data)
+		sdhci_panic_finish_req(host);
+
+	host->cmd = NULL;
+}
+
+/*
+ * sdhci_panic_cmd_irq: handle command irqs in panic mode
+ *
+ * When the host is in panic mode, the host driver needs to poll its
+ * interrupt status register. Once command irqs have been seen, this
+ * function is called to handle them.
+ */
+static void sdhci_panic_cmd_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (intmask & SDHCI_INT_TIMEOUT)
+		host->cmd->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+			SDHCI_INT_INDEX))
+		host->cmd->error = -EILSEQ;
+
+	if (host->cmd->error) {
+		sdhci_panic_finish_req(host);
+		return;
+	}
+
+	if (host->cmd->flags & MMC_RSP_BUSY) {
+		if (host->cmd->data)
+			pr_debug("Cannot wait for busy signal when also doing a data transfer\n");
+		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
+			/* for R1b, wait for the busy-end data irq first */
+			if (!host->r1b_busy_end) {
+				host->r1b_busy_end = 1;
+				return;
+			}
+	}
+
+	if (intmask & SDHCI_INT_RESPONSE)
+		sdhci_panic_finish_command(host);
+}
+
+/*
+ * sdhci_panic_data_irq: handle data irqs in panic mode
+ *
+ * When the host is in panic mode, the host driver needs to poll its
+ * interrupt status register. Once data irqs have been seen, this
+ * function is called to handle them.
+ */
+static void sdhci_panic_data_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (!host->data) {
+		/*
+		 * The "data complete" interrupt is also used to
+		 * indicate that a busy state has ended. See comment
+		 * above in sdhci_cmd_irq().
+		 */
+		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+			if (intmask & SDHCI_INT_DATA_END) {
+				if (host->r1b_busy_end)
+					sdhci_panic_finish_command(host);
+				else
+					host->r1b_busy_end = 1;
+				return;
+			}
+		}
+
+		pr_err("%s %s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
+			__func__, mmc_hostname(host->mmc), (unsigned)intmask);
+		sdhci_dumpregs(host);
+
+		return;
+	}
+
+	if (intmask & SDHCI_INT_DATA_TIMEOUT)
+		host->data->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+		host->data->error = -EILSEQ;
+	else if (intmask & SDHCI_INT_ADMA_ERROR) {
+		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+		host->data->error = -EIO;
+	}
+
+	if (host->data->error)
+		sdhci_panic_finish_data(host);
+	else {
+		/* re-arm SDMA by rewriting the current DMA address */
+		if (intmask & SDHCI_INT_DMA_END)
+			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
+				SDHCI_DMA_ADDRESS);
+
+		if (intmask & SDHCI_INT_DATA_END) {
+			if (host->cmd)
+				host->data_early = 1;
+			else
+				sdhci_panic_finish_data(host);
+		}
+	}
+}
+
+/*
+ * sdhci_panic_irq_wait: irq handler for panic record
+ *
+ * Busy-polls SDHCI_INT_STATUS, acking and dispatching command/data irqs
+ * until the request completes.  NOTE(review): there is no timeout here;
+ * panic_irq_done appears to be set by sdhci_panic_finish_req() (not
+ * visible in this chunk) — if the controller never raises an interrupt
+ * this loops forever, which is presumably acceptable on the panic path.
+ */
+static void sdhci_panic_irq_wait(struct sdhci_host *host)
+{
+	u32 intmask;
+	panic_irq_done = 0;
+retry:
+	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+
+	/* 0xffffffff usually means the device is gone/powered down */
+	if (!intmask || intmask == 0xffffffff)
+		goto retry;
+
+	DBG("***%s got interrupt: 0x%08x\n",
+		__func__, intmask);
+
+	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
+			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+		/*
+		 * do nothing for card detect
+		 */
+	}
+
+	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+
+	if (intmask & SDHCI_INT_CMD_MASK) {
+		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
+			SDHCI_INT_STATUS);
+		sdhci_panic_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+	}
+
+	if (intmask & SDHCI_INT_DATA_MASK) {
+		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
+			SDHCI_INT_STATUS);
+		sdhci_panic_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+	}
+
+	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
+
+	intmask &= ~SDHCI_INT_ERROR;
+
+	if (intmask & SDHCI_INT_BUS_POWER) {
+		pr_err("%s %s: Card is consuming too much power!\n",
+			__func__, mmc_hostname(host->mmc));
+		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
+	}
+
+	intmask &= ~SDHCI_INT_BUS_POWER;
+
+	if (intmask & SDHCI_INT_CARD_INT) {
+		sdhci_writel(host, intmask & SDHCI_INT_CARD_INT,
+			SDHCI_INT_STATUS);
+		/*
+		 * do nothing for this irq
+		 */
+		intmask &= ~SDHCI_INT_CARD_INT;
+	}
+
+	if (intmask) {
+		pr_err("%s %s: Unexpected interrupt 0x%08x.\n",
+			__func__, mmc_hostname(host->mmc), intmask);
+		sdhci_dumpregs(host);
+
+		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+	}
+
+	mmiowb();
+	if (!panic_irq_done)
+		goto retry;
+}
+
+/*
+ * sdhci_mfld_panic_set_ios - program clock, power, bus width and timing
+ * from panic_mmc->ios.  Panic-mode variant of sdhci_set_ios(): no
+ * locking, and a power-off request is only logged, never honoured.
+ */
+static void sdhci_mfld_panic_set_ios(struct mmc_panic_host *mmc)
+{
+	struct sdhci_host *host;
+	struct mmc_ios *ios;
+	u8 ctrl;
+
+	if (!mmc)
+		return;
+	ios = &mmc->ios;
+	host = (struct sdhci_host *)mmc->priv;
+
+	/*
+	 * Reset the chip on each power off.
+	 * Should clear out any weird states.
+	 */
+	if (ios->power_mode == MMC_POWER_OFF)
+		pr_info("%s: we are in panic, why need power off?\n", __func__);
+
+	sdhci_set_clock(host, ios->clock);
+
+	if (ios->power_mode == MMC_POWER_OFF)
+		sdhci_set_power(host, -1);
+	else
+		sdhci_set_power(host, ios->vdd);
+
+	if (host->ops->platform_send_init_74_clocks)
+		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+	if (ios->bus_width == MMC_BUS_WIDTH_8)
+		ctrl |= SDHCI_CTRL_8BITBUS;
+	else
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+
+	if (ios->bus_width == MMC_BUS_WIDTH_4)
+		ctrl |= SDHCI_CTRL_4BITBUS;
+	else
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+
+	if ((ios->timing == MMC_TIMING_SD_HS ||
+			ios->timing == MMC_TIMING_MMC_HS)
+			&& !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+		ctrl |= SDHCI_CTRL_HISPD;
+	else
+		ctrl &= ~SDHCI_CTRL_HISPD;
+
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+	/*
+	 * Some (ENE) controllers go apeshit on some ios operation,
+	 * signalling timeout and CRC errors even on CMD0. Resetting
+	 * it on each ios seems to solve the problem.
+	 */
+	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+	mmiowb();
+}
+
+/*
+ * sdhci_panic_reinit_host - re-initialise the controller after an error
+ * or power-up in panic mode, forcing power and clock to be reprogrammed.
+ */
+static void sdhci_panic_reinit_host(struct mmc_panic_host *mmc)
+{
+	struct sdhci_host *host = mmc->priv;
+	sdhci_init(host, 0);
+	host->pwr = 0; /* force power reprogram */
+	host->clock = 0; /* force clock reprogram */
+	sdhci_mfld_panic_set_ios(mmc);
+	mmiowb();
+}
+
+/*
+ * sdhci_mfld_panic_request - issue one mmc_request in panic mode.
+ *
+ * Only single-block DMA writes are supported; ADMA is disabled and any
+ * auto-CMD12 stop command is dropped.  On any error the controller is
+ * re-initialised, since its state machines need a reset.
+ */
+static void sdhci_mfld_panic_request(struct mmc_panic_host *panic_mmc,
+		struct mmc_request *mrq)
+{
+	struct sdhci_host *host;
+	bool present;
+
+	if (!panic_mmc || !mrq)
+		return;
+
+	host = (struct sdhci_host *)panic_mmc->priv;
+
+	/*
+	 * only support single block data DMA write
+	 */
+	if (mrq->cmd->data) {
+		if (mrq->cmd->data->blocks != 1 ||
+				mrq->cmd->data->flags & MMC_DATA_READ)
+			mrq->cmd->error = -EINVAL;
+	}
+
+	/* fall back to SDMA: ADMA descriptors are not set up here */
+	if (host->flags & SDHCI_USE_ADMA)
+		host->flags &= ~SDHCI_USE_ADMA;
+
+	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
+		if (mrq->stop) {
+			mrq->data->stop = NULL;
+			mrq->stop = NULL;
+		}
+	}
+
+	host->mrq = mrq;
+
+	/* If polling, assume that the card is always present. */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = true;
+	else
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+				SDHCI_CARD_PRESENT;
+
+	if (!present) {
+		host->mrq->cmd->error = -ENOMEDIUM;
+		sdhci_panic_finish_req(host);
+	} else
+		sdhci_panic_send_cmd(host, mrq->cmd);
+
+	/*
+	 * The controller needs a reset of internal state machines
+	 * upon error conditions.
+	 */
+	if (mrq->cmd->error || (mrq->data && (mrq->data->error ||
+			(mrq->data->stop && mrq->data->stop->error))) ||
+			(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {
+		pr_err("%s: request handle failed\n", __func__);
+		sdhci_dumpregs(host);
+		sdhci_panic_reinit_host(panic_mmc);
+	}
+}
+
+/*
+ * The same like sdhci_acquire_ownership, used for IA to get the ownership
+ * before using host controller. Since this function is called in panic mode,
+ * so we can not use msleep() like sdhci_acquire_ownership does, use mdelay()
+ * instead.
+ *
+ * Returns:
+ *   DEKKER_OWNER_IA  - no shared SRAM mutex, or SCU held it before (caller
+ *                      need not power up the host)
+ *   DEKKER_OWNER_SCU - ownership taken over from SCU (caller must power up
+ *                      and re-init the host)
+ *   0                - IA already held the mutex
+ *   -EBUSY           - timed out waiting for SCU to release the mutex
+ *                      (t1: ~1s release wait, t2: ~5s owner handover wait)
+ */
+static int sdhci_mfld_panic_acquire_ownership(struct sdhci_host *host)
+{
+	unsigned long t1, t2;
+
+	if (!host->sram_addr)
+		return DEKKER_OWNER_IA;
+
+	/* If IA has already hold the eMMC mutex, then just exit */
+	if (readl(host->sram_addr + DEKKER_IA_REQ_OFFSET))
+		return 0;
+
+	writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	t1 = 100;
+	t2 = 500;
+
+	while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
+		if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) !=
+				DEKKER_OWNER_IA) {
+			/* back off, wait for SCU to hand ownership to IA */
+			writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+			while (t2) {
+				if (readl(host->sram_addr +
+						DEKKER_EMMC_OWNER_OFFSET) ==
+						DEKKER_OWNER_IA)
+					break;
+				mdelay(10);
+				t2--;
+			}
+			if (t2)
+				writel(1, host->sram_addr +
+					DEKKER_IA_REQ_OFFSET);
+			else
+				goto timeout;
+		}
+		/*
+		 * if we go to here, that means SCU FW is releasing the
+		 * ownership, so we just wait for a short time here.
+		 */
+		if (t1) {
+			mdelay(10);
+			t1--;
+		} else
+			goto timeout;
+	}
+
+	pr_debug("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+		readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+		readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+		readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	return (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) ==
+		DEKKER_OWNER_IA) ? DEKKER_OWNER_SCU : DEKKER_OWNER_IA;
+timeout:
+
+	pr_warn("%s: Timeout to hold eMMC mutex\n", __func__);
+	return -EBUSY;
+}
+
+/*
+ * sdhci_mfld_panic_power_on - power up and re-init a runtime-suspended
+ * host before a panic-mode access.  Returns 0 on success or if the host
+ * was already powered, negative errno from the platform power-up hook.
+ */
+static int sdhci_mfld_panic_power_on(struct mmc_panic_host *panic_host)
+{
+	int ret;
+	struct mmc_host *mmc;
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return -ENODEV;
+	mmc = panic_host->mmc;
+	host = panic_host->priv;
+
+	if (host->runtime_suspended) {
+		/*
+		 * power host controller
+		 */
+		pm_runtime_get_noresume(mmc->parent);
+
+		if (host->ops->power_up_host) {
+			ret = host->ops->power_up_host(host);
+			if (ret)
+				return ret;
+		}
+		sdhci_panic_reinit_host(panic_host);
+		host->runtime_suspended = false;
+	}
+
+	return 0;
+}
+
+/*
+ * sdhci_mfld_panic_hold_mutex - take the IA/SCU Dekker mutex and make
+ * sure the host is powered.  If ownership came from the SCU, the host
+ * must be powered up and re-initialised from scratch.
+ */
+static int sdhci_mfld_panic_hold_mutex(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+	int ret;
+
+	if (!panic_host)
+		return -ENODEV;
+
+	host = panic_host->priv;
+
+	ret = sdhci_mfld_panic_acquire_ownership(host);
+
+	if (ret == DEKKER_OWNER_SCU) {
+		if (host->ops->power_up_host) {
+			ret = host->ops->power_up_host(host);
+			if (ret)
+				return ret;
+		}
+		sdhci_panic_reinit_host(panic_host);
+		return 0;
+	} else if (ret == DEKKER_OWNER_IA)
+		return sdhci_mfld_panic_power_on(panic_host);
+
+	/* 0 (already held) or -EBUSY from acquire */
+	return ret;
+}
+
+/*
+ * sdhci_mfld_panic_release_mutex - hand eMMC ownership back to the SCU
+ * and drop the IA request flag in the shared-SRAM Dekker mutex.
+ */
+static void sdhci_mfld_panic_release_mutex(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return;
+	host = panic_host->priv;
+
+	if (!host->sram_addr)
+		return;
+
+	writel(DEKKER_OWNER_SCU,
+		host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+	writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+	DBG("Exit ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+		readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+		readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+		readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+}
+
+/*
+ * sdhci_mfld_panic_prepare - one-time setup when entering panic mode:
+ * assume the host is powered off and stop runtime PM from interfering.
+ */
+static void sdhci_mfld_panic_prepare(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return;
+	host = panic_host->priv;
+
+	/*
+	 * assume host is powered off
+	 */
+	host->runtime_suspended = true;
+
+#ifdef CONFIG_PM_RUNTIME
+	/*
+	 * disable runtime pm directly
+	 * NOTE(review): writing power.disable_depth bypasses
+	 * pm_runtime_disable(); presumably deliberate because the panic
+	 * path cannot take the dev->power lock - confirm.
+	 */
+	panic_host->mmc->parent->power.disable_depth = 1;
+#endif
+}
+
+/*
+ * sdhci_mfld_panic_setup - stash the sdhci_host pointer in the panic
+ * host so the other panic ops can reach it.  Always returns 0.
+ */
+static int sdhci_mfld_panic_setup(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return 0;
+
+	host = mmc_priv(panic_host->mmc);
+	panic_host->priv = (void *)host;
+
+	return 0;
+}
+
+/* Panic-mode host operations consumed by the mmc panic-record core. */
+const struct mmc_host_panic_ops sdhci_panic_ops = {
+	.request	= sdhci_mfld_panic_request,
+	.prepare	= sdhci_mfld_panic_prepare,
+	.setup		= sdhci_mfld_panic_setup,
+	.set_ios	= sdhci_mfld_panic_set_ios,
+	.power_on	= sdhci_mfld_panic_power_on,
+	.hold_mutex	= sdhci_mfld_panic_hold_mutex,
+	.release_mutex	= sdhci_mfld_panic_release_mutex,
+};
+
+/*
+ * sdhci_alloc_panic_host - register this host with the mmc panic-record
+ * core, wiring in sdhci_panic_ops.  No-op if the mmc host is missing.
+ */
+void sdhci_alloc_panic_host(struct sdhci_host *host)
+{
+	if (!host->mmc)
+		return;
+	mmc_alloc_panic_host(host->mmc, &sdhci_panic_ops);
+}
+EXPORT_SYMBOL_GPL(sdhci_alloc_panic_host);
+
+
+/*****************************************************************************\
+ * *
+ * Suspend/resume *
+ * *
+\*****************************************************************************/
+
+/*
+ * sdhci_set_emmc_state - publish the eMMC suspend/active state to the
+ * SCU via the shared-SRAM Dekker area, when one is present.
+ */
+static void sdhci_set_emmc_state(struct sdhci_host *host, uint32_t state)
+{
+	/* Only if there is dekker mutex available */
+	if (!host->sram_addr)
+		return;
+	writel(state, host->sram_addr + DEKKER_EMMC_STATE);
+}
+
+#ifdef CONFIG_PM
+/*
+ * sdhci_enable_irq_wakeups - arm insert/remove/SDIO wakeups; card-detect
+ * wakeups are skipped on broken-card-detect hosts to avoid fake wakes.
+ */
+void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+{
+	u8 val;
+	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+		| SDHCI_WAKE_ON_INT;
+
+	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+	val |= mask;
+	/* Avoid fake wake up */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
+
+/* Disarm all irq wakeup sources (counterpart of sdhci_enable_irq_wakeups). */
+void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+{
+	u8 val;
+	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+		| SDHCI_WAKE_ON_INT;
+
+	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+	val &= ~mask;
+	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
+
+/*
+ * sdhci_suspend_host - system-suspend path.  Takes the IA/SCU Dekker
+ * ownership for the whole sequence, suspends the card, then either
+ * frees the irq or arms it for wakeup, and tells the SCU the chip is
+ * suspended.  On mmc_suspend_host() failure everything is rolled back.
+ */
+int sdhci_suspend_host(struct sdhci_host *host)
+{
+	int ret;
+	unsigned long flags;
+
+	if (host->ops->platform_suspend)
+		host->ops->platform_suspend(host);
+
+	sdhci_acquire_ownership(host->mmc);
+
+	sdhci_disable_card_detection(host);
+
+	/* Disable tuning since we are suspending */
+	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
+		del_timer_sync(&host->tuning_timer);
+		host->flags &= ~SDHCI_NEEDS_RETUNING;
+	}
+
+	ret = mmc_suspend_host(host->mmc);
+	if (ret) {
+		/* suspend failed: re-arm retuning and card detect */
+		if (host->flags & SDHCI_USING_RETUNING_TIMER) {
+			host->flags |= SDHCI_NEEDS_RETUNING;
+			mod_timer(&host->tuning_timer, jiffies +
+					host->tuning_count * HZ);
+		}
+
+		sdhci_enable_card_detection(host);
+
+		goto out;
+	}
+
+	if (!device_may_wakeup(mmc_dev(host->mmc))) {
+		sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+		free_irq(host->irq, host);
+	} else {
+		sdhci_enable_irq_wakeups(host);
+		enable_irq_wake(host->irq);
+	}
+
+	/* Card successfully suspended. Tell information to SCU */
+	sdhci_set_emmc_state(host, DEKKER_EMMC_CHIP_SUSPENDED);
+
+	spin_lock_irqsave(&host->lock, flags);
+	host->suspended = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+out:
+	sdhci_release_ownership(host->mmc);
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(sdhci_suspend_host);
+
+int sdhci_resume_host(struct sdhci_host *host)
+{
+ int ret;
+ unsigned long flags;
+
+ sdhci_acquire_ownership(host->mmc);
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
- return ret;
+ goto out;
} else {
sdhci_disable_irq_wakeups(host);
disable_irq_wake(host->irq);
mmiowb();
}
+ spin_lock_irqsave(&host->lock, flags);
+ host->suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
ret = mmc_resume_host(host->mmc);
sdhci_enable_card_detection(host);
if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
+ /* Card back in active state */
+ sdhci_set_emmc_state(host, DEKKER_EMMC_CHIP_ACTIVE);
+out:
+ sdhci_release_ownership(host->mmc);
return ret;
}
unsigned long flags;
int ret = 0;
+ sdhci_do_acquire_ownership(host->mmc);
/* Disable tuning since we are suspending */
if (host->flags & SDHCI_USING_RETUNING_TIMER) {
del_timer_sync(&host->tuning_timer);
host->runtime_suspended = true;
spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_release_ownership(host->mmc);
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
unsigned long flags;
int ret = 0, host_flags = host->flags;
+ sdhci_do_acquire_ownership(host->mmc);
+
if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_release_ownership(host->mmc);
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
+/**
+ * sdhci_try_get_regulator - try requesting the vmmc regulator of
+ * a sdhci device
+ *
+ * We take care of race conditions here between sdhci_add_host() (probe)
+ * and platform code that may kick a retry at anytime during boot.
+ *
+ * Returns 0 when the regulator was acquired, -EAGAIN when it is not
+ * (yet) available or another path already installed it.
+ */
+int sdhci_try_get_regulator(struct sdhci_host *host)
+{
+	struct regulator *vmmc;
+	unsigned long flags;
+	if (!host->vmmc) {
+		vmmc = regulator_get(mmc_dev(host->mmc), "vmmc");
+		if (!IS_ERR(vmmc)) {
+			spin_lock_irqsave(&host->lock, flags);
+			if (!host->vmmc) {
+				host->vmmc = vmmc;
+				spin_unlock_irqrestore(&host->lock, flags);
+				return 0;
+			} else {
+				/* race! we got the regulator twice */
+				spin_unlock_irqrestore(&host->lock, flags);
+				regulator_put(vmmc);
+			}
+		}
+	}
+	return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(sdhci_try_get_regulator);
+
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_POWER_OFF_CARD;
+
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
/* Initial value for re-tuning timer count */
host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+ if (host->tuning_count == 0 && host->ops->get_tuning_count)
+ host->tuning_count = host->ops->get_tuning_count(host);
/*
* In case Re-tuning Timer is not disabled, the actual value of
SDHCI_RETUNING_MODE_SHIFT;
ocr_avail = 0;
-
- host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
- if (IS_ERR_OR_NULL(host->vmmc)) {
- if (PTR_ERR(host->vmmc) < 0) {
- pr_info("%s: no vmmc regulator found\n",
- mmc_hostname(mmc));
- host->vmmc = NULL;
- }
- }
+ spin_lock_init(&host->lock);
#ifdef CONFIG_REGULATOR
+ sdhci_try_get_regulator(host);
/*
* Voltage range check makes sense only if regulator reports
* any voltage value.
SDHCI_MAX_CURRENT_MULTIPLIER;
}
+ if (host->quirks2 & SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8)
+ ocr_avail |= MMC_VDD_20_21;
+ if (host->quirks2 & SDHCI_QUIRK2_ADVERTISE_3V0_FORCE_1V8)
+ ocr_avail |= MMC_VDD_32_33;
+
mmc->ocr_avail = ocr_avail;
mmc->ocr_avail_sdio = ocr_avail;
if (host->ocr_avail_sdio)
return -ENODEV;
}
- spin_lock_init(&host->lock);
-
/*
* Maximum number of segments. Depends on if the hardware
* can do scatter/gather or not.
mmc_hostname(mmc), host->irq, ret);
goto untasklet;
}
+ sdhci_do_acquire_ownership(mmc);
sdhci_init(host, 0);
#endif
#ifdef SDHCI_USE_LEDS_CLASS
- snprintf(host->led_name, sizeof(host->led_name),
- "%s::", mmc_hostname(mmc));
- host->led.name = host->led_name;
- host->led.brightness = LED_OFF;
- host->led.default_trigger = mmc_hostname(mmc);
- host->led.brightness_set = sdhci_led_control;
-
- ret = led_classdev_register(mmc_dev(mmc), &host->led);
- if (ret) {
- pr_err("%s: Failed to register LED device: %d\n",
- mmc_hostname(mmc), ret);
- goto reset;
+ if (mmc->caps2 & MMC_CAP2_LED_SUPPORT) {
+ snprintf(host->led_name, sizeof(host->led_name),
+ "%s::", mmc_hostname(mmc));
+ host->led.name = host->led_name;
+ host->led.brightness = LED_OFF;
+ host->led.default_trigger = mmc_hostname(mmc);
+ host->led.brightness_set = sdhci_led_control;
+
+ ret = led_classdev_register(mmc_dev(mmc), &host->led);
+ if (ret) {
+ pr_err("%s: Failed to register LED device: %d\n",
+ mmc_hostname(mmc), ret);
+ goto reset;
+ }
}
#endif
sdhci_enable_card_detection(host);
+ sdhci_release_ownership(mmc);
+
return 0;
#ifdef SDHCI_USE_LEDS_CLASS
reset:
- sdhci_reset(host, SDHCI_RESET_ALL);
- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
- free_irq(host->irq, host);
+ if (mmc->caps2 & MMC_CAP2_LED_SUPPORT) {
+ sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ free_irq(host->irq, host);
+ }
+ sdhci_release_ownership(mmc);
#endif
untasklet:
tasklet_kill(&host->card_tasklet);
mmc_remove_host(host->mmc);
#ifdef SDHCI_USE_LEDS_CLASS
- led_classdev_unregister(&host->led);
+ if (host->mmc->caps2 & MMC_CAP2_LED_SUPPORT)
+ led_classdev_unregister(&host->led);
#endif
if (!dead)
#define SDHCI_POWER_180 0x0A
#define SDHCI_POWER_300 0x0C
#define SDHCI_POWER_330 0x0E
+#define SDHCI_HW_RESET 0x10
#define SDHCI_BLOCK_GAP_CONTROL 0x2A
#define SDHCI_INT_ERROR 0x00008000
#define SDHCI_INT_TIMEOUT 0x00010000
#define SDHCI_INT_CRC 0x00020000
+#define SDHCI_INT_CMD_CONFLICT 0x00030000
#define SDHCI_INT_END_BIT 0x00040000
#define SDHCI_INT_INDEX 0x00080000
#define SDHCI_INT_DATA_TIMEOUT 0x00100000
#define SDHCI_CTRL_UHS_SDR50 0x0002
#define SDHCI_CTRL_UHS_SDR104 0x0003
#define SDHCI_CTRL_UHS_DDR50 0x0004
-#define SDHCI_CTRL_HS_SDR200 0x0005 /* reserved value in SDIO spec */
+#define SDHCI_CTRL_HS_SDR200 SDHCI_CTRL_UHS_SDR104
+#define SDHCI_CTRL_HS_DDR200 0x0005
#define SDHCI_CTRL_VDD_180 0x0008
#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
#define SDHCI_CTRL_DRV_TYPE_B 0x0000
#define SDHCI_PRESET_FOR_SDR50 0x6A
#define SDHCI_PRESET_FOR_SDR104 0x6C
#define SDHCI_PRESET_FOR_DDR50 0x6E
+#define SDHCI_PRESET_FOR_HS400 0x74
#define SDHCI_PRESET_DRV_MASK 0xC000
#define SDHCI_PRESET_DRV_SHIFT 14
#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
void (*platform_resume)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
void (*platform_init)(struct sdhci_host *host);
+ int (*power_up_host)(struct sdhci_host *host);
+ void (*set_dev_power)(struct sdhci_host *, bool);
+ int (*get_cd)(struct sdhci_host *host);
+ int (*get_tuning_count)(struct sdhci_host *host);
+ int (*gpio_buf_check)(struct sdhci_host *host, unsigned int clk);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
extern void sdhci_card_detect(struct sdhci_host *host);
extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
+extern int sdhci_try_get_regulator(struct sdhci_host *host);
#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host);
extern int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
+extern void sdhci_alloc_panic_host(struct sdhci_host *host);
#endif /* __SDHCI_HW_H */
To compile this driver as a module, choose M here: the module
will be called mwl8k. If unsure, say N.
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ help
+ Enables Power/Reset/Carddetect function abstraction
+
+config WIFI_PLATFORM_DATA
+ bool "Enable WiFi platform data"
+ ---help---
+ Enables platform_wifi
+
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
source "drivers/net/wireless/b43legacy/Kconfig"
# Some architectures use the generic PCI setup functions
#
obj-$(CONFIG_X86) += setup-bus.o
+obj-$(CONFIG_ATOM_SOC_POWER) += pci-atom_soc.o
obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
--- /dev/null
+/*
+ * pci-atom_soc.c - register Intel MID PCI platform ops
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/kernel.h>
+
+#include "pci.h"
+
+/* Every PCI device on these MID platforms is PM-manageable via the PMU. */
+static bool mid_pci_power_manageable(struct pci_dev *dev)
+{
+	return true;
+}
+
+/*
+ * Local stub: always choose D3hot.
+ * NOTE(review): effectively unused - mid_pci_platform_pm initialises
+ * .choose_state twice and the later pmu_pci_choose_state entry wins
+ * (last designated initializer takes effect); confirm and remove.
+ */
+static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
+{
+	return PCI_D3hot;
+}
+
+/* Wake-from-sleep configuration is handled by the SCU; nothing to do. */
+static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+	return 0;
+}
+
+/* Runtime wake configuration is handled by the SCU; nothing to do. */
+static int mid_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+	return 0;
+}
+
+/*
+ * Platform PM ops handed to the PCI core for MID SoCs.  State changes
+ * and D-state selection are delegated to the PMU driver.
+ *
+ * Fix: the original initialised .choose_state twice (first with the
+ * local mid_pci_choose_state stub, then with pmu_pci_choose_state).
+ * C designated-initializer rules make the last entry win, so only the
+ * PMU hook was ever used; keep it and drop the stale duplicate, which
+ * also silences -Woverride-init.
+ */
+static struct pci_platform_pm_ops mid_pci_platform_pm = {
+	.is_manageable	= mid_pci_power_manageable,
+	.choose_state	= pmu_pci_choose_state,
+	.sleep_wake	= mid_pci_sleep_wake,
+	.run_wake	= mid_pci_run_wake,
+	.set_state	= pmu_pci_set_power_state,
+};
+
+/**
+ * mid_pci_init - It registers callback function for all the PCI devices
+ * for platform specific device power on/shutdown activities.
+ */
+static int __init mid_pci_init(void)
+{
+	/* Family 6 only: Atom-based MID SoCs */
+	if (boot_cpu_data.x86 != 6)
+		return 0;
+
+	/*
+	 * n.b. this model check does not uniquely identify the platform,
+	 * and additional checks are necessary inside the pmu driver
+	 */
+	switch (boot_cpu_data.x86_model) {
+	case INTEL_ATOM_MFLD:
+	case INTEL_ATOM_CLV:
+	case INTEL_ATOM_MRFLD:
+		pci_set_platform_pm(&mid_pci_platform_pm);
+		break;
+	}
+
+	return 0;
+}
+arch_initcall(mid_pci_init);
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
+#ifdef CONFIG_ATOM_SOC_POWER
+ ; /* On Intel mid platforms pci delays are handled by SCU */
+#else
udelay(PCI_PM_D2_DELAY);
-
+#endif
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state && printk_ratelimit())
pci_apply_fixup_final_quirks = true;
for_each_pci_dev(dev) {
- pci_fixup_device(pci_fixup_final, dev);
+		if (dev->device == 0x119d)
+			printk("Ignore USB EHCI Tangier (HSIC) since we do not have the driver in the kernel\n");
+ else
+ pci_fixup_device(pci_fixup_final, dev);
+
/*
* If arch hasn't set it explicitly yet, use the CLS
* value shared by all PCI devices. If there's a
IPC is used to bridge the communications between kernel and SCU on
some embedded Intel x86 platforms. This is not needed for PC-type
machines.
+choice
+ prompt "IPC access mode"
+ depends on INTEL_SCU_IPC
+ default INTEL_SCU_IPC_INTR_MODE
+ ---help---
+ Select the desired access mode for IPC call.
+
+config INTEL_SCU_IPC_INTR_MODE
+ bool "Intel SCU IPC interrupt mode"
+
+config INTEL_SCU_IPC_POLL_MODE
+ bool "Intel SCU IPC polling mode"
+
+endchoice
config INTEL_SCU_IPC_UTIL
tristate "Intel SCU IPC utility driver"
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+config INTEL_SCU_FLIS
+ bool "scu flis driver config"
+ depends on INTEL_SCU_IPC
+ default y
+ help
+ This driver builds the SCU Flis Access Sysfs Interfaces.
+ We could read write the flis address and configure the
+ pin pull up/down using these interfaces.
+
+config INTEL_PSH_IPC
+ bool "Intel PSH IPC Support"
+ depends on X86_INTEL_MID
+ ---help---
+	  PSH (Platform Services Hub) is a low-frequency IA core on the Tangier
+	  platform with very low power consumption. PSH runs RTOS software inside
+	  itself, which independently controls and collects sensor data,
+	  pre-processes the data, and communicates with the Atom core. Thus the
+	  Atom side can stay in low power mode longer, while all the sensor data
+	  are collected without any loss.
+
+	  PSH IPC is used as a bridge for the OS sensor service to control and
+	  access PSH sensors; it carries the communications between the kernel
+	  and PSH. This is not needed for PC-type machines.
+
+ Say Y here to get Intel PSH IPC support.
+
+
endif # X86_PLATFORM_DEVICES
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
-obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o intel_scu_pmic.o intel_scu_mip.o intel_scu_fw_update.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
+obj-$(CONFIG_INTEL_SCU_FLIS) += intel_scu_flis.o
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o
obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
+
+obj-$(CONFIG_INTEL_PSH_IPC) += intel_psh_ipc.o
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/input.h>
-#include <linux/mfd/intel_msic.h>
+#include <linux/io.h>
+#include <linux/rpmsg.h>
+#include <linux/async.h>
+#include <asm/intel_mid_powerbtn.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
#define DRIVER_NAME "msic_power_btn"
-#define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
+struct mid_pb_priv {
+ struct input_dev *input;
+ int irq;
+ void __iomem *pb_stat;
+ u16 pb_level;
+ u16 irq_lvl1_mask;
+ bool irq_ack;
+};
-/*
- * MSIC document ti_datasheet defines the 1st bit reg 0x21 is used to mask
- * power button interrupt
- */
-#define MSIC_PWRBTNM (1 << 0)
+/* Clear @mask bits in the PMIC register at @addr via SCU IPC (may sleep). */
+static inline int pb_clear_bits(u16 addr, u8 mask)
+{
+	return intel_scu_ipc_update_register(addr, 0, mask);
+}
+/*
+ * Hard irq half of the power-button handler: read the status register,
+ * report press/release (bit meaning given by pdata->pb_level: set bit
+ * means released), then wake the threaded half to ack via SCU IPC.
+ */
-static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
+static irqreturn_t mid_pb_isr(int irq, void *dev_id)
 {
- struct input_dev *input = dev_id;
- int ret;
+ struct mid_pb_priv *priv = dev_id;
  u8 pbstat;
- ret = intel_msic_reg_read(INTEL_MSIC_PBSTATUS, &pbstat);
- dev_dbg(input->dev.parent, "PB_INT status= %d\n", pbstat);
+ pbstat = readb(priv->pb_stat);
+ dev_dbg(&priv->input->dev, "pbstat: 0x%x\n", pbstat);
- if (ret < 0) {
- dev_err(input->dev.parent, "Read error %d while reading"
- " MSIC_PB_STATUS\n", ret);
- } else {
- input_event(input, EV_KEY, KEY_POWER,
- !(pbstat & MSIC_PB_LEVEL));
- input_sync(input);
- }
+ input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & priv->pb_level));
+ input_sync(priv->input);
+
+ if (pbstat & priv->pb_level)
+ pr_info("[%s] power button released\n", priv->input->name);
+ else
+ pr_info("[%s] power button pressed\n", priv->input->name);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Threaded half of the power-button ISR: when the platform needs an
+ * explicit ack, unmask the level-1 power-button interrupt through SCU
+ * IPC (sleeps, hence the threaded context).
+ */
+static irqreturn_t mid_pb_threaded_isr(int irq, void *dev_id)
+{
+ struct mid_pb_priv *priv = dev_id;
+
+ if (priv->irq_ack)
+ pb_clear_bits(priv->irq_lvl1_mask, MSIC_PWRBTNM);
 return IRQ_HANDLED;
 }
-static int mfld_pb_probe(struct platform_device *pdev)
+static int mid_pb_probe(struct platform_device *pdev)
{
struct input_dev *input;
- int irq = platform_get_irq(pdev, 0);
- int error;
+ struct mid_pb_priv *priv;
+ int irq;
+ int ret;
+ struct intel_msic_power_btn_platform_data *pdata;
+
+ if (pdev == NULL)
+ return -ENODEV;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "No power button platform data\n");
+ return -EINVAL;
+ }
+
+	dev_info(&pdev->dev, "Probed mid powerbutton device\n");
+ irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
+ priv = kzalloc(sizeof(struct mid_pb_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
input = input_allocate_device();
if (!input) {
- dev_err(&pdev->dev, "Input device allocation error\n");
+ kfree(priv);
return -ENOMEM;
}
+ priv->input = input;
+ priv->irq = irq;
+ platform_set_drvdata(pdev, priv);
+
input->name = pdev->name;
input->phys = "power-button/input0";
input->id.bustype = BUS_HOST;
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
- DRIVER_NAME, input);
- if (error) {
- dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
- "button\n", irq);
- goto err_free_input;
+ priv->pb_stat = ioremap(pdata->pbstat, MSIC_PB_LEN);
+ if (!priv->pb_stat) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input dev, error %d\n", ret);
+ goto out_iounmap;
}
- error = input_register_device(input);
- if (error) {
- dev_err(&pdev->dev, "Unable to register input dev, error "
- "%d\n", error);
- goto err_free_irq;
+ priv->pb_level = pdata->pb_level;
+ priv->irq_lvl1_mask = pdata->irq_lvl1_mask;
+
+ /* Unmask the PBIRQ and MPBIRQ on Tangier */
+ if (pdata->irq_ack) {
+ pdata->irq_ack(pdata);
+ priv->irq_ack = true;
}
- platform_set_drvdata(pdev, input);
+ ret = request_threaded_irq(priv->irq, mid_pb_isr, mid_pb_threaded_isr,
+ IRQF_NO_SUSPEND, DRIVER_NAME, priv);
- /*
- * SCU firmware might send power button interrupts to IA core before
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to request irq %d for power button\n", irq);
+ goto out_unregister_input;
+ }
+
+ /* SCU firmware might send power button interrupts to IA core before
* kernel boots and doesn't get EOI from IA core. The first bit of
- * MSIC reg 0x21 is kept masked, and SCU firmware doesn't send new
+ * MSIC lvl1 mask reg is kept masked, and SCU firmware doesn't send new
* power interrupt to Android kernel. Unmask the bit when probing
* power button in kernel.
- * There is a very narrow race between irq handler and power button
- * initialization. The race happens rarely. So we needn't worry
- * about it.
*/
- error = intel_msic_reg_update(INTEL_MSIC_IRQLVL1MSK, 0, MSIC_PWRBTNM);
- if (error) {
- dev_err(&pdev->dev, "Unable to clear power button interrupt, "
- "error: %d\n", error);
- goto err_free_irq;
- }
+ pb_clear_bits(priv->irq_lvl1_mask, MSIC_PWRBTNM);
return 0;
-err_free_irq:
- free_irq(irq, input);
-err_free_input:
+out_unregister_input:
+ input_unregister_device(input);
+ input = NULL;
+out_iounmap:
+ iounmap(priv->pb_stat);
+fail:
+ platform_set_drvdata(pdev, NULL);
input_free_device(input);
- return error;
+ kfree(priv);
+ return ret;
}
-static int mfld_pb_remove(struct platform_device *pdev)
+static int mid_pb_remove(struct platform_device *pdev)
{
- struct input_dev *input = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
+ struct mid_pb_priv *priv = platform_get_drvdata(pdev);
- free_irq(irq, input);
- input_unregister_device(input);
+ iounmap(priv->pb_stat);
+ free_irq(priv->irq, priv);
platform_set_drvdata(pdev, NULL);
+ input_unregister_device(priv->input);
+ kfree(priv);
return 0;
}
-static struct platform_driver mfld_pb_driver = {
+static const struct platform_device_id mid_pb_table[] = {
+ {"mid_powerbtn", 1},
+};
+
+static struct platform_driver mid_pb_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
- .probe = mfld_pb_probe,
- .remove = mfld_pb_remove,
+ .probe = mid_pb_probe,
+ .remove = mid_pb_remove,
+ .id_table = mid_pb_table,
};
-module_platform_driver(mfld_pb_driver);
+static int __init mid_pb_module_init(void)
+{
+ return platform_driver_register(&mid_pb_driver);
+}
+
+static void mid_pb_module_exit(void)
+{
+ platform_driver_unregister(&mid_pb_driver);
+}
+
+/* RPMSG related functionality */
+
+static int mid_pb_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+ int ret = 0;
+ if (rpdev == NULL) {
+ pr_err("rpmsg channel not created\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&rpdev->dev, "Probed mid_pb rpmsg device\n");
+
+ ret = mid_pb_module_init();
+out:
+ return ret;
+}
+
+
+static void mid_pb_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+ mid_pb_module_exit();
+ dev_info(&rpdev->dev, "Removed mid_pb rpmsg device\n");
+}
+
+static void mid_pb_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+}
+
+static struct rpmsg_device_id mid_pb_id_table[] = {
+ { .name = "rpmsg_mid_powerbtn" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, mid_pb_id_table);
+
+
+static struct rpmsg_driver mid_pb_rpmsg_driver = {
+ .drv.name = DRIVER_NAME,
+ .drv.owner = THIS_MODULE,
+ .probe = mid_pb_rpmsg_probe,
+ .callback = mid_pb_rpmsg_cb,
+ .remove = mid_pb_rpmsg_remove,
+ .id_table = mid_pb_id_table,
+};
+
+static int __init mid_pb_rpmsg_init(void)
+{
+ return register_rpmsg_driver(&mid_pb_rpmsg_driver);
+}
+
+static void __exit mid_pb_rpmsg_exit(void)
+{
+ return unregister_rpmsg_driver(&mid_pb_rpmsg_driver);
+}
+
+late_initcall(mid_pb_rpmsg_init);
+
+module_exit(mid_pb_rpmsg_exit);
MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>");
MODULE_DESCRIPTION("Intel Medfield Power Button Driver");
+++ /dev/null
-/*
- * intel_mid_thermal.c - Intel MID platform thermal driver
- *
- * Copyright (C) 2011 Intel Corporation
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * Author: Durgadoss R <durgadoss.r@intel.com>
- */
-
-#define pr_fmt(fmt) "intel_mid_thermal: " fmt
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/param.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/pm.h>
-#include <linux/thermal.h>
-#include <linux/mfd/intel_msic.h>
-
-/* Number of thermal sensors */
-#define MSIC_THERMAL_SENSORS 4
-
-/* ADC1 - thermal registers */
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
-
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-#define MSIC_CHANL_MASK_VAL 0x0F
-
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-/* Number of ADC channels */
-#define ADC_CHANLS_MAX 15
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
-
-/* ADC channel code values */
-#define SKIN_SENSOR0_CODE 0x08
-#define SKIN_SENSOR1_CODE 0x09
-#define SYS_SENSOR_CODE 0x0A
-#define MSIC_DIE_SENSOR_CODE 0x03
-
-#define SKIN_THERM_SENSOR0 0
-#define SKIN_THERM_SENSOR1 1
-#define SYS_THERM_SENSOR2 2
-#define MSIC_DIE_THERM_SENSOR3 3
-
-/* ADC code range */
-#define ADC_MAX 977
-#define ADC_MIN 162
-#define ADC_VAL0C 887
-#define ADC_VAL20C 720
-#define ADC_VAL40C 508
-#define ADC_VAL60C 315
-
-/* ADC base addresses */
-#define ADC_CHNL_START_ADDR INTEL_MSIC_ADC1ADDR0 /* increments by 1 */
-#define ADC_DATA_START_ADDR INTEL_MSIC_ADC1SNS0H /* increments by 2 */
-
-/* MSIC die attributes */
-#define MSIC_DIE_ADC_MIN 488
-#define MSIC_DIE_ADC_MAX 1004
-
-/* This holds the address of the first free ADC channel,
- * among the 15 channels
- */
-static int channel_index;
-
-struct platform_info {
- struct platform_device *pdev;
- struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
-};
-
-struct thermal_device_info {
- unsigned int chnl_addr;
- int direct;
- /* This holds the current temperature in millidegree celsius */
- long curr_temp;
-};
-
-/**
- * to_msic_die_temp - converts adc_val to msic_die temperature
- * @adc_val: ADC value to be converted
- *
- * Can sleep
- */
-static int to_msic_die_temp(uint16_t adc_val)
-{
- return (368 * (adc_val) / 1000) - 220;
-}
-
-/**
- * is_valid_adc - checks whether the adc code is within the defined range
- * @min: minimum value for the sensor
- * @max: maximum value for the sensor
- *
- * Can sleep
- */
-static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
-{
- return (adc_val >= min) && (adc_val <= max);
-}
-
-/**
- * adc_to_temp - converts the ADC code to temperature in C
- * @direct: true if ths channel is direct index
- * @adc_val: the adc_val that needs to be converted
- * @tp: temperature return value
- *
- * Linear approximation is used to covert the skin adc value into temperature.
- * This technique is used to avoid very long look-up table to get
- * the appropriate temp value from ADC value.
- * The adc code vs sensor temp curve is split into five parts
- * to achieve very close approximate temp value with less than
- * 0.5C error
- */
-static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
-{
- int temp;
-
- /* Direct conversion for die temperature */
- if (direct) {
- if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
- *tp = to_msic_die_temp(adc_val) * 1000;
- return 0;
- }
- return -ERANGE;
- }
-
- if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
- return -ERANGE;
-
- /* Linear approximation for skin temperature */
- if (adc_val > ADC_VAL0C)
- temp = 177 - (adc_val/5);
- else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
- temp = 111 - (adc_val/8);
- else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
- temp = 92 - (adc_val/10);
- else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
- temp = 91 - (adc_val/10);
- else
- temp = 112 - (adc_val/6);
-
- /* Convert temperature in celsius to milli degree celsius */
- *tp = temp * 1000;
- return 0;
-}
-
-/**
- * mid_read_temp - read sensors for temperature
- * @temp: holds the current temperature for the sensor after reading
- *
- * reads the adc_code from the channel and converts it to real
- * temperature. The converted value is stored in temp.
- *
- * Can sleep
- */
-static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
-{
- struct thermal_device_info *td_info = tzd->devdata;
- uint16_t adc_val, addr;
- uint8_t data = 0;
- int ret;
- unsigned long curr_temp;
-
-
- addr = td_info->chnl_addr;
-
- /* Enable the msic for conversion before reading */
- ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
- if (ret)
- return ret;
-
- /* Re-toggle the RRDATARD bit (temporary workaround) */
- ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
- if (ret)
- return ret;
-
- /* Read the higher bits of data */
- ret = intel_msic_reg_read(addr, &data);
- if (ret)
- return ret;
-
- /* Shift bits to accommodate the lower two data bits */
- adc_val = (data << 2);
- addr++;
-
- ret = intel_msic_reg_read(addr, &data);/* Read lower bits */
- if (ret)
- return ret;
-
- /* Adding lower two bits to the higher bits */
- data &= 03;
- adc_val += data;
-
- /* Convert ADC value to temperature */
- ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
- if (ret == 0)
- *temp = td_info->curr_temp = curr_temp;
- return ret;
-}
-
-/**
- * configure_adc - enables/disables the ADC for conversion
- * @val: zero: disables the ADC non-zero:enables the ADC
- *
- * Enable/Disable the ADC depending on the argument
- *
- * Can sleep
- */
-static int configure_adc(int val)
-{
- int ret;
- uint8_t data;
-
- ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if (val) {
- /* Enable and start the ADC */
- data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- } else {
- /* Just stop the ADC */
- data &= (~MSIC_ADC_START);
- }
- return intel_msic_reg_write(INTEL_MSIC_ADC1CNTL1, data);
-}
-
-/**
- * set_up_therm_channel - enable thermal channel for conversion
- * @base_addr: index of free msic ADC channel
- *
- * Enable all the three channels for conversion
- *
- * Can sleep
- */
-static int set_up_therm_channel(u16 base_addr)
-{
- int ret;
-
- /* Enable all the sensor channels */
- ret = intel_msic_reg_write(base_addr, SKIN_SENSOR0_CODE);
- if (ret)
- return ret;
-
- ret = intel_msic_reg_write(base_addr + 1, SKIN_SENSOR1_CODE);
- if (ret)
- return ret;
-
- ret = intel_msic_reg_write(base_addr + 2, SYS_SENSOR_CODE);
- if (ret)
- return ret;
-
- /* Since this is the last channel, set the stop bit
- * to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
- ret = intel_msic_reg_write(base_addr + 3,
- (MSIC_DIE_SENSOR_CODE | 0x10));
- if (ret)
- return ret;
-
- /* Enable ADC and start it */
- return configure_adc(1);
-}
-
-/**
- * reset_stopbit - sets the stop bit to 0 on the given channel
- * @addr: address of the channel
- *
- * Can sleep
- */
-static int reset_stopbit(uint16_t addr)
-{
- int ret;
- uint8_t data;
- ret = intel_msic_reg_read(addr, &data);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- return intel_msic_reg_write(addr, (data & 0xEF));
-}
-
-/**
- * find_free_channel - finds an empty channel for conversion
- *
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. returns the index
- * of the first free channel if succeeds or an error code.
- *
- * Context: can sleep
- *
- * FIXME: Ultimately the channel allocator will move into the intel_scu_ipc
- * code.
- */
-static int find_free_channel(void)
-{
- int ret;
- int i;
- uint8_t data;
-
- /* check whether ADC is enabled */
- ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if ((data & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
- ret = intel_msic_reg_read(ADC_CHNL_START_ADDR + i, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
-}
-
-/**
- * mid_initialize_adc - initializing the ADC
- * @dev: our device structure
- *
- * Initialize the ADC for reading thermistor values. Can sleep.
- */
-static int mid_initialize_adc(struct device *dev)
-{
- u8 data;
- u16 base_addr;
- int ret;
-
- /*
- * Ensure that adctherm is disabled before we
- * initialize the ADC
- */
- ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL3, &data);
- if (ret)
- return ret;
-
- data &= ~MSIC_ADCTHERM_MASK;
- ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, data);
- if (ret)
- return ret;
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
- dev_err(dev, "No free ADC channels");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- ret = set_up_therm_channel(base_addr);
- if (ret) {
- dev_err(dev, "unable to enable ADC");
- return ret;
- }
- dev_dbg(dev, "ADC initialization successful");
- return ret;
-}
-
-/**
- * initialize_sensor - sets default temp and timer ranges
- * @index: index of the sensor
- *
- * Context: can sleep
- */
-static struct thermal_device_info *initialize_sensor(int index)
-{
- struct thermal_device_info *td_info =
- kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
-
- if (!td_info)
- return NULL;
-
- /* Set the base addr of the channel for this sensor */
- td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
- /* Sensor 3 is direct conversion */
- if (index == 3)
- td_info->direct = 1;
- return td_info;
-}
-
-/**
- * mid_thermal_resume - resume routine
- * @dev: device structure
- *
- * mid thermal resume: re-initializes the adc. Can sleep.
- */
-static int mid_thermal_resume(struct device *dev)
-{
- return mid_initialize_adc(dev);
-}
-
-/**
- * mid_thermal_suspend - suspend routine
- * @dev: device structure
- *
- * mid thermal suspend implements the suspend functionality
- * by stopping the ADC. Can sleep.
- */
-static int mid_thermal_suspend(struct device *dev)
-{
- /*
- * This just stops the ADC and does not disable it.
- * temporary workaround until we have a generic ADC driver.
- * If 0 is passed, it disables the ADC.
- */
- return configure_adc(0);
-}
-
-static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
- mid_thermal_suspend, mid_thermal_resume);
-
-/**
- * read_curr_temp - reads the current temperature and stores in temp
- * @temp: holds the current temperature value after reading
- *
- * Can sleep
- */
-static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
-{
- WARN_ON(tzd == NULL);
- return mid_read_temp(tzd, temp);
-}
-
-/* Can't be const */
-static struct thermal_zone_device_ops tzd_ops = {
- .get_temp = read_curr_temp,
-};
-
-/**
- * mid_thermal_probe - mfld thermal initialize
- * @pdev: platform device structure
- *
- * mid thermal probe initializes the hardware and registers
- * all the sensors with the generic thermal framework. Can sleep.
- */
-static int mid_thermal_probe(struct platform_device *pdev)
-{
- static char *name[MSIC_THERMAL_SENSORS] = {
- "skin0", "skin1", "sys", "msicdie"
- };
-
- int ret;
- int i;
- struct platform_info *pinfo;
-
- pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
-
- /* Initializing the hardware */
- ret = mid_initialize_adc(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "ADC init failed");
- kfree(pinfo);
- return ret;
- }
-
- /* Register each sensor with the generic thermal framework*/
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
- struct thermal_device_info *td_info = initialize_sensor(i);
-
- if (!td_info) {
- ret = -ENOMEM;
- goto err;
- }
- pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, 0, td_info, &tzd_ops, NULL, 0, 0);
- if (IS_ERR(pinfo->tzd[i])) {
- kfree(td_info);
- ret = PTR_ERR(pinfo->tzd[i]);
- goto err;
- }
- }
-
- pinfo->pdev = pdev;
- platform_set_drvdata(pdev, pinfo);
- return 0;
-
-err:
- while (--i >= 0) {
- kfree(pinfo->tzd[i]->devdata);
- thermal_zone_device_unregister(pinfo->tzd[i]);
- }
- configure_adc(0);
- kfree(pinfo);
- return ret;
-}
-
-/**
- * mid_thermal_remove - mfld thermal finalize
- * @dev: platform device structure
- *
- * MLFD thermal remove unregisters all the sensors from the generic
- * thermal framework. Can sleep.
- */
-static int mid_thermal_remove(struct platform_device *pdev)
-{
- int i;
- struct platform_info *pinfo = platform_get_drvdata(pdev);
-
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
- kfree(pinfo->tzd[i]->devdata);
- thermal_zone_device_unregister(pinfo->tzd[i]);
- }
-
- kfree(pinfo);
- platform_set_drvdata(pdev, NULL);
-
- /* Stop the ADC */
- return configure_adc(0);
-}
-
-#define DRIVER_NAME "msic_thermal"
-
-static const struct platform_device_id therm_id_table[] = {
- { DRIVER_NAME, 1 },
- { "msic_thermal", 1 },
- { }
-};
-
-static struct platform_driver mid_thermal_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- .pm = &mid_thermal_pm,
- },
- .probe = mid_thermal_probe,
- .remove = mid_thermal_remove,
- .id_table = therm_id_table,
-};
-
-module_platform_driver(mid_thermal_driver);
-
-MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
-MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
-MODULE_LICENSE("GPL");
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
#include <linux/device.h>
#include <linux/intel_pmic_gpio.h>
#include <linux/platform_device.h>
--- /dev/null
+/*
+ * intel_psh_ipc.c: Driver for the Intel PSH IPC mechanism
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Yang Bin (bin.yang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <asm/intel_psh_ipc.h>
+#include <asm/intel-mid.h>
+#include <linux/fs.h>
+#include <linux/intel_mid_pm.h>
+
+#define PSH_ERR(fmt, arg...) dev_err(&ipc_ctrl.pdev->dev, fmt, ##arg)
+#define PSH_DBG(fmt, arg...) dev_dbg(&ipc_ctrl.pdev->dev, fmt, ##arg)
+
+#define STATUS_PSH2IA(x) (1 << ((x) + 6))
+#define FLAG_BIND (1 << 0)
+
+#define PIMR_ADDR(x) (&ipc_ctrl.psh_regs->psh_regs_b_step.pimr##x)
+
+#define PSH_REG_ADDR(x) (&ipc_ctrl.psh_regs->psh_regs_b_step.x)
+
+#define PSH_CH_HANDLE(x) (ipc_ctrl.channel_handle[x])
+#define PSH_CH_DATA(x) (ipc_ctrl.channel_data[x])
+#define PSH_CH_FLAG(x) (ipc_ctrl.flags[x])
+
+/* PSH registers */
+union psh_registers {
+ /* reg mem map A */
+ struct {
+ u32 csr; /* 00h */
+ u32 res1; /* padding */
+ u32 pisr; /* 08h */
+ u32 pimr0; /* 0Ch */
+ u32 pimr1; /* 10h */
+ u32 pimr2; /* 14h */
+ u32 pimr3; /* 18h */
+ u32 pmctl; /* 1Ch */
+ u32 pmstat; /* 20h */
+ u32 res2; /* padding */
+ struct psh_msg ia2psh[NUM_IA2PSH_IPC];/* 28h ~ 44h + 3 */
+ struct psh_msg cry2psh;/* 48h ~ 4Ch + 3 */
+ struct psh_msg scu2psh;/* 50h ~ 54h + 3 */
+ u32 res3[2];/* padding */
+ struct psh_msg psh2ia[NUM_PSH2IA_IPC];/* 60h ~ 7Ch + 3 */
+ struct psh_msg psh2cry;/* 80h ~ 84h + 3 */
+ struct psh_msg psh2scu;/* 88h */
+ u32 msi_dir;/* 90h */
+ u32 res4[3];
+ u32 scratchpad[2];/* A0 */
+ } __packed psh_regs_a_step;
+ /* reg mem map B */
+ struct {
+ u32 pimr0; /* 00h */
+ u32 csr; /* 04h */
+ u32 pmctl; /* 08h */
+ u32 pmstat; /* 0Ch */
+ u32 psh_msi_direct; /* 10h */
+ u32 res1[59]; /* 14h ~ FCh + 3, padding */
+ u32 pimr3; /* 100h */
+ struct psh_msg scu2psh; /* 104h ~ 108h + 3 */
+ struct psh_msg psh2scu; /* 10Ch ~ 110h + 3 */
+ u32 res2[187]; /* 114h ~ 3FCh + 3, padding */
+ u32 pisr; /* 400h */
+ u32 scratchpad[2]; /* 404h ~ 407h */
+ u32 res3[61]; /* 40Ch ~ 4FCh + 3, padding */
+ u32 pimr1; /* 500h */
+ struct psh_msg ia2psh[NUM_IA2PSH_IPC]; /* 504h ~ 520h + 3 */
+ struct psh_msg psh2ia[NUM_PSH2IA_IPC]; /* 524h ~ 540h + 3 */
+ u32 res4[175]; /* 544h ~ 7FCh + 3, padding */
+ u32 pimr2; /* 800h */
+ struct psh_msg cry2psh; /* 804h ~ 808h + 3 */
+ struct psh_msg psh2cry; /* 80Ch ~ 810h + 3 */
+ } __packed psh_regs_b_step;
+} __packed;
+
+static struct ipc_controller_t {
+ int reg_map;
+ int initialized;
+ struct pci_dev *pdev;
+ spinlock_t lock;
+ int flags[NUM_ALL_CH];
+ union psh_registers *psh_regs;
+ struct semaphore ch_lock[NUM_ALL_CH];
+ struct mutex psh_mutex;
+ psh_channel_handle_t channel_handle[NUM_PSH2IA_IPC];
+ void *channel_data[NUM_PSH2IA_IPC];
+} ipc_ctrl;
+
+
+/**
+ * intel_ia2psh_command - send IA to PSH command
+ * Send ia2psh command and return psh message and status
+ *
+ * @in: input psh message
+ * @out: output psh message
+ * @ch: psh channel
+ * @timeout: timeout for polling busy bit, in us
+ */
+int intel_ia2psh_command(struct psh_msg *in, struct psh_msg *out,
+ int ch, int timeout)
+{
+ int ret = 0;
+ u32 status;
+
+ might_sleep();
+
+ if (!ipc_ctrl.initialized)
+ return -ENODEV;
+
+ if (ch < PSH_SEND_CH0 || ch > PSH_SEND_CH0 + NUM_IA2PSH_IPC - 1
+ || in == NULL)
+ return -EINVAL;
+
+ if (!in || in->msg & CHANNEL_BUSY)
+ return -EINVAL;
+
+ pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+ down(&ipc_ctrl.ch_lock[ch]);
+
+ in->msg |= CHANNEL_BUSY;
+ /* Check if channel is ready for IA sending command */
+
+ if (readl(PSH_REG_ADDR(ia2psh[ch].msg)) & CHANNEL_BUSY) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ writel(in->param, PSH_REG_ADDR(ia2psh[ch].param));
+ writel(in->msg, PSH_REG_ADDR(ia2psh[ch].msg));
+
+ /* Input timeout is zero, do not check channel status */
+ if (timeout == 0)
+ goto end;
+
+ /* Input timeout is nonzero, check channel status */
+ while (((status = readl(PSH_REG_ADDR(ia2psh[ch].msg))) & CHANNEL_BUSY)
+ && timeout) {
+ usleep_range(100, 101);
+ timeout -= 100;
+ }
+
+ if (timeout <= 0) {
+ ret = -ETIMEDOUT;
+ PSH_ERR("ia2psh channel %d is always busy!\n", ch);
+ goto end;
+ } else {
+ if (out == NULL)
+ goto end;
+
+ out->param = readl(PSH_REG_ADDR(ia2psh[ch].param));
+ out->msg = status;
+ }
+
+end:
+ up(&ipc_ctrl.ch_lock[ch]);
+ pm_runtime_put(&ipc_ctrl.pdev->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(intel_ia2psh_command);
+
+/**
+ * intel_psh_ipc_bind - bind a handler to a psh channel
+ *
+ * @ch: psh channel
+ * @handle: handle function called when IA received psh interrupt
+ * @data: data passed to handle
+ */
+int intel_psh_ipc_bind(int ch, psh_channel_handle_t handle, void *data)
+{
+ unsigned long flags;
+
+ if (!ipc_ctrl.initialized)
+ return -ENODEV;
+
+ if (!handle || ch < PSH_RECV_CH0
+ || ch > PSH_RECV_CH0 + NUM_PSH2IA_IPC - 1)
+ return -EINVAL;
+
+ mutex_lock(&ipc_ctrl.psh_mutex);
+ down(&ipc_ctrl.ch_lock[ch]);
+ if (PSH_CH_HANDLE(ch - PSH_RECV_CH0) != NULL) {
+ up(&ipc_ctrl.ch_lock[ch]);
+ mutex_unlock(&ipc_ctrl.psh_mutex);
+ return -EBUSY;
+ } else {
+ PSH_CH_DATA(ch - PSH_RECV_CH0) = data;
+ PSH_CH_HANDLE(ch - PSH_RECV_CH0) = handle;
+ }
+ up(&ipc_ctrl.ch_lock[ch]);
+
+ pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+ spin_lock_irqsave(&ipc_ctrl.lock, flags);
+ PSH_CH_FLAG(ch) |= FLAG_BIND;
+ writel(readl(PIMR_ADDR(1)) | (1 << (ch - PSH_RECV_CH0)), PIMR_ADDR(1));
+ spin_unlock_irqrestore(&ipc_ctrl.lock, flags);
+ pm_runtime_put(&ipc_ctrl.pdev->dev);
+ mutex_unlock(&ipc_ctrl.psh_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(intel_psh_ipc_bind);
+
+/**
+ * intel_psh_ipc_unbind - unbind a handler to a psh channel
+ *
+ * @ch: psh channel
+ */
+void intel_psh_ipc_unbind(int ch)
+{
+ unsigned long flags;
+
+ if (!ipc_ctrl.initialized)
+ return;
+
+ if (ch < PSH_RECV_CH0 || ch > PSH_RECV_CH0 + NUM_PSH2IA_IPC - 1)
+ return;
+
+ if (!(PSH_CH_FLAG(ch) & FLAG_BIND))
+ return;
+
+ mutex_lock(&ipc_ctrl.psh_mutex);
+ pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+ spin_lock_irqsave(&ipc_ctrl.lock, flags);
+ PSH_CH_FLAG(ch) &= ~FLAG_BIND;
+ writel(readl(PIMR_ADDR(1)) & (~(1 << (ch - PSH_RECV_CH0))),
+ PIMR_ADDR(1));
+ spin_unlock_irqrestore(&ipc_ctrl.lock, flags);
+ pm_runtime_put(&ipc_ctrl.pdev->dev);
+
+ down(&ipc_ctrl.ch_lock[ch]);
+ PSH_CH_HANDLE(ch - PSH_RECV_CH0) = NULL;
+ up(&ipc_ctrl.ch_lock[ch]);
+ mutex_unlock(&ipc_ctrl.psh_mutex);
+}
+EXPORT_SYMBOL(intel_psh_ipc_unbind);
+
+void intel_psh_ipc_disable_irq(void)
+{
+ disable_irq(ipc_ctrl.pdev->irq);
+}
+EXPORT_SYMBOL(intel_psh_ipc_disable_irq);
+
+void intel_psh_ipc_enable_irq(void)
+{
+ enable_irq(ipc_ctrl.pdev->irq);
+}
+EXPORT_SYMBOL(intel_psh_ipc_enable_irq);
+
+static void psh_recv_handle(int i)
+{
+ int msg, param;
+
+ down(&ipc_ctrl.ch_lock[i + PSH_RECV_CH0]);
+
+ msg = readl(PSH_REG_ADDR(psh2ia[i].msg)) & (~CHANNEL_BUSY);
+ param = readl(PSH_REG_ADDR(psh2ia[i].param));
+
+ if (PSH_CH_HANDLE(i) == NULL) {
+ PSH_ERR("Ignore message from channel %d\n", i+PSH_RECV_CH0);
+ goto end;
+ }
+
+ /* write back to clear the busy bit */
+ writel(msg, PSH_REG_ADDR(psh2ia[i].msg));
+ PSH_CH_HANDLE(i)(msg, param, PSH_CH_DATA(i));
+end:
+ up(&ipc_ctrl.ch_lock[i+PSH_RECV_CH0]);
+}
+
+static irqreturn_t psh_ipc_irq(int irq, void *data)
+{
+ int i;
+ u32 status;
+
+ pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+ status = readl(PSH_REG_ADDR(pisr));
+
+ for (i = 0; i < NUM_PSH2IA_IPC; i++) {
+ if (status & STATUS_PSH2IA(i))
+ psh_recv_handle(i);
+ }
+
+ pm_runtime_put(&ipc_ctrl.pdev->dev);
+ return IRQ_HANDLED;
+}
+
+static void psh_regs_dump(void)
+{
+ int i;
+
+ pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+ PSH_ERR("\n<-------------start------------>\n");
+
+ PSH_ERR("csr:\t%#x\n", readl(PSH_REG_ADDR(csr)));
+ PSH_ERR("pisr:\t%#x\n", readl(PSH_REG_ADDR(pisr)));
+
+ PSH_ERR("pimr0:\t%#x\n", readl(PIMR_ADDR(0)));
+ PSH_ERR("pimr1:\t%#x\n", readl(PIMR_ADDR(1)));
+ PSH_ERR("pimr2:\t%#x\n", readl(PIMR_ADDR(2)));
+ PSH_ERR("pimr3:\t%#x\n", readl(PIMR_ADDR(3)));
+
+ PSH_ERR("pmctl:\t%#x\n", readl(PSH_REG_ADDR(pmctl)));
+ PSH_ERR("pmstat:\t%#x\n", readl(PSH_REG_ADDR(pmstat)));
+ PSH_ERR("scratchpad0:\t%#x\n", readl(PSH_REG_ADDR(scratchpad[0])));
+ PSH_ERR("scratchpad1:\t%#x\n", readl(PSH_REG_ADDR(scratchpad[1])));
+
+ for (i = 0; i < NUM_IA2PSH_IPC; i++) {
+ PSH_ERR("ia2psh[%d].msg:\t%#x\n", i,
+ readl(PSH_REG_ADDR(ia2psh[i].msg)));
+ PSH_ERR("ia2psh[%d].param:\t%#x\n", i,
+ readl(PSH_REG_ADDR(ia2psh[i].param)));
+ }
+
+ PSH_ERR("cry2psh.msg:\t%#x\n", readl(PSH_REG_ADDR(cry2psh.msg)));
+ PSH_ERR("cry2psh.param:\t%#x\n", readl(PSH_REG_ADDR(cry2psh.param)));
+ PSH_ERR("scu2psh.msg:\t%#x\n", readl(PSH_REG_ADDR(scu2psh.msg)));
+ PSH_ERR("scu2psh.param:\t%#x\n", readl(PSH_REG_ADDR(scu2psh.param)));
+
+ for (i = 0; i < NUM_PSH2IA_IPC; i++) {
+ PSH_ERR("psh2ia[%d].msg:\t%#x\n", i,
+ readl(PSH_REG_ADDR(psh2ia[i].msg)));
+ PSH_ERR("psh2ia[%d].param:\t%#x\n", i,
+ readl(PSH_REG_ADDR(psh2ia[i].param)));
+ }
+
+ PSH_ERR("psh2cry.msg:\t%#x\n", readl(PSH_REG_ADDR(psh2cry.msg)));
+ PSH_ERR("psh2cry.param:\t%#x\n", readl(PSH_REG_ADDR(psh2cry.param)));
+
+ PSH_ERR("\n<-------------end------------>\n");
+ pm_runtime_put(&ipc_ctrl.pdev->dev);
+}
+
+static struct psh_msg psh_dbg_msg;
+static int psh_ch;
+
+static ssize_t psh_msg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "\nLast ia2psh command with msg: %#x\nparam: %#x\n",
+ psh_dbg_msg.msg, psh_dbg_msg.param);
+}
+
+static ssize_t psh_msg_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ u32 msg, param;
+
+ memset(&psh_dbg_msg, 0, sizeof(psh_dbg_msg));
+
+ ret = sscanf(buf, "%x %x", &msg, ¶m);
+ if (ret != 2) {
+ PSH_ERR("Input two arguments as psh msg and param\n");
+ return -EINVAL;
+ }
+
+ psh_dbg_msg.msg = msg;
+ psh_dbg_msg.param = param;
+
+ return size;
+}
+
+static ssize_t psh_ch_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "\nLast psh channel: %d\n", psh_ch);
+}
+
+static ssize_t psh_ch_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+
+ ret = sscanf(buf, "%d", &psh_ch);
+ if (ret != 1) {
+ PSH_ERR("Input one argument as psh channel\n");
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static ssize_t psh_send_cmd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int psh_dbg_err;
+ struct psh_msg out_msg;
+
+ memset(&out_msg, 0, sizeof(out_msg));
+
+ psh_dbg_err = intel_ia2psh_command(&psh_dbg_msg, &out_msg,
+ psh_ch, 3000000);
+ if (psh_dbg_err) {
+ PSH_ERR("Send ia2psh command failed, err %d\n", psh_dbg_err);
+ psh_regs_dump();
+ return psh_dbg_err;
+ }
+
+ return size;
+}
+
+/* Debug attributes, exposed under <pci-dev>/psh_debug/ in sysfs. */
+static DEVICE_ATTR(psh_msg, S_IRUGO | S_IWUSR, psh_msg_show, psh_msg_store);
+static DEVICE_ATTR(psh_ch, S_IRUGO | S_IWUSR, psh_ch_show, psh_ch_store);
+static DEVICE_ATTR(ia2psh_cmd, S_IWUSR, NULL, psh_send_cmd_store);
+
+static struct attribute *psh_attrs[] = {
+	&dev_attr_psh_msg.attr,
+	&dev_attr_psh_ch.attr,
+	&dev_attr_ia2psh_cmd.attr,
+	NULL,
+};
+
+static struct attribute_group psh_attr_group = {
+	.name = "psh_debug",
+	.attrs = psh_attrs,
+};
+
+/* Create the psh_debug sysfs group on the PCI device. */
+static int intel_psh_debug_sysfs_create(struct pci_dev *pdev)
+{
+	return sysfs_create_group(&pdev->dev.kobj, &psh_attr_group);
+}
+
+/* Counterpart of intel_psh_debug_sysfs_create(), used at remove time. */
+static void pmic_sysfs_remove(struct pci_dev *pdev)
+{
+	sysfs_remove_group(&pdev->dev.kobj, &psh_attr_group);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Refuse to suspend while any IPC channel is busy: try-lock every
+ * channel semaphore; if one is held, release the ones already taken
+ * and return -EBUSY so the suspend attempt is aborted.
+ */
+static int psh_ipc_suspend_noirq(struct device *dev)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < NUM_ALL_CH; i++) {
+		if (down_trylock(&ipc_ctrl.ch_lock[i])) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+
+	if (ret) {
+		/* i is the channel that failed; undo locks [0, i). */
+		for (; i > 0; i--)
+			up(&ipc_ctrl.ch_lock[i - 1]);
+	}
+
+	return ret;
+}
+
+/* Release all channel semaphores taken by suspend_noirq. */
+static int psh_ipc_resume_noirq(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < NUM_ALL_CH; i++)
+		up(&ipc_ctrl.ch_lock[i]);
+
+	return 0;
+}
+
+#else
+
+#define psh_ipc_suspend_noirq NULL
+#define psh_ipc_resume_noirq NULL
+
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM hooks: no state to save/restore, only trace transitions. */
+static int psh_ipc_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "runtime suspend called\n");
+	return 0;
+}
+
+static int psh_ipc_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "runtime resume called\n");
+	return 0;
+}
+
+#else
+
+#define psh_ipc_runtime_suspend NULL
+#define psh_ipc_runtime_resume NULL
+
+#endif
+
+/*
+ * PCI probe: map BAR0, register the threaded PSH IRQ, initialize the
+ * locks and per-channel semaphores, create the child platform devices
+ * and the debug sysfs group, then hand the device to runtime PM.
+ *
+ * NOTE(review): pci_enable_device() is not undone with
+ * pci_disable_device() on the error paths (or in remove) - confirm
+ * this is intentional for this platform.
+ */
+static int psh_ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int i, ret;
+	unsigned long start, len;
+
+	/* Hold a reference to the PCI device for the driver's lifetime. */
+	ipc_ctrl.pdev = pci_dev_get(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err1;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!start || !len) {
+		ret = -ENODEV;
+		goto err1;
+	}
+
+	ret = pci_request_regions(pdev, "intel_psh_ipc");
+	if (ret)
+		goto err1;
+
+	/* Both Tangier and Anniedale use register map 1. */
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		ipc_ctrl.reg_map = 1;
+		break;
+	case INTEL_MID_CPU_CHIP_ANNIEDALE:
+		ipc_ctrl.reg_map = 1;
+		break;
+	default:
+		dev_err(&pdev->dev, "error register map\n");
+		ret = -EINVAL;
+		goto err2;
+		break;
+	}
+
+	ipc_ctrl.psh_regs = (union psh_registers *)ioremap_nocache(start, len);
+	if (!ipc_ctrl.psh_regs) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	ret = request_threaded_irq(pdev->irq, NULL, psh_ipc_irq, IRQF_ONESHOT,
+			"intel_psh_ipc", NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register irq %d\n", pdev->irq);
+		goto err3;
+	}
+
+	/* Allow the PSH interrupt to wake the platform from suspend. */
+	irq_set_irq_wake(pdev->irq, 1);
+
+	spin_lock_init(&ipc_ctrl.lock);
+	mutex_init(&ipc_ctrl.psh_mutex);
+
+	for (i = 0; i < NUM_ALL_CH; i++)
+		sema_init(&ipc_ctrl.ch_lock[i], 1);
+
+	intel_psh_devices_create();
+
+	/* NOTE(review): return value ignored - probe succeeds even if the
+	 * debug sysfs group cannot be created. */
+	intel_psh_debug_sysfs_create(pdev);
+
+	ipc_ctrl.initialized = 1;
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+
+err3:
+	iounmap(ipc_ctrl.psh_regs);
+err2:
+	pci_release_regions(pdev);
+err1:
+	pci_dev_put(pdev);
+
+	return ret;
+}
+
+/*
+ * PCI remove: undo probe in reverse order. Userspace-facing interfaces
+ * (sysfs group, child platform devices) are torn down first, then the
+ * IRQ/MMIO/PCI resources, and the pci_dev_get() reference taken in
+ * probe is dropped last so pdev is never used after the put.
+ */
+static void psh_ipc_remove(struct pci_dev *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	pmic_sysfs_remove(pdev);
+	intel_psh_devices_destroy();
+	free_irq(pdev->irq, NULL);
+	iounmap(ipc_ctrl.psh_regs);
+	pci_release_regions(pdev);
+	ipc_ctrl.initialized = 0;
+	pci_dev_put(pdev);
+}
+
+static const struct dev_pm_ops psh_ipc_drv_pm_ops = {
+	.suspend_noirq = psh_ipc_suspend_noirq,
+	.resume_noirq = psh_ipc_resume_noirq,
+	.runtime_suspend = psh_ipc_runtime_suspend,
+	.runtime_resume = psh_ipc_runtime_resume,
+};
+
+/* PSH IPC PCI function: Intel vendor ID, device 0x11a3. */
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x11a3)},
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver psh_ipc_driver = {
+	.name = "intel_psh_ipc",
+	.driver = {
+		.pm = &psh_ipc_drv_pm_ops,
+	},
+	.id_table = pci_ids,
+	.probe = psh_ipc_probe,
+	.remove = psh_ipc_remove,
+};
+
+static int __init psh_ipc_init(void)
+{
+	return pci_register_driver(&psh_ipc_driver);
+}
+
+static void __exit psh_ipc_exit(void)
+{
+	pci_unregister_driver(&psh_ipc_driver);
+}
+
+MODULE_AUTHOR("bin.yang@intel.com")
+MODULE_DESCRIPTION("Intel PSH IPC driver");
+MODULE_LICENSE("GPL v2");
+
+/* fs_initcall: registered earlier than module_init - presumably other
+ * early drivers depend on PSH IPC; confirm before changing. */
+fs_initcall(psh_ipc_init);
+module_exit(psh_ipc_exit);
--- /dev/null
+/* intel_scu_flis.c SCU FLIS INTERFACES
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/rpmsg.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_flis.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+static struct rpmsg_instance *flis_instance;
+
+/* Staging values for the raw-shim debug sysfs interface (flis_debug). */
+static u32 shim_flis_addr;
+static u32 shim_offset;
+static u32 shim_data;
+static char shim_ops[OPS_STR_LEN];
+
+/* Staging values for the pin-level debug interface (pin_config_debug). */
+static u32 param_type; /* flis param type: PULL/PIN DIRECTION/OPEN_DRAIN */
+static u32 param_value; /* value of certain flis param */
+static unsigned int pin_name;
+static char ops[OPS_STR_LEN];
+
+struct intel_scu_flis_info {
+	struct pinstruct_t *pin_t;		/* per-pin IPC (shim) table */
+	struct pin_mmio_flis_t *mmio_flis_t;	/* per-pin MMIO offset table */
+	int pin_num;				/* number of entries above */
+	int initialized;			/* set once probe validated pdata */
+	void *flis_base;			/* ioremapped FLIS MMIO base */
+	u32 flis_paddr;				/* physical FLIS base address */
+};
+
+static struct intel_scu_flis_info flis_info;
+
+/* Serializes MMIO writes to FLIS registers in set_flis_value(). */
+static DEFINE_SPINLOCK(mmio_flis_lock);
+
+/*
+ * get_flis_value - read the 32-bit FLIS register at @offset from the
+ * mapped MMIO base.
+ * NOTE(review): the return type is u32 but -ENODEV is returned when the
+ * driver is uninitialized, making the error indistinguishable from a
+ * register value - callers must know the driver state.
+ */
+u32 get_flis_value(u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	u32 __iomem *mem;
+
+	if (!isfi->initialized || !isfi->flis_base)
+		return -ENODEV;
+
+	mem = (void __iomem *)(isfi->flis_base + offset);
+
+	return readl(mem);
+}
+EXPORT_SYMBOL(get_flis_value);
+
+/*
+ * set_flis_value - write @value to the FLIS register at @offset.
+ * On Tangier, offsets 0x1d00-0x1d34 are a security region that is
+ * read-only to the OS, so writes there are routed through a (sleeping)
+ * rpmsg/IPC indirect write; all other writes are plain MMIO under
+ * mmio_flis_lock. Silently does nothing when uninitialized.
+ */
+void set_flis_value(u32 value, u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	u32 __iomem *mem;
+	unsigned long flags;
+
+	if (!isfi->initialized || !isfi->flis_base)
+		return;
+
+	/*
+	 * There is one security region for Merrifield FLIS, which
+	 * are read only to OS side. Use IPC when write access is needed.
+	 */
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER
+			&& offset >= 0x1d00
+			&& offset <= 0x1d34) {
+		/* IPC call should not be called in atomic context */
+		might_sleep();
+		rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+				(u8 *)&value, 4,
+				NULL, 0,
+				isfi->flis_paddr + offset, 0);
+
+	} else {
+		mem = (void __iomem *)(isfi->flis_base + offset);
+		spin_lock_irqsave(&mmio_flis_lock, flags);
+		writel(value, mem);
+		spin_unlock_irqrestore(&mmio_flis_lock, flags);
+	}
+}
+EXPORT_SYMBOL(set_flis_value);
+
+/* directly write to flis address */
+int intel_scu_ipc_write_shim(u32 data, u32 flis_addr, u32 offset)
+{
+ int ret;
+ u32 ipc_wbuf[3];
+
+ /* offset 0xff means the flis is reserved, just return 0*/
+ if (offset == 0xFF)
+ return 0;
+
+ ipc_wbuf[0] = flis_addr; /* wbuf[0]: flis address */
+ ipc_wbuf[1] = offset; /* wbuf[1]: register offset */
+ ipc_wbuf[2] = data; /* wbuf[2]: data */
+
+ ret = rpmsg_send_command(flis_instance, IPCMSG_SHIM_CONFIG,
+ IPC_CMD_SHIM_WR, (u8 *)ipc_wbuf, NULL, 12, 0);
+ if (ret)
+ pr_err("%s: failed to write shim, flis addr: 0x%x, offset: 0x%x\n",
+ __func__, flis_addr, offset);
+
+ return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_write_shim);
+
+/* directly read from flis address */
+int intel_scu_ipc_read_shim(u32 *data, u32 flis_addr, u32 offset)
+{
+ int ret;
+ u32 ipc_wbuf[2];
+
+ /* offset 0xff means the flis is reserved, just return 0 */
+ if (offset == 0xFF)
+ return 0;
+
+ ipc_wbuf[0] = flis_addr;
+ ipc_wbuf[1] = offset;
+
+ ret = rpmsg_send_command(flis_instance, IPCMSG_SHIM_CONFIG,
+ IPC_CMD_SHIM_RD, (u8 *)ipc_wbuf, data, 8, 1);
+ if (ret)
+ pr_err("%s: failed to read shim, flis addr: 0x%x, offset: 0x%x\n",
+ __func__, flis_addr, offset);
+
+ return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_read_shim);
+
+int intel_scu_ipc_update_shim(u32 data, u32 mask, u32 flis_addr, u32 offset)
+{
+ u32 tmp = 0;
+ int ret;
+
+ ret = intel_scu_ipc_read_shim(&tmp, flis_addr, offset);
+ if (ret) {
+ pr_err("read shim failed, addr = 0x%x, off = 0x%x\n",
+ flis_addr, offset);
+ return ret;
+ }
+
+ tmp &= ~mask;
+ tmp |= (data & mask);
+
+ ret = intel_scu_ipc_write_shim(tmp, flis_addr, offset);
+ if (ret) {
+ pr_err("write shim failed, addr = 0x%x, off = 0x%x\n",
+ flis_addr, offset);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(intel_scu_ipc_update_shim);
+
+/**
+ * config_pin_flis -- configure pin mux,
+ * pull direction and strength and open-drain enable.
+ *
+ * @name: pin name (index into the platform pin table)
+ * @param: flis param
+ * @val: value to be set
+ *
+ * Returns 0 on success, -ENODEV when the driver is not initialized,
+ * -EINVAL for an unknown pin/param or a non-configurable pin.
+ *
+ * example:
+ * config pull up/down:
+ * config_pin_flis(i2s_2_clk, PULL, UP_20K);
+ * config_pin_flis(i2s_2_clk, PULL, DOWN_20K);
+ *
+ * config pin mux:
+ * config_pin_flis(i2s_2_clk, MUX, MUX_EN_INPUT_EN);
+ * config_pin_flis(i2s_2_clk, MUX, INPUT_EN);
+ * config_pin_flis(i2s_2_clk, MUX, MUX_EN_OUTPUT_EN);
+ * config_pin_flis(i2s_2_clk, MUX, OUTPUT_EN);
+ *
+ * config pin open-drain:
+ * config_pin_flis(i2s_2_clk, OPEN_DRAIN, OD_ENABLE);
+ * config_pin_flis(i2s_2_clk, OPEN_DRAIN, OD_DISABLE);
+ *
+ */
+int config_pin_flis(unsigned int name, enum flis_param_t param, u32 val)
+{
+	u32 flis_addr, off, data, mask;
+	int ret;
+	int pos;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct pin_mmio_flis_t *mmft;
+	u32 old_val;
+
+	if (!isfi->initialized)
+		return -ENODEV;
+
+	/* name is unsigned, so a single upper-bound check suffices */
+	if (name >= (unsigned int)isfi->pin_num)
+		return -EINVAL;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		/* Check if the pin is configurable */
+		if (isfi->pin_t[name].valid == false)
+			return -EINVAL;
+
+		flis_addr = isfi->pin_t[name].bus_address;
+
+		switch (param) {
+		case PULL:
+			off = isfi->pin_t[name].pullup_offset;
+			pos = isfi->pin_t[name].pullup_lsb_pos;
+			mask = (PULL_MASK << pos);
+			break;
+		case MUX:
+			off = isfi->pin_t[name].direction_offset;
+			pos = isfi->pin_t[name].direction_lsb_pos;
+			mask = (MUX_MASK << pos);
+			break;
+		case OPEN_DRAIN:
+			off = isfi->pin_t[name].open_drain_offset;
+			pos = isfi->pin_t[name].open_drain_bit;
+			mask = (OPEN_DRAIN_MASK << pos);
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		data = (val << pos);
+		pr_debug("addr = 0x%x, off = 0x%x, pos = %d, mask = 0x%x, data = 0x%x\n",
+				flis_addr, off, pos, mask, data);
+
+		ret = intel_scu_ipc_update_shim(data, mask, flis_addr, off);
+		if (ret) {
+			pr_err("update shim failed\n");
+			return ret;
+		}
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		mmft = isfi->mmio_flis_t;
+		off = mmft[name].offset;
+
+		/* Check if the FLIS is writable by mmio access */
+		if (!(mmft[name].access_ctrl & writable))
+			return -EINVAL;
+
+		old_val = get_flis_value(off);
+
+		/* NOTE(review): unlike the Cloverview path, @val is not
+		 * shifted here - presumably the Tangier masks/values are
+		 * already positioned; confirm against the FLIS spec. */
+		switch (param) {
+		case PULL:
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		set_flis_value((old_val & ~mask) | val, off);
+
+	} else
+		return -EINVAL;
+
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(config_pin_flis);
+
+/*
+ * get_pin_flis - read one FLIS parameter (@param) of pin @name into
+ * @val. Cloverview reads go through the SCU shim IPC; Tangier reads
+ * come from the MMIO FLIS map with fixed field positions (PULL at bit
+ * 4, MUX at bit 12, OPEN_DRAIN at bit 21).
+ * NOTE(review): "name < 0" is always false for an unsigned parameter -
+ * dead check kept as-is; see config_pin_flis.
+ */
+int get_pin_flis(unsigned int name, enum flis_param_t param, u32 *val)
+{
+	u32 flis_addr, off;
+	u32 data = 0;
+	int ret;
+	int pos;
+	u32 mask;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct pin_mmio_flis_t *mmft;
+	u32 old_val;
+
+	if (!isfi->initialized)
+		return -ENODEV;
+
+	if (name < 0 || name >= isfi->pin_num)
+		return -EINVAL;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if (isfi->pin_t[name].valid == false)
+			return -EINVAL;
+
+		flis_addr = isfi->pin_t[name].bus_address;
+
+		switch (param) {
+		case PULL:
+			off = isfi->pin_t[name].pullup_offset;
+			pos = isfi->pin_t[name].pullup_lsb_pos;
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			off = isfi->pin_t[name].direction_offset;
+			pos = isfi->pin_t[name].direction_lsb_pos;
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			off = isfi->pin_t[name].open_drain_offset;
+			pos = isfi->pin_t[name].open_drain_bit;
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		ret = intel_scu_ipc_read_shim(&data, flis_addr, off);
+		if (ret) {
+			pr_err("read shim failed, addr = 0x%x, off = 0x%x\n",
+					flis_addr, off);
+			return ret;
+		}
+
+		*val = (data >> pos) & mask;
+
+		pr_debug("read: data = 0x%x, val = 0x%x\n", data, *val);
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		mmft = isfi->mmio_flis_t;
+		off = mmft[name].offset;
+
+		old_val = get_flis_value(off);
+
+		switch (param) {
+		case PULL:
+			pos = 4;
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			pos = 12;
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			pos = 21;
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		*val = (old_val & mask) >> pos;
+
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_pin_flis);
+
+/*
+ * Common store helper for the debug attributes: parse one value from
+ * @buf (decimal for pin numbers, hex for everything else) and stage it
+ * in the static variable selected by @type. Parse failures are
+ * silently ignored (the staged value is left unchanged).
+ */
+static void flis_generic_store(const char *buf, int type)
+{
+	u32 tmp;
+	int ret;
+
+	/* use decimal for pin number */
+	if (type == DBG_PIN_NAME)
+		ret = sscanf(buf, "%d", &tmp);
+	else
+		ret = sscanf(buf, "%x", &tmp);
+
+	if (ret != 1)
+		return;
+
+	switch (type) {
+	case DBG_SHIM_FLIS_ADDR:
+		shim_flis_addr = tmp;
+		break;
+	case DBG_SHIM_OFFSET:
+		shim_offset = tmp;
+		break;
+	case DBG_SHIM_DATA:
+		shim_data = tmp;
+		break;
+	case DBG_PARAM_VAL:
+		param_value = tmp;
+		break;
+	case DBG_PARAM_TYPE:
+		param_type = tmp;
+		break;
+	case DBG_PIN_NAME:
+		pin_name = tmp;
+		break;
+	default:
+		break;
+	}
+}
+
+/* sysfs accessors for the staged shim address/offset/data (hex). */
+static ssize_t shim_flis_addr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_FLIS_ADDR);
+	return size;
+}
+
+static ssize_t shim_flis_addr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_flis_addr);
+}
+
+static ssize_t shim_offset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_OFFSET);
+	return size;
+}
+
+static ssize_t shim_offset_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_offset);
+}
+
+static ssize_t shim_data_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_DATA);
+	return size;
+}
+
+static ssize_t shim_data_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_data);
+}
+}
+
+/*
+ * sysfs store: "read" or "write" executes a shim IPC transfer using the
+ * staged shim_flis_addr/shim_offset/shim_data values ("read" updates
+ * shim_data so it can be retrieved via the data attribute).
+ */
+static ssize_t shim_ops_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(shim_ops, 0, sizeof(shim_ops));
+
+	ret = sscanf(buf, "%9s", shim_ops);
+	if (ret != 1)
+		return -EINVAL;
+
+	if (!strncmp("read", shim_ops, OPS_STR_LEN)) {
+		ret = intel_scu_ipc_read_shim(&shim_data, shim_flis_addr,
+				shim_offset);
+	} else if (!strncmp("write", shim_ops, OPS_STR_LEN)) {
+		ret = intel_scu_ipc_write_shim(shim_data, shim_flis_addr,
+				shim_offset);
+	} else {
+		dev_err(dev, "Not supported ops\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "shim config met error\n");
+		return ret;
+	}
+
+	return size;
+}
+
+/* sysfs accessors for the staged pin-config values (param value in hex,
+ * param type and pin number in decimal). */
+static ssize_t param_val_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", param_value);
+}
+
+static ssize_t param_val_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PARAM_VAL);
+	return size;
+}
+
+static ssize_t flis_param_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", param_type);
+}
+
+static ssize_t flis_param_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PARAM_TYPE);
+	return size;
+}
+
+static ssize_t pinname_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", pin_name);
+}
+
+static ssize_t pinname_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PIN_NAME);
+	return size;
+}
+
+/*
+ * sysfs store: "get" reads the staged pin/param into param_value,
+ * "set" writes param_value to the staged pin/param.
+ */
+static ssize_t ops_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(ops, 0, sizeof(ops));
+
+	ret = sscanf(buf, "%9s", ops);
+	if (ret != 1) {
+		dev_err(dev, "input error\n");
+		return -EINVAL;
+	}
+
+	if (!strncmp("get", ops, OPS_STR_LEN))
+		ret = get_pin_flis(pin_name, param_type, &param_value);
+	else if (!strncmp("set", ops, OPS_STR_LEN))
+		ret = config_pin_flis(pin_name, param_type, param_value);
+	else {
+		dev_err(dev, "wrong ops\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "Access flis error, ret = %d\n", ret);
+		return ret;
+	}
+
+	return size;
+}
+
+/* Raw-shim debug attributes, grouped under flis_debug/. */
+static DEVICE_ATTR(flis_addr, S_IRUGO|S_IWUSR,
+		shim_flis_addr_show, shim_flis_addr_store);
+static DEVICE_ATTR(offset, S_IRUGO|S_IWUSR,
+		shim_offset_show, shim_offset_store);
+static DEVICE_ATTR(data, S_IRUGO|S_IWUSR, shim_data_show, shim_data_store);
+static DEVICE_ATTR(flis_ops, S_IWUSR, NULL, shim_ops_store);
+
+static struct attribute *flis_attrs[] = {
+	&dev_attr_flis_addr.attr,
+	&dev_attr_offset.attr,
+	&dev_attr_data.attr,
+	&dev_attr_flis_ops.attr,
+	NULL,
+};
+
+static struct attribute_group flis_attr_group = {
+	.name = "flis_debug",
+	.attrs = flis_attrs,
+};
+
+/* Pin-level debug attributes, grouped under pin_config_debug/. */
+static DEVICE_ATTR(pin_name, S_IRUGO|S_IWUSR, pinname_show, pinname_store);
+static DEVICE_ATTR(flis_param, S_IRUGO|S_IWUSR, flis_param_type_show,
+		flis_param_type_store);
+static DEVICE_ATTR(val, S_IRUGO|S_IWUSR, param_val_show, param_val_store);
+static DEVICE_ATTR(ops, S_IWUSR, NULL, ops_store);
+
+static struct attribute *pin_config_attrs[] = {
+	&dev_attr_pin_name.attr,
+	&dev_attr_flis_param.attr,
+	&dev_attr_val.attr,
+	&dev_attr_ops.attr,
+	NULL,
+};
+
+static struct attribute_group pin_config_attr_group = {
+	.name = "pin_config_debug",
+	.attrs = pin_config_attrs,
+};
+
+/*
+ * Platform probe: record the platform pin tables, map the MMIO FLIS
+ * region when one is provided, and expose the flis_debug and
+ * pin_config_debug sysfs groups.
+ */
+static int scu_flis_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct intel_scu_flis_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	isfi->pin_t = pdata->pin_t;
+	isfi->pin_num = pdata->pin_num;
+	isfi->mmio_flis_t = pdata->mmio_flis_t;
+	if (pdata->mmio_flis_t && pdata->flis_base) {
+		isfi->flis_paddr = pdata->flis_base;
+		isfi->flis_base = ioremap_nocache(pdata->flis_base,
+				pdata->flis_len);
+		if (!isfi->flis_base) {
+			dev_err(&pdev->dev, "error mapping flis base\n");
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+
+	/* Usable only with a pin table (IPC or MMIO) and a pin count. */
+	if ((isfi->pin_t || isfi->mmio_flis_t) && isfi->pin_num)
+		isfi->initialized = 1;
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &flis_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create flis sysfs interface\n");
+		goto err1;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &pin_config_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Failed to create pin config sysfs interface\n");
+		goto err2;
+	}
+
+	dev_info(&pdev->dev, "scu flis probed\n");
+	return 0;
+
+err2:
+	sysfs_remove_group(&pdev->dev.kobj, &flis_attr_group);
+err1:
+	/* Unmap only what was actually mapped above. */
+	if (isfi->flis_base)
+		iounmap(isfi->flis_base);
+out:
+	isfi->initialized = 0;
+	return ret;
+}
+
+/* Platform remove: drop both debug sysfs groups created in probe. */
+static int scu_flis_remove(struct platform_device *pdev)
+{
+	sysfs_remove_group(&pdev->dev.kobj, &pin_config_attr_group);
+	sysfs_remove_group(&pdev->dev.kobj, &flis_attr_group);
+
+	return 0;
+}
+
+static struct platform_driver scu_flis_driver = {
+	.driver = {
+		.name = "intel_scu_flis",
+		.owner = THIS_MODULE,
+	},
+	.probe = scu_flis_probe,
+	.remove = scu_flis_remove,
+};
+
+/* Registered/unregistered from the rpmsg probe/remove callbacks below. */
+static int scu_flis_module_init(void)
+{
+	return platform_driver_register(&scu_flis_driver);
+}
+
+static void scu_flis_module_exit(void)
+{
+	platform_driver_unregister(&scu_flis_driver);
+}
+
+/*
+ * rpmsg probe: allocate and initialize the FLIS rpmsg instance, then
+ * register the platform driver that provides the sysfs interfaces.
+ */
+static int flis_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed flis rpmsg device\n");
+
+	/* Allocate rpmsg instance for flis*/
+	ret = alloc_rpmsg_instance(rpdev, &flis_instance);
+	if (!flis_instance) {
+		dev_err(&rpdev->dev, "kzalloc flis instance failed\n");
+		/* Guarantee an error is propagated even if ret was 0. */
+		if (!ret)
+			ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(flis_instance);
+
+	ret = scu_flis_module_init();
+	if (ret)
+		free_rpmsg_instance(rpdev, &flis_instance);
+
+out:
+	return ret;
+}
+
+/* rpmsg remove: unregister the platform driver and free the instance. */
+static void flis_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	scu_flis_module_exit();
+	free_rpmsg_instance(rpdev, &flis_instance);
+	dev_info(&rpdev->dev, "Removed flis rpmsg device\n");
+}
+
+/* rpmsg callback: FLIS traffic is request/response only, so any
+ * unsolicited message is unexpected - log and hex-dump it. */
+static void flis_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+			int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len, true);
+}
+
+/* Matches the "rpmsg_flis" channel announced by the SCU. */
+static struct rpmsg_device_id flis_rpmsg_id_table[] = {
+	{ .name = "rpmsg_flis" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, flis_rpmsg_id_table);
+
+static struct rpmsg_driver flis_rpmsg = {
+	.drv.name = KBUILD_MODNAME,
+	.drv.owner = THIS_MODULE,
+	.id_table = flis_rpmsg_id_table,
+	.probe = flis_rpmsg_probe,
+	.callback = flis_rpmsg_cb,
+	.remove = flis_rpmsg_remove,
+};
+
+static int __init flis_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&flis_rpmsg);
+}
+
+/* Module exit: unregister the rpmsg driver. Note: returning the value
+ * of a void call from a void function violates C11 6.8.6.4, so the
+ * bare call is used instead. */
+static void __exit flis_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&flis_rpmsg);
+}
+
+fs_initcall(flis_rpmsg_init);
+module_exit(flis_rpmsg_exit);
+
+MODULE_AUTHOR("Ning Li <ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel FLIS Access Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * fw_update.c - Intel SCU Firmware Update Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/rpmsg.h>
+#include <linux/intel_mid_pm.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+
+/* Medfield & Cloverview firmware update.
+ * The flow and communication between IA and SCU has changed for
+ * Medfield firmware update. For more details, please refer to
+ * Firmware Arch Spec.
+ * Below macros and structs apply for medfield firmware update
+ */
+
+#define IPC_CMD_FW_UPDATE_GO 0x02
+
+#define MAX_FW_CHUNK (128*1024)
+#define IFWX_CHUNK_SIZE (96*1024)
+
+#define SRAM_ADDR 0xFFFC0000
+#define MAILBOX_ADDR 0xFFFE0000
+
+#define SCU_FLAG_OFFSET 8
+#define IA_FLAG_OFFSET 12
+
+#define MIP_HEADER_OFFSET 0
+#define SUCP_OFFSET 0x1D8000
+#define VEDFW_OFFSET 0x1A6000
+
+#define DNX_HDR_LEN 24
+#define FUPH_HDR_LEN 36
+
+#define DNX_IMAGE "DXBL"
+#define FUPH_HDR_SIZE "RUPHS"
+#define FUPH "RUPH"
+#define MIP "DMIP"
+#define IFWI "IFW"
+#define LOWER_128K "LOFW"
+#define UPPER_128K "HIFW"
+#define PSFW1 "PSFW1"
+#define PSFW2 "PSFW2"
+#define SSFW "SSFW"
+#define SUCP "SuCP"
+#define VEDFW "VEDFW"
+#define UPDATE_DONE "HLT$"
+#define UPDATE_ABORT "HLT0"
+#define UPDATE_ERROR "ER"
+
+#define MAX_LEN_IFW 4
+#define MAX_LEN_PSFW 7
+#define MAX_LEN_SSFW 6
+#define MAX_LEN_SUCP 6
+#define MAX_LEN_VEDFW 7
+
+#define FUPH_MIP_OFFSET 0x04
+#define FUPH_IFWI_OFFSET 0x08
+#define FUPH_PSFW1_OFFSET 0x0c
+#define FUPH_PSFW2_OFFSET 0x10
+#define FUPH_SSFW_OFFSET 0x14
+#define FUPH_SUCP_OFFSET 0x18
+#define FUPH_VEDFW_OFFSET 0x1c
+
+#define DNX_MAX_SIZE (128*1024)
+#define IFWI_MAX_SIZE (3*1024*1024)
+#define FOTA_MEM_SIZE (4*1024*1024)
+
+#define DNX_SIZE_OFFSET 0
+#define GP_FLAG_OFFSET 4
+#define XOR_CHK_OFFSET 20
+
+#define GPF_BIT32 1
+#define FUPH_STR "UPH$"
+#define FUPH_MAX_LEN 36
+#define SKIP_BYTES 8
+
+static struct kobject *scu_fw_update_kobj;
+static struct rpmsg_instance *fw_update_instance;
+
+/* Modified IA-SCU mailbox for medfield firmware update. */
+struct ia_scu_mailbox {
+	char mail[8];	/* SCU request / status string */
+	u32 scu_flag;	/* toggled by SCU when it is done with a chunk */
+	u32 ia_flag;	/* toggled by IA when a chunk is ready in SRAM */
+};
+
+/* Structure to parse input from firmware-update application. */
+struct fw_ud {
+	u8 *fw_file_data;	/* full integrated firmware image */
+	u32 fsize;		/* size of fw_file_data in bytes */
+	u8 *dnx_hdr;		/* DnX header (DNX_HDR_LEN bytes) */
+	u8 *dnx_file_data;	/* DnX image payload */
+	u32 dnx_size;		/* size of the DnX payload */
+	u32 fuph_hdr_len;	/* FUPH header length, at end of image */
+};
+
+struct mfld_fw_update {
+	void __iomem *sram;	/* shared SRAM for handing chunks to SCU */
+	void __iomem *mailbox;	/* mapped IA-SCU mailbox */
+	u32 wscu;		/* expected SCU flag value (toggles) */
+	u32 wia;		/* current IA flag value (toggles) */
+	char mb_status[8];	/* last mailbox status string */
+};
+
+/* Holds size parameters read from fuph header */
+struct fuph_hdr_attrs {
+	u32 mip_size;
+	u32 ifwi_size;
+	u32 psfw1_size;
+	u32 psfw2_size;
+	u32 ssfw_size;
+	u32 sucp_size;
+	u32 vedfw_size;
+};
+
+enum mailbox_status {
+	MB_DONE,	/* SCU reported update complete (HLT$) */
+	MB_CONTINUE,	/* SCU is requesting more data */
+	MB_ERROR	/* SCU reported error/abort (ER*/HLT0) */
+};
+
+/* Misc. firmware components that are part of integrated firmware */
+struct misc_fw {
+	const char *fw_type;	/* request-string prefix from SCU */
+	u8 str_len;		/* max length of a matching request */
+};
+
+/* lock used to prevent multiple calls to fw update sysfs interface */
+static DEFINE_MUTEX(fwud_lock);
+
+static char err_buf[50];
+static u8 *pending_data;
+
+struct fw_update_info {
+	struct device *dev;
+	/* presumably an update staged for later execution - confirm
+	 * against the code using fwud_pending further down the file */
+	struct fw_ud *fwud_pending;
+};
+
+static struct fw_update_info fui;
+
+/* Maps SCU request prefixes to their maximum request-string lengths. */
+static struct misc_fw misc_fw_table[] = {
+	{ .fw_type = IFWI, .str_len = MAX_LEN_IFW },
+	{ .fw_type = PSFW1, .str_len = MAX_LEN_PSFW },
+	{ .fw_type = SSFW, .str_len = MAX_LEN_SSFW },
+	{ .fw_type = PSFW2, .str_len = MAX_LEN_PSFW },
+	{ .fw_type = SUCP, .str_len = MAX_LEN_SUCP },
+	{ .fw_type = VEDFW, .str_len = MAX_LEN_VEDFW }
+};
+
+static int alloc_fota_mem_early;
+
+/* Boot parameter "alloc_fota_mem_early": sets a flag only; @p unused. */
+int __init alloc_mem_fota_early_flag(char *p)
+{
+	alloc_fota_mem_early = 1;
+	return 0;
+}
+early_param("alloc_fota_mem_early", alloc_mem_fota_early_flag);
+
+/*
+ * IA will wait in busy-state, and poll mailbox, to check
+ * if SCU is done processing.
+ * If it has to wait for more than a second, it will exit with
+ * error code.
+ * NOTE(review): 500 iterations x mdelay(10) is up to ~5 seconds, not
+ * one second as the comment above says - confirm the intended timeout.
+ */
+static int busy_wait(struct mfld_fw_update *mfld_fw_upd)
+{
+	u32 count = 0;
+	u32 flag;
+
+	flag = mfld_fw_upd->wscu;
+
+	while (ioread32(mfld_fw_upd->mailbox + SCU_FLAG_OFFSET) != flag
+		&& count < 500) {
+		/* There are synchronization issues between IA and SCU */
+		mb();
+		/* FIXME: we must use mdelay currently */
+		mdelay(10);
+		count++;
+	}
+
+	if (ioread32(mfld_fw_upd->mailbox + SCU_FLAG_OFFSET) != flag) {
+		dev_err(fui.dev, "IA-waited and quitting\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* This function will
+ * 1)copy firmware chunk from user-space to kernel-space.
+ * 2) Copy from kernel-space to shared SRAM.
+ * 3) Write to mailbox.
+ * 4) And wait for SCU to process that firmware chunk.
+ * Returns 0 on success, and < 0 for failure.
+ * NOTE(review): plain memcpy() is used on @userptr, so the caller must
+ * already have copied the data into kernel memory - confirm at the
+ * call sites.
+ */
+static int process_fw_chunk(u8 *fws, u8 *userptr, u32 chunklen,
+			struct mfld_fw_update *mfld_fw_upd)
+{
+	memcpy(fws, userptr, chunklen);
+
+	/* IA copy to sram */
+	memcpy_toio(mfld_fw_upd->sram, fws, chunklen);
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+	/* Toggle the IA flag to tell SCU a new chunk is in SRAM. */
+	mfld_fw_upd->wia = !(mfld_fw_upd->wia);
+	iowrite32(mfld_fw_upd->wia, mfld_fw_upd->mailbox + IA_FLAG_OFFSET);
+
+	mb();
+	dev_dbg(fui.dev, "wrote ia_flag=%d\n",
+		ioread32(mfld_fw_upd->mailbox + IA_FLAG_OFFSET));
+
+	/* SCU toggles its flag when done; wait for the new value. */
+	mfld_fw_upd->wscu = !mfld_fw_upd->wscu;
+	return busy_wait(mfld_fw_upd);
+}
+
+/*
+ * This function will check mailbox status flag, and return state of mailbox.
+ * Status strings: "ER*" or "HLT0" -> MB_ERROR, "HLT$" -> MB_DONE,
+ * anything else -> MB_CONTINUE (SCU wants more data).
+ */
+static enum mailbox_status check_mb_status(struct mfld_fw_update *mfld_fw_upd)
+{
+
+	enum mailbox_status mb_state;
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+
+	memcpy_fromio(mfld_fw_upd->mb_status, mfld_fw_upd->mailbox, 8);
+
+	if (!strncmp(mfld_fw_upd->mb_status, UPDATE_ERROR,
+			sizeof(UPDATE_ERROR) - 1) ||
+		!strncmp(mfld_fw_upd->mb_status, UPDATE_ABORT,
+			sizeof(UPDATE_ABORT) - 1)) {
+		dev_dbg(fui.dev,
+			"mailbox error=%s\n", mfld_fw_upd->mb_status);
+		return MB_ERROR;
+	} else {
+		mb_state = (!strncmp(mfld_fw_upd->mb_status, UPDATE_DONE,
+			sizeof(UPDATE_DONE) - 1)) ? MB_DONE : MB_CONTINUE;
+		dev_dbg(fui.dev,
+			"mailbox pass=%s, mb_state=%d\n",
+			mfld_fw_upd->mb_status, mb_state);
+	}
+
+	return mb_state;
+}
+
+/* Helper function used to calculate length and offset.
+ * Parses the chunk number out of @scu_req (e.g. "IFW3", "PSFW12"),
+ * then derives the chunk's *offset inside fw_file_data and its *len
+ * from the component sizes in @fuph.
+ * NOTE(review): for IFWx the chunk number is decremented (CTP chunks
+ * start at IFW1); a request of "IFW0" would wrap chunk_no around and
+ * pass the max_chunk_cnt check - presumably SCU never sends IFW0, but
+ * confirm.
+ */
+int helper_for_calc_offset_length(struct fw_ud *fw_ud_ptr, char *scu_req,
+		void **offset, u32 *len, struct fuph_hdr_attrs *fuph,
+		const char *fw_type)
+{
+	unsigned long chunk_no;
+	u32 chunk_rem;
+	u32 max_chunk_cnt;
+	u32 fw_size;
+	u32 fw_offset;
+	u32 max_fw_chunk_size = MAX_FW_CHUNK;
+
+	if (!strncmp(fw_type, IFWI, strlen(IFWI))) {
+
+		if (kstrtoul(scu_req + strlen(IFWI), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		/* On CTP, IFWx starts from IFW1, not IFW0, thus adjust the
+		 * chunk_no to make '*offset' point to the correct address.
+		 * Besides, the size of each IFWx chunk is 96k, not 128k
+		 */
+		chunk_no = chunk_no - 1;
+		fw_size = fuph->ifwi_size;
+		fw_offset = fuph->mip_size;
+		max_fw_chunk_size = IFWX_CHUNK_SIZE;
+	} else if (!strncmp(fw_type, PSFW1, strlen(PSFW1))) {
+
+		if (kstrtoul(scu_req + strlen(PSFW1), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->psfw1_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size;
+	} else if (!strncmp(fw_type, PSFW2, strlen(PSFW2))) {
+
+		if (kstrtoul(scu_req + strlen(PSFW2), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->psfw2_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size +
+				fuph->psfw1_size + fuph->ssfw_size;
+	} else if (!strncmp(fw_type, SSFW, strlen(SSFW))) {
+
+		if (kstrtoul(scu_req + strlen(SSFW), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->ssfw_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size +
+				fuph->psfw1_size;
+	} else if (!strncmp(fw_type, SUCP, strlen(SUCP))) {
+
+		if (kstrtoul(scu_req + strlen(SUCP), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->sucp_size;
+		fw_offset = SUCP_OFFSET;
+	} else if (!strncmp(fw_type, VEDFW, strlen(VEDFW))) {
+
+		if (kstrtoul(scu_req + strlen(VEDFW), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->vedfw_size;
+		fw_offset = VEDFW_OFFSET;
+	} else
+		return -EINVAL;
+
+	chunk_rem = fw_size % max_fw_chunk_size;
+	max_chunk_cnt = (fw_size/max_fw_chunk_size) + (chunk_rem ? 1 : 0);
+
+	dev_dbg(fui.dev,
+		"str=%s,chunk_no=%lx, chunk_rem=%d,max_chunk_cnt=%d\n",
+		fw_type, chunk_no, chunk_rem, max_chunk_cnt);
+
+	if ((chunk_no + 1) > max_chunk_cnt)
+		return -EINVAL;
+
+	/* Note::Logic below will make sure, that we get right length if input
+	   is 128K or multiple. */
+	*len = (chunk_no == (max_chunk_cnt - 1)) ?
+		(chunk_rem ? chunk_rem : max_fw_chunk_size) : max_fw_chunk_size;
+
+	*offset = fw_ud_ptr->fw_file_data + fw_offset +
+		chunk_no * max_fw_chunk_size;
+
+	return 0;
+}
+
+/*
+ * This api calculates offset and length depending on type of firmware chunk
+ * requested by SCU. Note: Intent is to follow the architecture such that,
+ * SCU controls the flow, and IA simply hands out, what is requested by SCU.
+ * IA will simply follow SCU's commands, unless SCU requests for something
+ * IA cannot give. TODO:That will be a special error case, need to figure out
+ * how to handle that.
+ * NOTE(review): the fixed-request comparisons use strlen(scu_req) as the
+ * compare length, so an empty request string would match the first
+ * branch - presumably SCU never sends one; confirm.
+ */
+int calc_offset_and_length(struct fw_ud *fw_ud_ptr, char *scu_req,
+			void **offset, u32 *len, struct fuph_hdr_attrs *fuph)
+{
+	u8 cnt;
+
+	if (!strncmp(DNX_IMAGE, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->dnx_file_data;
+		*len = fw_ud_ptr->dnx_size;
+		return 0;
+	} else if (!strncmp(FUPH, scu_req, strlen(scu_req))) {
+		/* FUPH header lives at the tail of the firmware image. */
+		*offset = fw_ud_ptr->fw_file_data + fw_ud_ptr->fsize
+				- fw_ud_ptr->fuph_hdr_len;
+		*len = fw_ud_ptr->fuph_hdr_len;
+		return 0;
+	} else if (!strncmp(MIP, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + MIP_HEADER_OFFSET;
+		*len = fuph->mip_size;
+		return 0;
+	} else if (!strncmp(LOWER_128K, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + fuph->mip_size;
+		*len = MAX_FW_CHUNK;
+		return 0;
+	} else if (!strncmp(UPPER_128K, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data
+				+ fuph->mip_size + MAX_FW_CHUNK;
+		*len = MAX_FW_CHUNK;
+		return 0;
+	} else {
+		/* Chunked components (IFWx/PSFWx/SSFW/SuCP/VEDFW): match the
+		 * prefix, then let the helper decode the chunk number. */
+		for (cnt = 0; cnt < ARRAY_SIZE(misc_fw_table); cnt++) {
+
+			if (!strncmp(misc_fw_table[cnt].fw_type, scu_req,
+					strlen(misc_fw_table[cnt].fw_type))) {
+
+				if (strlen(scu_req) ==
+						misc_fw_table[cnt].str_len) {
+
+					if (helper_for_calc_offset_length
+						(fw_ud_ptr, scu_req,
+						offset, len, fuph,
+						misc_fw_table[cnt].fw_type) < 0)
+						goto error_case;
+
+					dev_dbg(fui.dev,
+					"\nmisc fw type=%s, len=%d,offset=%d",
+						misc_fw_table[cnt].fw_type, *len,
+						(int)*offset);
+
+					return 0;
+
+				} else
+					goto error_case;
+			}
+		}
+
+	}
+
+	dev_dbg(fui.dev, "Unexpected mailbox request from scu\n");
+
+error_case:
+	/* TODO::Need to test this error case..and see how SCU reacts
+	 * and how IA handles
+	 * subsequent error response and whether exit is graceful...
+	 */
+
+	dev_dbg(fui.dev, "error case,respond back to SCU..\n");
+	dev_dbg(fui.dev, "scu_req=%s\n", scu_req);
+	*len = 0;
+	*offset = 0;
+
+	return -EINVAL;
+}
+
+/**
+ * intel_scu_ipc_medfw_upgrade - Medfield Firmware update utility
+ *
+ * The flow and communication between IA and SCU has changed for
+ * Medfield firmware update. So we have a different api below
+ * to support Medfield firmware update.
+ *
+ * Protocol: IA seeds SRAM with the DNX header, kicks SCU via
+ * IPC_CMD_FW_UPDATE_GO, then services SCU's mailbox requests
+ * (chunk by chunk) until the mailbox reports done or error.
+ * The wia/wscu flags implement the IA<->SCU handshake.
+ *
+ * On success returns 0; on failure returns < 0.
+ */
+static int intel_scu_ipc_medfw_upgrade(void)
+{
+ struct fw_ud *fw_ud_param = fui.fwud_pending;
+ struct mfld_fw_update mfld_fw_upd;
+ u8 *fw_file_data = NULL;
+ u8 *fws = NULL;
+ u8 *fuph_start = NULL;
+ int ret_val = 0;
+
+ struct fuph_hdr_attrs fuph;
+ u32 length = 0;
+ void *offset;
+ enum mailbox_status mb_state;
+
+ /* set all devices in d0i0 before IFWI upgrade */
+ if (unlikely(pmu_set_devices_in_d0i0())) {
+ pr_debug("pmu: failed to set all devices in d0i0...\n");
+ BUG();
+ }
+
+ rpmsg_global_lock();
+ mfld_fw_upd.wscu = 0;
+ mfld_fw_upd.wia = 0;
+ memset(mfld_fw_upd.mb_status, 0, sizeof(char) * 8);
+
+ fw_file_data = fw_ud_param->fw_file_data;
+ mfld_fw_upd.sram = ioremap_nocache(SRAM_ADDR, MAX_FW_CHUNK);
+ if (mfld_fw_upd.sram == NULL) {
+ dev_err(fui.dev, "unable to map sram\n");
+ ret_val = -ENOMEM;
+ goto out_unlock;
+ }
+
+ mfld_fw_upd.mailbox = ioremap_nocache(MAILBOX_ADDR,
+ sizeof(struct ia_scu_mailbox));
+
+ if (mfld_fw_upd.mailbox == NULL) {
+ dev_err(fui.dev, "unable to map the mailbox\n");
+ ret_val = -ENOMEM;
+ goto unmap_sram;
+ }
+
+ /*IA initializes both IAFlag and SCUFlag to zero */
+ iowrite32(0, mfld_fw_upd.mailbox + SCU_FLAG_OFFSET);
+ iowrite32(0, mfld_fw_upd.mailbox + IA_FLAG_OFFSET);
+ memset_io(mfld_fw_upd.mailbox, 0, 8);
+
+ fws = kmalloc(MAX_FW_CHUNK, GFP_KERNEL);
+ if (fws == NULL) {
+ ret_val = -ENOMEM;
+ goto unmap_mb;
+ }
+
+ /* fuph header start */
+ fuph_start = fw_ud_param->fw_file_data + (fw_ud_param->fsize - 1)
+ - (fw_ud_param->fuph_hdr_len - 1);
+
+ /* Convert sizes in DWORDS to number of bytes. */
+ fuph.mip_size = (*((u32 *)(fuph_start + FUPH_MIP_OFFSET)))*4;
+ fuph.ifwi_size = (*((u32 *)(fuph_start + FUPH_IFWI_OFFSET)))*4;
+ fuph.psfw1_size = (*((u32 *)(fuph_start + FUPH_PSFW1_OFFSET)))*4;
+ fuph.psfw2_size = (*((u32 *)(fuph_start + FUPH_PSFW2_OFFSET)))*4;
+ fuph.ssfw_size = (*((u32 *)(fuph_start + FUPH_SSFW_OFFSET)))*4;
+ fuph.sucp_size = (*((u32 *)(fuph_start + FUPH_SUCP_OFFSET)))*4;
+
+ /* VEDFW size is only present in the longer FUPH header layout */
+ if (fw_ud_param->fuph_hdr_len == FUPH_HDR_LEN) {
+ fuph.vedfw_size =
+ (*((u32 *)(fuph_start + FUPH_VEDFW_OFFSET)))*4;
+ } else
+ fuph.vedfw_size = 0;
+
+ dev_dbg(fui.dev,
+ "ln=%d, mi=%d, if=%d, ps1=%d, ps2=%d, sfw=%d, sucp=%d, vd=%d\n",
+ fw_ud_param->fuph_hdr_len, fuph.mip_size, fuph.ifwi_size,
+ fuph.psfw1_size, fuph.psfw2_size, fuph.ssfw_size,
+ fuph.sucp_size, fuph.vedfw_size);
+
+ /* TODO_SK::There is just
+ * 1 write required from IA side for DFU.
+ * So commenting this-out, until it gets confirmed */
+ /*ipc_command(IPC_CMD_FW_UPDATE_READY); */
+
+ /*1. DNX SIZE HEADER */
+ memcpy(fws, fw_ud_param->dnx_hdr, DNX_HDR_LEN);
+
+ memcpy_toio(mfld_fw_upd.sram, fws, DNX_HDR_LEN);
+
+ /* There are synchronization issues between IA and SCU */
+ mb();
+
+ /* Write cmd to trigger an interrupt to SCU for firmware update*/
+ ret_val = rpmsg_send_simple_command(fw_update_instance,
+ IPCMSG_FW_UPDATE,
+ IPC_CMD_FW_UPDATE_GO);
+ if (ret_val) {
+ dev_err(fui.dev, "IPC_CMD_FW_UPDATE_GO failed\n");
+ goto term;
+ }
+
+ mfld_fw_upd.wscu = !mfld_fw_upd.wscu;
+
+ if (busy_wait(&mfld_fw_upd) < 0) {
+ ret_val = -1;
+ goto term;
+ }
+
+ /* TODO:Add a count for iteration, based on sizes of security firmware,
+ * so that we determine a finite number of iterations to loop through.
+ * That way at the very least, we can at least control the number
+ * of iterations, and prevent infinite looping if there are any bugs.
+ * The only catch being for B0, SCU will request twice for each firmware
+ * chunk, since it's writing to 2 partitions.
+ * TODO::Investigate if we need to increase timeout for busy_wait,
+ * since SCU is now writing to 2 partitions.
+ */
+
+ while ((mb_state = check_mb_status(&mfld_fw_upd)) != MB_DONE) {
+
+ if (mb_state == MB_ERROR) {
+ dev_dbg(fui.dev, "check_mb_status,error\n");
+ ret_val = -1;
+ goto term;
+ }
+
+ /* SCU first asks for the FUPH header length (a single dword),
+ * which is answered inline rather than via a data chunk.
+ */
+ if (!strncmp(mfld_fw_upd.mb_status, FUPH_HDR_SIZE,
+ strlen(FUPH_HDR_SIZE))) {
+ iowrite32(fw_ud_param->fuph_hdr_len, mfld_fw_upd.sram);
+ /* There are synchronization issues between IA-SCU */
+ mb();
+ dev_dbg(fui.dev,
+ "copied fuph hdr size=%d\n",
+ ioread32(mfld_fw_upd.sram));
+ mfld_fw_upd.wia = !mfld_fw_upd.wia;
+ iowrite32(mfld_fw_upd.wia, mfld_fw_upd.mailbox +
+ IA_FLAG_OFFSET);
+ dev_dbg(fui.dev, "ia_flag=%d\n",
+ ioread32(mfld_fw_upd.mailbox + IA_FLAG_OFFSET));
+ mb();
+ mfld_fw_upd.wscu = !mfld_fw_upd.wscu;
+
+ if (busy_wait(&mfld_fw_upd) < 0) {
+ ret_val = -1;
+ goto term;
+ }
+
+ continue;
+ }
+
+ if (calc_offset_and_length(fw_ud_param, mfld_fw_upd.mb_status,
+ &offset, &length, &fuph) < 0) {
+ dev_err(fui.dev,
+ "calc_offset_and_length_error,error\n");
+ ret_val = -1;
+ goto term;
+ }
+
+ if ((process_fw_chunk(fws, offset, length,
+ &mfld_fw_upd)) != 0) {
+ dev_err(fui.dev,
+ "Error processing fw chunk=%s\n",
+ mfld_fw_upd.mb_status);
+ ret_val = -1;
+ goto term;
+ } else
+ dev_dbg(fui.dev,
+ "PASS processing fw chunk=%s\n",
+ mfld_fw_upd.mb_status);
+ }
+ ret_val = intel_scu_ipc_check_status();
+
+term:
+ kfree(fws);
+unmap_mb:
+ iounmap(mfld_fw_upd.mailbox);
+unmap_sram:
+ iounmap(mfld_fw_upd.sram);
+out_unlock:
+ rpmsg_global_unlock();
+ return ret_val;
+}
+
+/*
+ * cur_err - record the latest error string for the last_error sysfs node.
+ *
+ * strncpy() does not NUL-terminate when the source fills the buffer, so
+ * terminate explicitly; otherwise last_error_show() could read past the
+ * end of err_buf after a maximal-length message.
+ */
+static void cur_err(const char *err_info)
+{
+ strncpy(err_buf, err_info, sizeof(err_buf) - 1);
+ err_buf[sizeof(err_buf) - 1] = '\0';
+}
+
+/*
+ * write_dnx - sysfs bin_attribute write handler for the DNX image.
+ *
+ * Accumulates the userspace byte stream into the DNX slice of the FOTA
+ * buffer (pending_data + IFWI_MAX_SIZE), allocating the buffer lazily on
+ * first write. dnx_size is reset on an offset-0 write and accumulated
+ * otherwise. Returns bytes consumed or a negative errno.
+ */
+static ssize_t write_dnx(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+ int ret;
+
+ mutex_lock(&fwud_lock);
+
+ if (!pending_data) {
+ pending_data = vmalloc(FOTA_MEM_SIZE);
+ if (NULL == pending_data) {
+ cur_err("alloc fota memory by sysfs failed\n");
+ ret = -ENOMEM;
+ goto end;
+ }
+ }
+
+ fui.fwud_pending->dnx_file_data = pending_data + IFWI_MAX_SIZE;
+
+ /* Bound the whole write, not just its start offset: checking only
+ * 'off' still lets 'off + count' run past DNX_MAX_SIZE into the
+ * DNX header region at the end of the FOTA buffer.
+ */
+ if (unlikely(off + count > DNX_MAX_SIZE)) {
+ fui.fwud_pending->dnx_file_data = NULL;
+ cur_err("too large dnx binary stream!");
+ ret = -EFBIG;
+ goto end;
+ }
+
+ memcpy(fui.fwud_pending->dnx_file_data + off, buf, count);
+
+ if (!off)
+ fui.fwud_pending->dnx_size = count;
+ else
+ fui.fwud_pending->dnx_size += count;
+
+ mutex_unlock(&fwud_lock);
+ return count;
+
+end:
+ mutex_unlock(&fwud_lock);
+ return ret;
+}
+
+/* Parses from the end of IFWI, and looks for UPH$,
+ * to determine length of FUPH header.
+ *
+ * Scans backwards in 4-byte steps (up to FUPH_MAX_LEN bytes) from
+ * file_data + file_size - SKIP_BYTES for the FUPH_STR magic; on a hit,
+ * *len is set to the distance from the magic to the end of the file.
+ *
+ * NOTE(review): if file_size < SKIP_BYTES + FUPH_MAX_LEN the scan can
+ * step below file_data — presumably callers only pass full IFWI images
+ * large enough to contain a FUPH header; confirm with write_ifwi().
+ *
+ * Returns 0 on success, -EINVAL on bad arguments or if no magic found.
+ */
+static int find_fuph_header_len(unsigned int *len,
+ unsigned char *file_data, unsigned int file_size)
+{
+ int ret = -EINVAL;
+ unsigned char *temp;
+ unsigned int cnt = 0;
+
+ if (!len || !file_data || !file_size) {
+ dev_err(fui.dev, "find_fuph_header_len: Invalid inputs\n");
+ return ret;
+ }
+
+ /* Skipping the checksum at the end, and moving to the
+ * start of the last add-on firmware size in fuph.
+ */
+ temp = file_data + file_size - SKIP_BYTES;
+
+ while (cnt <= FUPH_MAX_LEN) {
+ if (!strncmp(temp, FUPH_STR, sizeof(FUPH_STR) - 1)) {
+ pr_info("Fuph_hdr_len=%d\n", cnt + SKIP_BYTES);
+ *len = cnt + SKIP_BYTES;
+ ret = 0;
+ break;
+ }
+ temp -= 4;
+ cnt += 4;
+ }
+
+ return ret;
+}
+
+/*
+ * write_ifwi - sysfs bin_attribute write handler for the IFWI image.
+ *
+ * Accumulates the userspace byte stream into the first IFWI_MAX_SIZE
+ * bytes of the FOTA buffer, allocating it lazily on first write. fsize
+ * is reset on an offset-0 write and accumulated otherwise. Returns
+ * bytes consumed or a negative errno.
+ */
+static ssize_t write_ifwi(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+ int ret;
+
+ mutex_lock(&fwud_lock);
+
+ if (!pending_data) {
+ pending_data = vmalloc(FOTA_MEM_SIZE);
+ if (NULL == pending_data) {
+ cur_err("alloc fota memory by sysfs failed\n");
+ ret = -ENOMEM;
+ goto end;
+ }
+ }
+
+ fui.fwud_pending->fw_file_data = pending_data;
+
+ /* Bound the whole write, not just its start offset: checking only
+ * 'off' still lets 'off + count' spill past IFWI_MAX_SIZE into the
+ * DNX region that follows in the FOTA buffer.
+ */
+ if (unlikely(off + count > IFWI_MAX_SIZE)) {
+ fui.fwud_pending->fw_file_data = NULL;
+ cur_err("too large ifwi binary stream!\n");
+ ret = -EFBIG;
+ goto end;
+ }
+
+ memcpy(fui.fwud_pending->fw_file_data + off, buf, count);
+
+ if (!off)
+ fui.fwud_pending->fsize = count;
+ else
+ fui.fwud_pending->fsize += count;
+
+ mutex_unlock(&fwud_lock);
+ return count;
+
+end:
+ mutex_unlock(&fwud_lock);
+ return ret;
+}
+
+/*
+ * intel_scu_fw_prepare - prepare dnx_hdr and fuph
+ *
+ * This function will be invoked at reboot, when DNX and IFWI data are ready.
+ * Builds the 3-dword DNX security header (size, GP flags with bit 31 set,
+ * and their XOR checksum), places it in the last DNX_HDR_LEN bytes of the
+ * FOTA buffer, and locates the FUPH header inside the IFWI image.
+ *
+ * Returns 0 on success, -EINVAL if the FUPH header cannot be found.
+ */
+static int intel_scu_fw_prepare(struct fw_ud *fwud_pending)
+{
+ unsigned int size;
+ unsigned int gpFlags = 0;
+ unsigned int xorcs;
+ unsigned char dnxSH[DNX_HDR_LEN] = { 0 };
+
+ mutex_lock(&fwud_lock);
+
+ size = fui.fwud_pending->dnx_size;
+
+ /* Set GPFlags parameter (bit 31 of the GP flags dword) */
+ gpFlags = gpFlags | (GPF_BIT32 << 31);
+ /* header integrity check: XOR of size and flags */
+ xorcs = (size ^ gpFlags);
+
+ memcpy((dnxSH + DNX_SIZE_OFFSET), (unsigned char *)(&size), 4);
+ memcpy((dnxSH + GP_FLAG_OFFSET), (unsigned char *)(&gpFlags), 4);
+ memcpy((dnxSH + XOR_CHK_OFFSET), (unsigned char *)(&xorcs), 4);
+
+ /* assign the last DNX_HDR_LEN bytes memory to dnx header */
+ fui.fwud_pending->dnx_hdr = pending_data + FOTA_MEM_SIZE - DNX_HDR_LEN;
+
+ /* directly memcpy to dnx_hdr */
+ memcpy(fui.fwud_pending->dnx_hdr, dnxSH, DNX_HDR_LEN);
+
+ if (find_fuph_header_len(&(fui.fwud_pending->fuph_hdr_len),
+ fui.fwud_pending->fw_file_data,
+ fui.fwud_pending->fsize) < 0) {
+ dev_err(fui.dev, "Error with FUPH header\n");
+ mutex_unlock(&fwud_lock);
+ return -EINVAL;
+ }
+
+ dev_dbg(fui.dev, "fupd_hdr_len=%d, fsize=%d, dnx_size=%d",
+ fui.fwud_pending->fuph_hdr_len, fui.fwud_pending->fsize,
+ fui.fwud_pending->dnx_size);
+
+ mutex_unlock(&fwud_lock);
+ return 0;
+}
+
+/*
+ * intel_scu_ipc_fw_update - entry point for the whole FW upgrade sequence.
+ *
+ * Silently skips the upgrade (returning 0) unless both DNX and IFWI
+ * images have been staged via sysfs and not cancelled; otherwise
+ * prepares the headers and runs the Medfield upgrade protocol.
+ */
+int intel_scu_ipc_fw_update(void)
+{
+ int ret = 0;
+
+ /* jump fw upgrade process when fota memory not allocated
+ * or when user cancels update
+ * or when one of dnx and ifwi is not written
+ * or when failure happens in writing one of dnx and ifwi
+ */
+ if (!pending_data || !fui.fwud_pending ||
+ !fui.fwud_pending->dnx_file_data ||
+ !fui.fwud_pending->fw_file_data) {
+ pr_info("Jump FW upgrade process\n");
+ goto end;
+ }
+
+ ret = intel_scu_fw_prepare(fui.fwud_pending);
+ if (ret) {
+ dev_err(fui.dev, "intel_scu_fw_prepare failed\n");
+ goto end;
+ }
+
+ ret = intel_scu_ipc_medfw_upgrade();
+ if (ret)
+ dev_err(fui.dev, "intel_scu_ipc_medfw_upgrade failed\n");
+
+end:
+ return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_fw_update);
+
+/*
+ * fw_version_show - sysfs read handler printing the SCU firmware revision.
+ *
+ * Fetches 16 bytes (4 dwords) of revision data via IPCMSG_FW_REVISION and
+ * prints them as space-separated hex. On Tangier a second revision query
+ * (sub-command 1) is appended to the same buffer.
+ */
+static ssize_t fw_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ u8 data[16] = { 0 };
+ int ret;
+ int i;
+ int used = 0;
+
+ ret = rpmsg_send_command(fw_update_instance, IPCMSG_FW_REVISION, 0,
+ NULL, (u32 *)data, 0, 4);
+ if (ret < 0) {
+ cur_err("Error getting fw version");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 16; i++)
+ used += snprintf(buf + used, PAGE_SIZE - used, "%x ", data[i]);
+
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+ ret = rpmsg_send_command(fw_update_instance,
+ IPCMSG_FW_REVISION, 1, NULL, (u32 *)data, 0, 4);
+ if (ret < 0) {
+ cur_err("Error getting fw version");
+ return -EINVAL;
+ }
+ for (i = 0; i < 16; i++)
+ used += snprintf(buf + used, PAGE_SIZE - used,
+ "%x ", data[i]);
+ }
+
+ return used;
+}
+
+/* sysfs read handler: report the most recent error recorded by cur_err(). */
+static ssize_t last_error_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", err_buf);
+}
+
+/*
+ * cancel_update_store - sysfs write handler; writing "1" cancels a staged
+ * firmware update by dropping the DNX/IFWI data pointers (the FOTA buffer
+ * itself stays allocated). Any other input is rejected with -EINVAL.
+ *
+ * NOTE(review): fui.fwud_pending is dereferenced without a NULL check —
+ * presumably this node only exists after a successful probe allocated
+ * fwud_pending; confirm the sysfs group cannot outlive it.
+ */
+static ssize_t cancel_update_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t size)
+{
+ int value;
+
+ if (sscanf(buf, "%d", &value) != 1) {
+ cur_err("One argument is needed\n");
+ return -EINVAL;
+ }
+
+ if (value == 1) {
+ mutex_lock(&fwud_lock);
+ fui.fwud_pending->fw_file_data = NULL;
+ fui.fwud_pending->dnx_file_data = NULL;
+ mutex_unlock(&fwud_lock);
+ } else {
+ cur_err("input '1' to cancel fw upgrade\n");
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+/* Local helpers to declare sysfs binary and plain attributes. */
+#define __BIN_ATTR(_name, _mode, _size, _read, _write) { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .size = _size, \
+ .read = _read, \
+ .write = _write, \
+}
+
+#define BIN_ATTR(_name, _mode, _size, _read, _write) \
+struct bin_attribute bin_attr_##_name = \
+ __BIN_ATTR(_name, _mode, _size, _read, _write)
+
+#define KOBJ_FW_UPDATE_ATTR(_name, _mode, _show, _store) \
+ struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+/* Plain attributes (under fw_info/) and the two write-only image nodes. */
+static KOBJ_FW_UPDATE_ATTR(cancel_update, S_IWUSR, NULL, cancel_update_store);
+static KOBJ_FW_UPDATE_ATTR(fw_version, S_IRUGO, fw_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(last_error, S_IRUGO, last_error_show, NULL);
+static BIN_ATTR(dnx, S_IWUSR, DNX_MAX_SIZE, NULL, write_dnx);
+static BIN_ATTR(ifwi, S_IWUSR, IFWI_MAX_SIZE, NULL, write_ifwi);
+
+static struct attribute *fw_update_attrs[] = {
+ &cancel_update_attr.attr,
+ &fw_version_attr.attr,
+ &last_error_attr.attr,
+ NULL,
+};
+
+static struct attribute_group fw_update_attr_group = {
+ .name = "fw_info",
+ .attrs = fw_update_attrs,
+};
+
+/*
+ * intel_fw_update_sysfs_create - publish the fw_info group and the dnx/ifwi
+ * binary files under @kobj, unwinding in reverse order on any failure.
+ */
+static int intel_fw_update_sysfs_create(struct kobject *kobj)
+{
+ int ret;
+
+ ret = sysfs_create_group(kobj, &fw_update_attr_group);
+ if (ret) {
+ dev_err(fui.dev, "Unable to export sysfs interface\n");
+ goto out;
+ }
+
+ ret = sysfs_create_bin_file(kobj, &bin_attr_dnx);
+ if (ret) {
+ dev_err(fui.dev, "Unable to create dnx bin file\n");
+ goto err_dnx_bin;
+ }
+
+ ret = sysfs_create_bin_file(kobj, &bin_attr_ifwi);
+ if (ret) {
+ dev_err(fui.dev, "Unable to create ifwi bin file\n");
+ goto err_ifwi_bin;
+ }
+
+ return 0;
+
+err_ifwi_bin:
+ sysfs_remove_bin_file(kobj, &bin_attr_dnx);
+err_dnx_bin:
+ sysfs_remove_group(kobj, &fw_update_attr_group);
+out:
+ return ret;
+}
+
+/* Tear down the sysfs nodes in the reverse order of creation. */
+static void intel_fw_update_sysfs_remove(struct kobject *kobj)
+{
+ sysfs_remove_bin_file(kobj, &bin_attr_ifwi);
+ sysfs_remove_bin_file(kobj, &bin_attr_dnx);
+ sysfs_remove_group(kobj, &fw_update_attr_group);
+}
+
+/*
+ * fw_update_rpmsg_probe - rpmsg channel probe: allocate the rpmsg instance,
+ * the pending-update state, the /sys/kernel/fw_update kobject and its
+ * attributes, and (optionally) pre-allocate the FOTA staging buffer.
+ *
+ * The error labels unwind in reverse acquisition order; note the label
+ * names are historical (err_free_fwud puts the kobject, err_kobj frees
+ * fwud_pending) but the unwind sequence itself is correct.
+ */
+static int fw_update_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+ int ret;
+ struct fw_update_info *fu_info = &fui;
+
+ if (rpdev == NULL) {
+ pr_err("fw_update rpmsg channel not created\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&rpdev->dev, "Probed fw_update rpmsg device\n");
+
+ /* Allocate rpmsg instance for fw_update*/
+ ret = alloc_rpmsg_instance(rpdev, &fw_update_instance);
+ if (!fw_update_instance) {
+ dev_err(&rpdev->dev, "kzalloc fw_update instance failed\n");
+ /* The branch tests the pointer, not 'ret'; force an error
+ * code so probe cannot return 0 with a NULL instance.
+ */
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Initialize rpmsg instance */
+ init_rpmsg_instance(fw_update_instance);
+
+ fu_info->dev = &rpdev->dev;
+
+ fui.fwud_pending = kzalloc(sizeof(struct fw_ud), GFP_KERNEL);
+ if (NULL == fui.fwud_pending) {
+ ret = -ENOMEM;
+ dev_err(fui.dev, "alloc fwud_pending memory failed\n");
+ goto err_fwud_pending;
+ }
+
+ scu_fw_update_kobj = kobject_create_and_add("fw_update", kernel_kobj);
+ if (!scu_fw_update_kobj) {
+ ret = -ENOMEM;
+ dev_err(fui.dev, "create kobject failed\n");
+ goto err_kobj;
+ }
+
+ ret = intel_fw_update_sysfs_create(scu_fw_update_kobj);
+ if (ret) {
+ dev_err(fui.dev, "creating fw update sysfs failed\n");
+ goto err_free_fwud;
+ }
+
+ /* If alloc_fota_mem_early flag is set, allocate FOTA_MEM_SIZE
+ * bytes memory.
+ * reserve the first contiguous IFWI_MAX_SIZE bytes for IFWI,
+ * the next contiguous DNX_MAX_SIZE bytes are reserved for DNX,
+ * the last DNX_HDR_LEN bytes for DNX Header
+ */
+ if (alloc_fota_mem_early) {
+ pending_data = vmalloc(FOTA_MEM_SIZE);
+ if (NULL == pending_data) {
+ ret = -ENOMEM;
+ dev_err(fui.dev, "early alloc fota memory failed\n");
+ goto err_sysfs;
+ }
+ }
+
+ return 0;
+
+err_sysfs:
+ intel_fw_update_sysfs_remove(scu_fw_update_kobj);
+err_free_fwud:
+ kobject_put(scu_fw_update_kobj);
+err_kobj:
+ kfree(fui.fwud_pending);
+ fui.fwud_pending = NULL;
+err_fwud_pending:
+ free_rpmsg_instance(rpdev, &fw_update_instance);
+out:
+ return ret;
+}
+
+/*
+ * fw_update_rpmsg_remove - undo everything probe set up.
+ *
+ * NOTE(review): the rpmsg instance is freed before the sysfs nodes are
+ * removed, so an in-flight fw_version_show() could still reference
+ * fw_update_instance — verify whether removal ordering should be the
+ * exact reverse of probe.
+ */
+static void fw_update_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+ free_rpmsg_instance(rpdev, &fw_update_instance);
+ intel_fw_update_sysfs_remove(scu_fw_update_kobj);
+ kobject_put(scu_fw_update_kobj);
+
+ vfree(pending_data);
+ pending_data = NULL;
+ kfree(fui.fwud_pending);
+ fui.fwud_pending = NULL;
+}
+
+/*
+ * fw_update_rpmsg_cb - rpmsg receive callback.
+ *
+ * No inbound messages are expected on this channel; log a warning
+ * (message text fixed: stray comma) and hex-dump the payload for
+ * diagnosis.
+ */
+static void fw_update_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ dev_warn(&rpdev->dev, "unexpected message\n");
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+}
+
+/* Bind this driver to the "rpmsg_fw_update" channel. */
+static struct rpmsg_device_id fw_update_rpmsg_id_table[] = {
+ { .name = "rpmsg_fw_update" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, fw_update_rpmsg_id_table);
+
+/* rpmsg driver glue: probe/remove manage the sysfs interface and state. */
+static struct rpmsg_driver fw_update_rpmsg = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = fw_update_rpmsg_id_table,
+ .probe = fw_update_rpmsg_probe,
+ .callback = fw_update_rpmsg_cb,
+ .remove = fw_update_rpmsg_remove,
+};
+
+/* Module entry: register the rpmsg driver; probe runs when SCU appears. */
+static int __init fw_update_module_init(void)
+{
+ return register_rpmsg_driver(&fw_update_rpmsg);
+}
+
+/* Module exit: unregister the driver; remove() handles all cleanup. */
+static void __exit fw_update_module_exit(void)
+{
+ unregister_rpmsg_driver(&fw_update_rpmsg);
+}
+
+module_init(fw_update_module_init);
+module_exit(fw_update_module_exit);
+
+MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
+MODULE_AUTHOR("Ning Li <ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel SCU Firmware Update Driver");
+MODULE_LICENSE("GPL v2");
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/sfi.h>
#include <linux/module.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+
+enum {
+ SCU_IPC_LINCROFT,
+ SCU_IPC_PENWELL,
+ SCU_IPC_CLOVERVIEW,
+ SCU_IPC_TANGIER,
+};
-/* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
-#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
+/* intel scu ipc driver data*/
+struct intel_scu_ipc_pdata_t {
+ u32 ipc_base;
+ u32 i2c_base;
+ u32 ipc_len;
+ u32 i2c_len;
+};
-/* Command id associated with message IPCMSG_PCNTRL */
-#define IPC_CMD_PCNTRL_W 0 /* Register write */
-#define IPC_CMD_PCNTRL_R 1 /* Register read */
-#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
+ [SCU_IPC_LINCROFT] = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ },
+ [SCU_IPC_PENWELL] = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ },
+ [SCU_IPC_CLOVERVIEW] = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ },
+ [SCU_IPC_TANGIER] = {
+ .ipc_base = 0xff009000,
+ .i2c_base = 0xff00d000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ },
+};
+static int scu_ipc_pm_callback(struct notifier_block *nb,
+ unsigned long action,
+ void *ignored);
+
+static struct notifier_block scu_ipc_pm_notifier = {
+ .notifier_call = scu_ipc_pm_callback,
+ .priority = 1,
+};
/*
* IPC register summary
* message handler is called within firmware.
*/
-#define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */
-#define IPC_MAX_ADDR 0x100 /* Maximum IPC regisers */
-#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
-#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
-#define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */
-#define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C regisers */
-
-static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
-static void ipc_remove(struct pci_dev *pdev);
+#define IPC_STATUS_ADDR 0X04
+#define IPC_SPTR_ADDR 0x08
+#define IPC_DPTR_ADDR 0x0C
+#define IPC_READ_BUFFER 0x90
+#define IPC_WRITE_BUFFER 0x80
+#define IPC_IOC 0x100
-struct intel_scu_ipc_dev {
+struct intel_ipc_controller {
struct pci_dev *pdev;
void __iomem *ipc_base;
void __iomem *i2c_base;
+ int ioc;
+ int cmd;
+ struct completion cmd_complete;
};
-static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+static struct intel_ipc_controller ipcdev; /* Only one for now */
+
+static int platform; /* Platform type */
+
+static char *ipc_err_sources[] = {
+ [IPC_ERR_NONE] =
+ "no error",
+ [IPC_ERR_CMD_NOT_SUPPORTED] =
+ "command not supported",
+ [IPC_ERR_CMD_NOT_SERVICED] =
+ "command not serviced",
+ [IPC_ERR_UNABLE_TO_SERVICE] =
+ "unable to service",
+ [IPC_ERR_CMD_INVALID] =
+ "command invalid",
+ [IPC_ERR_CMD_FAILED] =
+ "command failed",
+ [IPC_ERR_EMSECURITY] =
+ "Invalid Battery",
+ [IPC_ERR_UNSIGNEDKERNEL] =
+ "Unsigned kernel",
+};
-static int platform; /* Platform type */
+/* PM Qos struct */
+static struct pm_qos_request *qos;
-/*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
- */
-#define IPC_READ_BUFFER 0x90
+/* Suspend status*/
+static bool suspend_status;
+static DEFINE_MUTEX(scu_suspend_lock);
+
+/* Suspend status get.
+ * NOTE(review): reads suspend_status without scu_suspend_lock — presumably
+ * a stale read is tolerable here (callers only pick IOC vs. polling mode);
+ * confirm this is intentional.
+ */
+bool suspend_in_progress(void)
+{
+ return suspend_status;
+}
+
+/* Suspend status set — serialized by scu_suspend_lock, which also blocks
+ * S3 entry while an IPC holder has intel_scu_ipc_lock() taken.
+ */
+void set_suspend_status(bool status)
+{
+ mutex_lock(&scu_suspend_lock);
+ suspend_status = status;
+ mutex_unlock(&scu_suspend_lock);
+}
-#define IPC_I2C_CNTRL_ADDR 0
-#define I2C_DATA_ADDR 0x04
+/* IPC PM notifier callback */
+static int scu_ipc_pm_callback(struct notifier_block *nb,
+ unsigned long action,
+ void *ignored)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ set_suspend_status(true);
+ return NOTIFY_OK;
+ case PM_POST_SUSPEND:
+ set_suspend_status(false);
+ return NOTIFY_OK;
+ }
-static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
+ return NOTIFY_DONE;
+}
/*
* Command Register (Write Only):
* Format:
* |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
*/
-static inline void ipc_command(u32 cmd) /* Send ipc command */
+void intel_scu_ipc_send_command(u32 cmd) /* Send ipc command */
{
- writel(cmd, ipcdev.ipc_base);
+ ipcdev.cmd = cmd;
+ INIT_COMPLETION(ipcdev.cmd_complete);
+
+ if (system_state == SYSTEM_RUNNING && !suspend_in_progress()) {
+ ipcdev.ioc = 1;
+ writel(cmd | IPC_IOC, ipcdev.ipc_base);
+ } else {
+ ipcdev.ioc = 0;
+ writel(cmd, ipcdev.ipc_base);
+ }
}
/*
*/
static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
{
- writel(data, ipcdev.ipc_base + 0x80 + offset);
+ writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
* |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
*/
-static inline u8 ipc_read_status(void)
+static inline u32 ipc_read_status(void)
{
- return __raw_readl(ipcdev.ipc_base + 0x04);
+ return __raw_readl(ipcdev.ipc_base + IPC_STATUS_ADDR);
}
static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */
return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
}
-static inline int busy_loop(void) /* Wait till scu status is busy */
+int intel_scu_ipc_check_status(void)
{
- u32 status = 0;
- u32 loop_count = 0;
-
- status = ipc_read_status();
- while (status & 1) {
- udelay(1); /* scu processing time is in few u secods */
- status = ipc_read_status();
- loop_count++;
- /* break if scu doesn't reset busy bit after huge retry */
- if (loop_count > 100000) {
- dev_err(&ipcdev.pdev->dev, "IPC timed out");
- return -ETIMEDOUT;
- }
- }
- if ((status >> 1) & 1)
- return -EIO;
-
- return 0;
-}
-
-/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
-static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
-{
- int nc;
- u32 offset = 0;
- int err;
- u8 cbuf[IPC_WWBUF_SIZE] = { };
- u32 *wbuf = (u32 *)&cbuf;
-
- mutex_lock(&ipclock);
-
- memset(cbuf, 0, sizeof(cbuf));
-
- if (ipcdev.pdev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
-
- for (nc = 0; nc < count; nc++, offset += 2) {
- cbuf[offset] = addr[nc];
- cbuf[offset + 1] = addr[nc] >> 8;
+ int i;
+ int ret = 0;
+ int status;
+ int loop_count = 3000000;
+
+ if (ipcdev.ioc && (system_state == SYSTEM_RUNNING) &&
+ (!suspend_in_progress())) {
+ if (0 == wait_for_completion_timeout(
+ &ipcdev.cmd_complete, 3 * HZ))
+ ret = -ETIMEDOUT;
+ } else {
+ while ((ipc_read_status() & 1) && --loop_count)
+ udelay(1);
+ if (loop_count == 0)
+ ret = -ETIMEDOUT;
}
- if (id == IPC_CMD_PCNTRL_R) {
- for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
- ipc_data_writel(wbuf[nc], offset);
- ipc_command((count*2) << 16 | id << 12 | 0 << 8 | op);
- } else if (id == IPC_CMD_PCNTRL_W) {
- for (nc = 0; nc < count; nc++, offset += 1)
- cbuf[offset] = data[nc];
- for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
- ipc_data_writel(wbuf[nc], offset);
- ipc_command((count*3) << 16 | id << 12 | 0 << 8 | op);
- } else if (id == IPC_CMD_PCNTRL_M) {
- cbuf[offset] = data[0];
- cbuf[offset + 1] = data[1];
- ipc_data_writel(wbuf[0], 0); /* Write wbuff */
- ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
+ status = ipc_read_status();
+ if (ret == -ETIMEDOUT)
+ dev_err(&ipcdev.pdev->dev,
+ "IPC timed out, IPC_STS=0x%x, IPC_CMD=0x%x\n",
+ status, ipcdev.cmd);
+
+ if (status & 0x2) {
+ ret = -EIO;
+ i = (status >> 16) & 0xFF;
+ if (i < ARRAY_SIZE(ipc_err_sources))
+ dev_err(&ipcdev.pdev->dev,
+ "IPC failed: %s, IPC_STS=0x%x, IPC_CMD=0x%x\n",
+ ipc_err_sources[i], status, ipcdev.cmd);
+ else
+ dev_err(&ipcdev.pdev->dev,
+ "IPC failed: unknown error, IPC_STS=0x%x, "
+ "IPC_CMD=0x%x\n", status, ipcdev.cmd);
+ if ((i == IPC_ERR_UNSIGNEDKERNEL) || (i == IPC_ERR_EMSECURITY))
+ ret = -EACCES;
}
- err = busy_loop();
- if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
- /* Workaround: values are read as 0 without memcpy_fromio */
- memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
- for (nc = 0; nc < count; nc++)
- data[nc] = ipc_data_readb(nc);
- }
- mutex_unlock(&ipclock);
- return err;
-}
-
-/**
- * intel_scu_ipc_ioread8 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read byte
- *
- * Read a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread8(u16 addr, u8 *data)
-{
- return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread8);
-
-/**
- * intel_scu_ipc_ioread16 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read word
- *
- * Read a register pair. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
-{
- u16 x[2] = {addr, addr + 1 };
- return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
-
-/**
- * intel_scu_ipc_ioread32 - read a dword via the SCU
- * @addr: register on SCU
- * @data: return pointer for read dword
- *
- * Read four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- * intel_scu_ipc_iowrite8 - write a byte via the SCU
- * @addr: register on SCU
- * @data: byte to write
- *
- * Write a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite8(u16 addr, u8 data)
-{
- return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+ return ret;
}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
-/**
- * intel_scu_ipc_iowrite16 - write a word via the SCU
- * @addr: register on SCU
- * @data: word to write
- *
- * Write two registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
+void intel_scu_ipc_lock(void)
{
- u16 x[2] = {addr, addr + 1 };
- return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
+ /* Prevent C-states beyond C6 */
+ pm_qos_update_request(qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
-/**
- * intel_scu_ipc_iowrite32 - write a dword via the SCU
- * @addr: register on SCU
- * @data: dword to write
- *
- * Write four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
+ /* Prevent S3 */
+ mutex_lock(&scu_suspend_lock);
-/**
- * intel_scu_ipc_readvv - read a set of registers
- * @addr: register list
- * @data: bytes to return
- * @len: length of array
- *
- * Read registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * The largest array length permitted by the hardware is 5 items.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
-{
- return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
-EXPORT_SYMBOL(intel_scu_ipc_readv);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_lock);
-/**
- * intel_scu_ipc_writev - write a set of registers
- * @addr: register list
- * @data: bytes to write
- * @len: length of array
- *
- * Write registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * The largest array length permitted by the hardware is 5 items.
- *
- * This function may sleep.
- *
- */
-int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
+void intel_scu_ipc_unlock(void)
{
- return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_writev);
+ /* Re-enable S3 */
+ mutex_unlock(&scu_suspend_lock);
-
-/**
- * intel_scu_ipc_update_register - r/m/w a register
- * @addr: register address
- * @bits: bits to update
- * @mask: mask of bits to update
- *
- * Read-modify-write power control unit register. The first data argument
- * must be register value and second is mask value
- * mask is a bitmap that indicates which bits to update.
- * 0 = masked. Don't modify this bit, 1 = modify this bit.
- * returns 0 on success or an error code.
- *
- * This function may sleep. Locking between SCU accesses is handled
- * for the caller.
- */
-int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
-{
- u8 data[2] = { bits, mask };
- return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
+ /* Re-enable Deeper C-states beyond C6 */
+ pm_qos_update_request(qos, PM_QOS_DEFAULT_VALUE);
}
-EXPORT_SYMBOL(intel_scu_ipc_update_register);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_unlock);
/**
- * intel_scu_ipc_simple_command - send a simple command
+ * intel_scu_ipc_simple_command - send a simple command
* @cmd: command
* @sub: sub type
*
{
int err;
- mutex_lock(&ipclock);
- if (ipcdev.pdev == NULL) {
- mutex_unlock(&ipclock);
+ if (ipcdev.pdev == NULL)
return -ENODEV;
- }
- ipc_command(sub << 12 | cmd);
- err = busy_loop();
- mutex_unlock(&ipclock);
+
+ intel_scu_ipc_lock();
+ intel_scu_ipc_send_command(sub << 12 | cmd);
+ err = intel_scu_ipc_check_status();
+ intel_scu_ipc_unlock();
return err;
}
EXPORT_SYMBOL(intel_scu_ipc_simple_command);
/**
- * intel_scu_ipc_command - command with data
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in dwords
- * @out: output data
- * @outlein: output length in dwords
- *
- * Issue a command to the SCU which involves data transfers. Do the
- * data copies under the lock but leave it for the caller to interpret
+ * intel_scu_ipc_raw_cmd - raw ipc command with data
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ * @sptr: data writing to SPTR register
+ * @dptr: data writing to DPTR register
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret
+ * Note: This function must be called while holding ipclock
*/
-
-int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
- u32 *out, int outlen)
+int intel_scu_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+ u32 outlen, u32 dptr, u32 sptr)
{
int i, err;
+ u32 wbuf[4] = { 0 };
- mutex_lock(&ipclock);
- if (ipcdev.pdev == NULL) {
- mutex_unlock(&ipclock);
+ if (ipcdev.pdev == NULL)
return -ENODEV;
- }
- for (i = 0; i < inlen; i++)
- ipc_data_writel(*in++, 4 * i);
-
- ipc_command((inlen << 16) | (sub << 12) | cmd);
- err = busy_loop();
+ if (inlen > 16)
+ return -EINVAL;
+
+ memcpy(wbuf, in, inlen);
+
+ writel(dptr, ipcdev.ipc_base + IPC_DPTR_ADDR);
+ writel(sptr, ipcdev.ipc_base + IPC_SPTR_ADDR);
+
+ /*
+ * SRAM controller doesn't support 8bit write, it only supports
+ * 32bit write, so we have to write into the WBUF in 32bit,
+ * and SCU FW will use the inlen to determine the actual input
+ * data length in the WBUF.
+ */
+ for (i = 0; i < ((inlen + 3) / 4); i++)
+ ipc_data_writel(wbuf[i], 4 * i);
+
+ /*
+ * Watchdog IPC command is an exception here using double word
+ * as the unit of input data size because of historical reasons
+ * and SCU FW is doing so.
+ */
+ if ((cmd & 0xFF) == IPCMSG_WATCHDOG_TIMER)
+ inlen = (inlen + 3) / 4;
+ /*
+ * In case of 3 pmic writes or read-modify-writes
+ * there are holes in the middle of the buffer which are
+ * ignored by SCU. These bytes should not be included into
+ * size of the ipc msg. Holes are as follows:
+ * write: wbuf[6 & 7]
+ * read-modify-write: wbuf[6 & 7 & 11]
+ */
+ else if ((cmd & 0xFF) == IPCMSG_PCNTRL) {
+ if (sub == IPC_CMD_PCNTRL_W && inlen == 11)
+ inlen -= 2;
+ else if (sub == IPC_CMD_PCNTRL_M && inlen == 15)
+ inlen -= 3;
+ }
+ intel_scu_ipc_send_command((inlen << 16) | (sub << 12) | cmd);
+ err = intel_scu_ipc_check_status();
for (i = 0; i < outlen; i++)
*out++ = ipc_data_readl(4 * i);
- mutex_unlock(&ipclock);
return err;
}
-EXPORT_SYMBOL(intel_scu_ipc_command);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_cmd);
-/*I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ 2 /* I2C Read command */
-
-/**
- * intel_scu_ipc_i2c_cntrl - I2C read/write operations
- * @addr: I2C address + command bits
- * @data: data to read/write
- *
- * Perform an an I2C read/write operation via the SCU. All locking is
- * handled for the caller. This function may sleep.
- *
- * Returns an error code or 0 on success.
- *
- * This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
+int intel_scu_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+ u32 *out, u32 outlen)
{
- u32 cmd = 0;
-
- mutex_lock(&ipclock);
- if (ipcdev.pdev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
- cmd = (addr >> 24) & 0xFF;
- if (cmd == IPC_I2C_READ) {
- writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
- /* Write not getting updated without delay */
- mdelay(1);
- *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
- } else if (cmd == IPC_I2C_WRITE) {
- writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
- mdelay(1);
- writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
- } else {
- dev_err(&ipcdev.pdev->dev,
- "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
- mutex_unlock(&ipclock);
- return -EIO;
- }
- mutex_unlock(&ipclock);
- return 0;
+ int ret;
+ intel_scu_ipc_lock();
+ ret = intel_scu_ipc_raw_cmd(cmd, sub, in, inlen, out, outlen, 0, 0);
+ intel_scu_ipc_unlock();
+ return ret;
}
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_command);
/*
* Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
* When ioc bit is set to 1, caller api must wait for interrupt handler called
- * which in turn unlocks the caller api. Currently this is not used
+ * which in turn unlocks the caller api.
*
* This is edge triggered so we need take no action to clear anything
*/
static irqreturn_t ioc(int irq, void *dev_id)
{
+ complete(&ipcdev.cmd_complete);
return IRQ_HANDLED;
}
/**
- * ipc_probe - probe an Intel SCU IPC
+ * ipc_probe - probe an Intel SCU IPC
* @dev: the PCI device matching
* @id: entry in the match table
*
*/
static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int err;
+ int err, pid;
+ struct intel_scu_ipc_pdata_t *pdata;
resource_size_t pci_resource;
if (ipcdev.pdev) /* We support only one SCU */
return -EBUSY;
+ pid = id->driver_data;
+ pdata = &intel_scu_ipc_pdata[pid];
+
ipcdev.pdev = pci_dev_get(dev);
err = pci_enable_device(dev);
if (!pci_resource)
return -ENOMEM;
- if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
+ init_completion(&ipcdev.cmd_complete);
+
+ if (request_irq(dev->irq, ioc, IRQF_NO_SUSPEND, "intel_scu_ipc",
+ &ipcdev))
return -EBUSY;
- ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR);
+ ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
if (!ipcdev.ipc_base)
return -ENOMEM;
- ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR);
+ ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
if (!ipcdev.i2c_base) {
iounmap(ipcdev.ipc_base);
return -ENOMEM;
}
/**
- * ipc_remove - remove a bound IPC device
+ * ipc_remove - remove a bound IPC device
* @pdev: PCI device
*
* In practice the SCU is not removable but this function is also
}
static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
+ {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
+ {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
+ {PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
+ {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
{ 0,}
};
MODULE_DEVICE_TABLE(pci, pci_ids);
.remove = ipc_remove,
};
-
-static int __init intel_scu_ipc_init(void)
+static int intel_scu_ipc_init(void)
{
- platform = mrst_identify_cpu();
+ platform = intel_mid_identify_cpu();
if (platform == 0)
return -ENODEV;
+
+ qos = kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+ if (!qos)
+ return -ENOMEM;
+
+ pm_qos_add_request(qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+ register_pm_notifier(&scu_ipc_pm_notifier);
+
return pci_register_driver(&ipc_driver);
}
static void __exit intel_scu_ipc_exit(void)
{
+ pm_qos_remove_request(qos);
+
pci_unregister_driver(&ipc_driver);
}
MODULE_DESCRIPTION("Intel SCU IPC driver");
MODULE_LICENSE("GPL");
-module_init(intel_scu_ipc_init);
+fs_initcall(intel_scu_ipc_init);
module_exit(intel_scu_ipc_exit);
*
* (C) Copyright 2008-2010 Intel Corporation
* Author: Sreedhara DS (sreedhara.ds@intel.com)
+ * (C) Copyright 2010 Intel Corporation
+ * Author: Sudha Krishnakumar (sudha.krishnakumar@intel.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/io.h>
+#include <linux/rpmsg.h>
#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
-static int major;
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
-/* ioctl commnds */
-#define INTE_SCU_IPC_REGISTER_READ 0
-#define INTE_SCU_IPC_REGISTER_WRITE 1
-#define INTE_SCU_IPC_REGISTER_UPDATE 2
+#define MAX_FW_SIZE 264192
+
+#define PMIT_RESET1_OFFSET 14
+#define PMIT_RESET2_OFFSET 15
+
+#define IPC_RESIDENCY_CMD_ID_START 0
+#define IPC_RESIDENCY_CMD_ID_DUMP 2
+
+#define SRAM_ADDR_S0IX_RESIDENCY 0xFFFF71E0
+#define ALL_RESIDENCY_DATA_SIZE 12
+
+#define DUMP_OSNIB
+
+#define OSHOB_EXTEND_DESC_SIZE 52 /* OSHOB header+osnib+oem info: 52 bytes.*/
+
+#define OSHOB_HEADER_MAGIC_SIZE 4 /* Size (bytes) of magic number in OSHOB */
+ /* header. */
+
+#define OSHOB_MAGIC_NUMBER "$OH$" /* If found when reading the first */
+ /* 4 bytes of the OSOHB zone, it */
+ /* means that the new extended OSHOB */
+ /* is going to be used. */
+
+#define OSHOB_REV_MAJ_DEFAULT 0 /* Default revision number of OSHOB. */
+#define OSHOB_REV_MIN_DEFAULT 1 /* If 0.1 the default OSHOB is used */
+ /* instead of the extended one. */
+
+/* Defines for the SCU buffer included in OSHOB structure. */
+#define OSHOB_SCU_BUF_BASE_DW_SIZE 1 /* In dwords. By default SCU */
+ /* buffer size is 1 dword. */
+
+#define OSHOB_SCU_BUF_MRFLD_DW_SIZE (4*OSHOB_SCU_BUF_BASE_DW_SIZE)
+ /* In dwords. On Merrifield the */
+ /* SCU trace buffer size is */
+ /* 4 dwords. */
+#define OSHOB_DEF_FABRIC_ERR_MRFLD_SIZE 50 /* In DWORDS. For Merrifield.*/
+ /* Fabric error log size (in DWORDS).*/
+ /* From offs 0x44 to 0x10C. */
+ /* Used in default OSHOB. */
+
+#define OSNIB_SIZE 32 /* Size (bytes) of the default OSNIB.*/
+
+#define OSNIB_INTEL_RSVD_SIZE 24 /* Size (bytes) of Intel RESERVED in */
+ /* OSNIB. */
+#define OSNIB_OEM_RSVD_SIZE 96 /* Size (bytes) of OEM RESERVED */
+ /* in OSNIB. */
+
+#define OSNIB_NVRAM_SIZE 128 /* Size (bytes) of NVRAM */
+ /* in OSNIB. */
+
+#define OSHOB_DEF_FABRIC_ERR_SIZE 50 /* In DWORDS. */
+ /* Fabric error log size (in DWORDS).*/
+ /* From offs 0x44 to 0x10C. */
+ /* Used in default OSHOB. */
+
+#define OSHOB_FABRIC_ERROR1_SIZE 12 /* 1st part of Fabric error dump. */
+ /* Used in extended OSHOB. */
+
+#define OSHOB_FABRIC_ERROR2_SIZE 9 /* 2nd part of Fabric error dump. */
+ /* Used in extended OSHOB. */
+
+#define OSHOB_RESERVED_DEBUG_SIZE 5 /* Reserved for debug */
+
+/* Size (bytes) of the default OSHOB structure. Includes the default OSNIB */
+/* size. */
+#define OSHOB_SIZE (68 + (4*OSHOB_SCU_BUF_BASE_DW_SIZE) + \
+ (4*OSHOB_DEF_FABRIC_ERR_SIZE)) /* In bytes. */
+
+#define OSHOB_MRFLD_SIZE (68 + (4*OSHOB_SCU_BUF_MRFLD_DW_SIZE) + \
+ (4*OSHOB_DEF_FABRIC_ERR_MRFLD_SIZE))/* In bytes. */
+
+/* SCU buffer size is give in dwords. So it is x4 here to get the total */
+/* number of bytes. */
+
+#define SCU_TRACE_HEADER_SIZE 16 /* SCU trace header */
+
+#define CHAABI_DEBUG_DATA_SIZE 5 /* Reserved for chaabi debug */
+
+#define OSHOB_RESERVED_SIZE 184 /* Reserved */
+
+
+struct chip_reset_event {
+ int id;
+ const char *reset_ev1_name;
+ const char *reset_ev2_name;
+};
+
+static struct chip_reset_event chip_reset_events[] = {
+ { INTEL_MID_CPU_CHIP_TANGIER, "RESETSRC0", "RESETSRC1" },
+ { INTEL_MID_CPU_CHIP_CLOVERVIEW, "RESETIRQ1", "RESETIRQ2" },
+ { INTEL_MID_CPU_CHIP_PENWELL, "RESETIRQ1", "RESETIRQ2" },
+};
+
+struct osnib_target_os {
+ const char *target_os_name;
+ int id;
+};
+
+static struct osnib_target_os osnib_target_oses[] = {
+ { "main", SIGNED_MOS_ATTR },
+ { "charging", SIGNED_COS_ATTR },
+ { "recovery", SIGNED_RECOVERY_ATTR },
+ { "fastboot", SIGNED_POS_ATTR },
+ { "factory", SIGNED_FACTORY_ATTR },
+};
+
+
+struct osnib_wake_src {
+ u8 id;
+ const char *wakesrc_name;
+};
+
+static struct osnib_wake_src osnib_wake_srcs[] = {
+ { WAKE_BATT_INSERT, "battery inserted" },
+ { WAKE_PWR_BUTTON_PRESS, "power button pressed" },
+ { WAKE_RTC_TIMER, "rtc timer" },
+ { WAKE_USB_CHRG_INSERT, "usb charger inserted" },
+ { WAKE_RESERVED, "reserved" },
+ { WAKE_REAL_RESET, "real reset" },
+ { WAKE_COLD_BOOT, "cold boot" },
+ { WAKE_UNKNOWN, "unknown" },
+ { WAKE_KERNEL_WATCHDOG_RESET, "kernel watchdog reset" },
+ { WAKE_SECURITY_WATCHDOG_RESET, "security watchdog reset" },
+ { WAKE_WATCHDOG_COUNTER_EXCEEDED, "watchdog counter exceeded" },
+ { WAKE_POWER_SUPPLY_DETECTED, "power supply detected" },
+ { WAKE_FASTBOOT_BUTTONS_COMBO, "fastboot combo" },
+ { WAKE_NO_MATCHING_OSIP_ENTRY, "no matching osip entry" },
+ { WAKE_CRITICAL_BATTERY, "critical battery" },
+ { WAKE_INVALID_CHECKSUM, "invalid checksum" },
+ { WAKE_FORCED_RESET, "forced reset"},
+ { WAKE_ACDC_CHRG_INSERT, "ac charger inserted" },
+ { WAKE_PMIC_WATCHDOG_RESET, "pmic watchdog reset" },
+ { WAKE_PLATFORM_WATCHDOG_RESET, "HWWDT reset platform" },
+ { WAKE_SC_WATCHDOG_RESET, "HWWDT reset SC" },
+};
+
+
+/* OSNIB allocation. */
+struct scu_ipc_osnib {
+ u8 target_mode; /* Target mode. */
+ u8 wd_count; /* Software watchdog. */
+ u8 alarm; /* RTC alarm. */
+ u8 wakesrc; /* WAKESRC. */
+ u8 reset_ev1; /* RESETIRQ1 or RESETSRC0. */
+ u8 reset_ev2; /* RESETIRQ2 or RESETSRC1. */
+ u8 spare; /* Spare. */
+ u8 intel_reserved[OSNIB_INTEL_RSVD_SIZE]; /* INTEL RESERVED */
+ /* (offsets 7 to 30). */
+ u8 checksum; /* CHECKSUM. */
+ u8 oem_reserved[OSNIB_OEM_RSVD_SIZE]; /* OEM RESERVED */
+ /* (offsets 32 to 127). */
+ u8 nvram[OSNIB_NVRAM_SIZE]; /* NVRAM */
+ /* (offsets 128 to 255). */
+};
+
+/* Default OSHOB allocation. */
+struct scu_ipc_oshob {
+ u32 scutxl; /* SCUTxl offset position. */
+ u32 iatxl; /* IATxl offset. */
+ u32 bocv; /* BOCV offset. */
+ u8 osnibr[OSNIB_SIZE]; /* OSNIB area offset. */
+ u32 pmit; /* PMIT offset. */
+ u32 pemmcmhki; /* PeMMCMHKI offset. */
+ u32 osnibw_ptr; /* OSNIB Write at offset 0x34. */
+ u32 fab_err_log[OSHOB_DEF_FABRIC_ERR_SIZE]; /* Fabric */
+ /* error log buffer. */
+};
+
+/* Extended OSHOB allocation. version 1.3 */
+struct scu_ipc_oshob_extend {
+ u32 magic; /* MAGIC number. */
+ u8 rev_major; /* Revision major. */
+ u8 rev_minor; /* Revision minor. */
+ u16 oshob_size; /* OSHOB size. */
+ u32 nvram_addr; /* NVRAM phys address */
+ u32 scutxl; /* SCUTxl offset position. */
+ /* If on MRFLD platform, next param may be */
+ /* shifted by */
+ /* (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) bytes.*/
+ u32 iatxl; /* IATxl. */
+ u32 bocv; /* BOCV. */
+
+ u16 intel_size; /* Intel size (in OSNIB area). */
+ u16 oem_size; /* OEM size (of OEM area). */
+ u32 r_intel_ptr; /* Read Intel pointer. */
+ u32 w_intel_ptr; /* Write Intel pointer. */
+ u32 r_oem_ptr; /* Read OEM pointer. */
+ u32 w_oem_ptr; /* Write OEM pointer. */
+
+ u32 pmit; /* PMIT. */
+ u32 pemmcmhki; /* PeMMCMHKI. */
+
+ /* OSHOB as defined for CLOVERVIEW */
+ u32 nvram_size; /* NVRAM max size in bytes */
+ u32 fabricerrlog1[OSHOB_FABRIC_ERROR1_SIZE]; /* fabric error data */
+ u8 vrtc_alarm_dow; /* Alarm sync */
+ u8 vrtc_alarm_dom; /* Alarm sync */
+ u8 vrtc_alarm_month; /* Alarm sync */
+ u8 vrtc_alarm_year; /* Alarm sync */
+ u32 reserved_debug[OSHOB_RESERVED_DEBUG_SIZE];/* Reserved Debug data */
+ u32 reserved2; /* Reserved */
+ u32 fabricerrlog2[OSHOB_FABRIC_ERROR2_SIZE]; /* fabric error data2 */
+ u32 sculogbufferaddr; /* phys addr of scu log buffer */
+ u32 sculogbuffersize; /* size of scu log buffer */
+};
+
+/* Extended OSHOB allocation. version 1.4. */
+struct scu_ipc_oshob_extend_v14 {
+ u32 magic; /* MAGIC number. */
+ u8 rev_major; /* Revision major. */
+ u8 rev_minor; /* Revision minor. */
+ u16 oshob_size; /* OSHOB size. */
+
+ u32 scutxl; /* SCUTxl offset position. */
+ /* If on MRFLD platform, next param may be */
+ /* shifted by */
+ /* (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) bytes.*/
+ u32 iatxl; /* IATxl. */
+ u32 bocv; /* BOCV. */
+
+ u32 osnib_ptr; /* The unique OSNIB pointer. */
+
+ u32 pmit; /* PMIT. */
+ u8 scutraceheader[SCU_TRACE_HEADER_SIZE]; /* SCU trace header */
+ u32 fabricerrlog[OSHOB_DEF_FABRIC_ERR_SIZE]; /* fabric error data */
+ u32 chaabidebugdata[CHAABI_DEBUG_DATA_SIZE]; /* fabric error data */
+ u32 pmuemergency; /* pmu emergency */
+ u32 sculogbufferaddr; /* scu log buffer address */
+ u32 sculogbuffersize; /* size of scu log buffer */
+ u32 oshob_reserved[OSHOB_RESERVED_SIZE]; /* oshob reserved */
+};
+
+struct scu_ipc_oshob_info {
+ __u32 oshob_base; /* Base address of OSHOB. Use ioremap to */
+ /* remap for access. */
+ __u8 oshob_majrev; /* Major revision number of OSHOB structure. */
+ __u8 oshob_minrev; /* Minor revision number of OSHOB structure. */
+ __u16 oshob_size; /* Total size (bytes) of OSHOB structure. */
+ __u32 scu_trace[OSHOB_SCU_BUF_BASE_DW_SIZE*4]; /* SCU trace buffer.*/
+ /* Set to max SCU buffer size (dwords) to */
+ /* adapt to MRFLD. On other platforms, only */
+ /* the first dword is stored and read. */
+ __u32 ia_trace; /* IA trace buffer. */
+ __u16 osnib_size; /* Total size (bytes) of OSNIB structure. */
+ __u16 oemnib_size; /* Total size (bytes) of OEMNIB area. */
+ __u32 osnibr_ptr; /* Pointer to Intel read zone. */
+ __u32 osnibw_ptr; /* Pointer to Intel write zone. */
+ __u32 oemnibr_ptr; /* Pointer to OEM read zone. */
+ __u32 oemnibw_ptr; /* Pointer to OEM write zone. */
+ __u32 scu_trace_buf; /* SCU extended trace buffer */
+ __u32 scu_trace_size; /* SCU extended trace buffer size */
+ __u32 nvram_addr; /* NV ram phys addr */
+ __u32 nvram_size; /* NV ram size in bytes */
+
+ int (*scu_ipc_write_osnib)(u8 *data, int len, int offset);
+ int (*scu_ipc_read_osnib)(u8 *data, int len, int offset);
+
+ int platform_type; /* Identifies the platform (list of supported */
+ /* platforms is given in intel-mid.h). */
+
+ u16 offs_add; /* The additional shift bytes to consider */
+ /* giving the offset at which the OSHOB params*/
+ /* will be read. If MRFLD it must be set to */
+ /* take into account the extra SCU dwords. */
-struct scu_ipc_data {
- u32 count; /* No. of registers */
- u16 addr[5]; /* Register addresses */
- u8 data[5]; /* Register data */
- u8 mask; /* Valid for read-modify-write */
};
+/* Structure for OSHOB info */
+struct scu_ipc_oshob_info *oshob_info;
+
+static struct rpmsg_instance *ipcutil_instance;
+
+/* Mode for Audio clock */
+static DEFINE_MUTEX(osc_clk0_lock);
+static unsigned int osc_clk0_mode;
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz)
+{
+ /* SCU IPC COMMAND(osc clk on/off) definition:
+ * ipc_wbuf[0] = clock to act on {0, 1, 2, 3}
+ * ipc_wbuf[1] =
+ * bit 0 - 1:on 0:off
+ * bit 1 - if 1, read divider setting from bits 3:2 as follows:
+ * bit [3:2] - 00: clk/1, 01: clk/2, 10: clk/4, 11: reserved
+ */
+ unsigned int base_freq;
+ unsigned int div;
+ u8 ipc_wbuf[2];
+ int ipc_ret;
+
+ if (clk > 3)
+ return -EINVAL;
+
+ ipc_wbuf[0] = clk;
+ ipc_wbuf[1] = 0;
+ if (khz) {
+#ifdef CONFIG_CTP_CRYSTAL_38M4
+ base_freq = 38400;
+#else
+ base_freq = 19200;
+#endif
+ div = fls(base_freq / khz) - 1;
+ if (div >= 3 || (1 << div) * khz != base_freq)
+ return -EINVAL; /* Allow only exact frequencies */
+ ipc_wbuf[1] = 0x03 | (div << 2);
+ }
+
+ ipc_ret = rpmsg_send_command(ipcutil_instance,
+ RP_OSC_CLK_CTRL, 0, ipc_wbuf, NULL, 2, 0);
+ if (ipc_ret != 0)
+ pr_err("%s: failed to set osc clk(%d) output\n", __func__, clk);
+
+ return ipc_ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_osc_clk);
+
+/*
+ * OSC_CLK_AUDIO is connected to the MSIC as well as Audience, so it should be
+ * turned on if any one of them requests it to be on and it should be turned off
+ * only if no one needs it on.
+ */
+int intel_scu_ipc_set_osc_clk0(unsigned int enable, enum clk0_mode mode)
+{
+ int ret = 0, clk_enable;
+ static const unsigned int clk_khz = 19200;
+
+ pr_info("set_clk0 request %s for Mode 0x%x\n",
+ enable ? "ON" : "OFF", mode);
+ mutex_lock(&osc_clk0_lock);
+ if (mode == CLK0_QUERY) {
+ ret = osc_clk0_mode;
+ goto out;
+ }
+ if (enable) {
+ /* if clock is already on, just add new user */
+ if (osc_clk0_mode) {
+ osc_clk0_mode |= mode;
+ goto out;
+ }
+ osc_clk0_mode |= mode;
+ pr_info("set_clk0: enabling clk, mode 0x%x\n", osc_clk0_mode);
+ clk_enable = 1;
+ } else {
+ osc_clk0_mode &= ~mode;
+ pr_info("set_clk0: disabling clk, mode 0x%x\n", osc_clk0_mode);
+ /* others using the clock, cannot turn it off */
+ if (osc_clk0_mode)
+ goto out;
+ clk_enable = 0;
+ }
+ pr_info("configuring OSC_CLK_AUDIO now\n");
+ ret = intel_scu_ipc_osc_clk(OSC_CLK_AUDIO, clk_enable ? clk_khz : 0);
+out:
+ mutex_unlock(&osc_clk0_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_set_osc_clk0);
+
+#define MSIC_VPROG1_CTRL 0xD6
+#define MSIC_VPROG2_CTRL 0xD7
+
+#define MSIC_VPROG2_ON 0x36 /*1.200V and Auto mode*/
+#define MSIC_VPROG1_ON 0xF6 /*2.800V and Auto mode*/
+#define MSIC_VPROG_OFF 0x24 /*1.200V and OFF*/
+
+/* Defines specific of MRFLD platform (CONFIG_X86_MRFLD). */
+#define MSIC_VPROG1_MRFLD_CTRL 0xAC
+#define MSIC_VPROG2_MRFLD_CTRL 0xAD
+
+#define MSIC_VPROG1_MRFLD_ON 0xC1 /* 2.80V */
+#define MSIC_VPROG2_MRFLD_ON 0xC1 /* 2.80V */
+#define MSIC_VPROG_MRFLD_OFF 0 /* OFF */
+/* End of MRFLD specific.*/
+
+/* Helpers to turn on/off msic vprog1 and vprog2 */
+int intel_scu_ipc_msic_vprog1(int on)
+{
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+ return intel_scu_ipc_iowrite8(MSIC_VPROG1_MRFLD_CTRL,
+ on ? MSIC_VPROG1_MRFLD_ON : MSIC_VPROG_MRFLD_OFF);
+ else
+ return intel_scu_ipc_iowrite8(MSIC_VPROG1_CTRL,
+ on ? MSIC_VPROG1_ON : MSIC_VPROG_OFF);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_msic_vprog1);
+
+int intel_scu_ipc_msic_vprog2(int on)
+{
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+ return intel_scu_ipc_iowrite8(MSIC_VPROG2_MRFLD_CTRL,
+ on ? MSIC_VPROG2_MRFLD_ON : MSIC_VPROG_MRFLD_OFF);
+ else
+ return intel_scu_ipc_iowrite8(MSIC_VPROG2_CTRL,
+ on ? MSIC_VPROG2_ON : MSIC_VPROG_OFF);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_msic_vprog2);
+
/**
* scu_reg_access - implement register access ioctls
* @cmd: command we are doing (read/write/update)
static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
{
- int count = data->count;
+ int ret;
- if (count == 0 || count == 3 || count > 4)
+ if (data->count == 0 || data->count > 5)
return -EINVAL;
switch (cmd) {
- case INTE_SCU_IPC_REGISTER_READ:
- return intel_scu_ipc_readv(data->addr, data->data, count);
- case INTE_SCU_IPC_REGISTER_WRITE:
- return intel_scu_ipc_writev(data->addr, data->data, count);
- case INTE_SCU_IPC_REGISTER_UPDATE:
- return intel_scu_ipc_update_register(data->addr[0],
- data->data[0], data->mask);
+ case INTEL_SCU_IPC_REGISTER_READ:
+ ret = intel_scu_ipc_readv(data->addr, data->data, data->count);
+ break;
+ case INTEL_SCU_IPC_REGISTER_WRITE:
+ ret = intel_scu_ipc_writev(data->addr, data->data, data->count);
+ break;
+ case INTEL_SCU_IPC_REGISTER_UPDATE:
+ ret = intel_scu_ipc_update_register(data->addr[0],
+ data->data[0],
+ data->mask);
+ break;
default:
return -ENOTTY;
}
+ return ret;
+}
+
+#define check_pmdb_sub_cmd(x) (x == PMDB_SUB_CMD_R_OTPCTL || \
+ x == PMDB_SUB_CMD_R_WMDB || x == PMDB_SUB_CMD_W_WMDB || \
+ x == PMDB_SUB_CMD_R_OTPDB || x == PMDB_SUB_CMD_W_OTPDB)
+#define pmdb_sub_cmd_is_read(x) (x == PMDB_SUB_CMD_R_OTPCTL || \
+ x == PMDB_SUB_CMD_R_WMDB || x == PMDB_SUB_CMD_R_OTPDB)
+
+static int check_pmdb_buffer(struct scu_ipc_pmdb_buffer *p_buf)
+{
+ int size;
+
+ switch (p_buf->sub) {
+ case PMDB_SUB_CMD_R_WMDB:
+ case PMDB_SUB_CMD_W_WMDB:
+ size = PMDB_WMDB_SIZE;
+ break;
+ case PMDB_SUB_CMD_R_OTPDB:
+ case PMDB_SUB_CMD_W_OTPDB:
+ size = PMDB_OTPDB_SIZE;
+ break;
+ case PMDB_SUB_CMD_R_OTPCTL:
+ size = PMDB_OTPCTL_SIZE;
+ break;
+ default:
+ size = 0;
+ }
+
+ return check_pmdb_sub_cmd(p_buf->sub) &&
+ (p_buf->count + p_buf->offset < size) &&
+ (p_buf->count % 4 == 0);
+}
+
+/**
+ * scu_pmdb_access - access PMDB data through SCU IPC cmds
+ * @p_buf: PMDB access buffer, it describe the data to write/read.
+ * p_buf->sub - SCU IPC sub cmd of PMDB access,
+ * this sub cmd distinguishes the different component
+ * in PMDB which is to be accessed. (WMDB, OTPDB, OTPCTL)
+ * p_buf->count - access data's count;
+ * p_buf->offset - access data's offset for each component in PMDB;
+ * p_buf->data - data to write/read.
+ *
+ * Write/read data to/from PMDB.
+ *
+ */
+static int scu_pmdb_access(struct scu_ipc_pmdb_buffer *p_buf)
+{
+ int i, offset, ret = -EINVAL;
+ u8 *p_data;
+
+ if (!check_pmdb_buffer(p_buf)) {
+ pr_err("Invalid PMDB buffer!\n");
+ return -EINVAL;
+ }
+
+ /* 1. we use rpmsg_send_raw_command() IPC cmd interface
+ * to access PMDB data. Each call of rpmsg_send_raw_command()
+ * can only access at most PMDB_ACCESS_SIZE bytes' data.
+ * 2. There are two kinds of pmdb sub commands, read command
+ * and write command. For read command, we must transport
+ * in and out buffer to rpmsg_send_raw_command(), because
+ * in buffer length is passed as the access length which must
+ * be transported to SCU.
+ */
+ p_data = p_buf->data;
+ offset = p_buf->offset;
+ for (i = 0; i < p_buf->count/PMDB_ACCESS_SIZE; i++) {
+ if (pmdb_sub_cmd_is_read(p_buf->sub))
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_PMDB, p_buf->sub,
+ p_data, (u32 *)p_data,
+ PMDB_ACCESS_SIZE, PMDB_ACCESS_SIZE / 4,
+ 0, offset);
+ else
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_PMDB, p_buf->sub,
+ p_data, NULL, PMDB_ACCESS_SIZE,
+ 0, 0, offset);
+ if (ret < 0) {
+ pr_err("intel_scu_ipc_raw_cmd failed!\n");
+ return ret;
+ }
+ offset += PMDB_ACCESS_SIZE;
+ p_data += PMDB_ACCESS_SIZE;
+ }
+ if (p_buf->count % PMDB_ACCESS_SIZE > 0) {
+ if (pmdb_sub_cmd_is_read(p_buf->sub))
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_PMDB, p_buf->sub,
+ p_data, (u32 *)p_data,
+ p_buf->count % PMDB_ACCESS_SIZE,
+ (p_buf->count % PMDB_ACCESS_SIZE) / 4,
+ 0, offset);
+ else
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_PMDB, p_buf->sub,
+ p_data, NULL,
+ p_buf->count % PMDB_ACCESS_SIZE,
+ 0, 0, offset);
+ if (ret < 0) {
+ pr_err("intel_scu_ipc_raw_cmd failed!\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int do_pmdb_user_buf_access(void __user *argp)
+{
+ int ret;
+ struct scu_ipc_pmdb_buffer *p_buf;
+
+ p_buf = kzalloc(sizeof(struct scu_ipc_pmdb_buffer), GFP_KERNEL);
+ if (p_buf == NULL) {
+ pr_err("failed to allocate memory for pmdb buffer!\n");
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(p_buf, argp, sizeof(struct scu_ipc_pmdb_buffer));
+ if (ret < 0) {
+ pr_err("copy from user failed!!\n");
+ goto err;
+ }
+
+ ret = scu_pmdb_access(p_buf);
+ if (ret < 0) {
+ pr_err("scu_pmdb_access error!\n");
+ goto err;
+ }
+
+ if (pmdb_sub_cmd_is_read(p_buf->sub)) {
+ ret = copy_to_user(argp + 3 * sizeof(u32),
+ p_buf->data, p_buf->count);
+ if (ret < 0)
+ pr_err("copy to user failed!!\n");
+ }
+
+err:
+ kfree(p_buf);
+ return ret;
}
/**
static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- int ret;
+ int ret = -EINVAL;
struct scu_ipc_data data;
void __user *argp = (void __user *)arg;
- if (!capable(CAP_SYS_RAWIO))
+	/* The only IOCTL cmds allowed through without a capability check */
+	/* are those that get fw version info; all others are checked to  */
+	/* prevent arbitrary access to the hardware bits exposed here.    */
+
+ if ((cmd != INTEL_SCU_IPC_FW_REVISION_GET &&
+ cmd != INTEL_SCU_IPC_FW_REVISION_EXT_GET &&
+ cmd != INTEL_SCU_IPC_S0IX_RESIDENCY) &&
+ !capable(CAP_SYS_RAWIO))
return -EPERM;
- if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
- return -EFAULT;
- ret = scu_reg_access(cmd, &data);
+ switch (cmd) {
+ case INTEL_SCU_IPC_S0IX_RESIDENCY:
+ {
+ void __iomem *s0ix_residencies_addr;
+ u8 dump_results[ALL_RESIDENCY_DATA_SIZE] = {0};
+ u32 cmd_id;
+
+ if (copy_from_user(&cmd_id, argp, sizeof(u32))) {
+ pr_err("copy from user failed!!\n");
+ return -EFAULT;
+ }
+
+		/* Check that the residency-counter sub-cmd id is in valid range */
+
+ if (cmd_id > IPC_RESIDENCY_CMD_ID_DUMP) {
+ pr_err("invalid si0x residency sub-cmd id!\n");
+ return -EINVAL;
+ }
+
+ ret = rpmsg_send_simple_command(ipcutil_instance,
+ RP_S0IX_COUNTER, cmd_id);
+
+ if (ret < 0) {
+ pr_err("ipc_get_s0ix_counter failed!\n");
+ return ret;
+ }
+
+ if (cmd_id == IPC_RESIDENCY_CMD_ID_DUMP) {
+ s0ix_residencies_addr = ioremap_nocache(
+ SRAM_ADDR_S0IX_RESIDENCY,
+ ALL_RESIDENCY_DATA_SIZE);
+
+ if (!s0ix_residencies_addr) {
+ pr_err("ioremap SRAM address failed!!\n");
+ return -EFAULT;
+ }
+
+ memcpy(&dump_results[0], s0ix_residencies_addr,
+ ALL_RESIDENCY_DATA_SIZE);
+
+ iounmap(s0ix_residencies_addr);
+ ret = copy_to_user(argp, &dump_results[0],
+ ALL_RESIDENCY_DATA_SIZE);
+ }
+
+ break;
+ }
+ case INTEL_SCU_IPC_READ_RR_FROM_OSNIB:
+ {
+ u8 reboot_reason;
+ ret = intel_scu_ipc_read_osnib_rr(&reboot_reason);
+ if (ret < 0)
+ return ret;
+ ret = copy_to_user(argp, &reboot_reason, 1);
+ break;
+ }
+ case INTEL_SCU_IPC_WRITE_RR_TO_OSNIB:
+ {
+ u8 data;
+
+ ret = copy_from_user(&data, (u8 *)arg, 1);
+ if (ret < 0) {
+ pr_err("copy from user failed!!\n");
+ return ret;
+ }
+ ret = intel_scu_ipc_write_osnib_rr(data);
+ break;
+ }
+ case INTEL_SCU_IPC_WRITE_ALARM_FLAG_TO_OSNIB:
+ {
+ u8 flag, data;
+ ret = copy_from_user(&flag, (u8 *)arg, 1);
+ if (ret < 0) {
+ pr_err("copy from user failed!!\n");
+ return ret;
+ }
+
+ ret = oshob_info->scu_ipc_read_osnib(
+ &data,
+ 1,
+ offsetof(struct scu_ipc_osnib, alarm));
+
+ if (ret < 0)
+ return ret;
+ if (flag) {
+ data = data | 0x1; /* set alarm flag */
+ pr_info("scu_ipc_ioctl: set alarm flag\n");
+ } else {
+ data = data & 0xFE; /* clear alarm flag */
+ pr_info("scu_ipc_ioctl: clear alarm flag\n");
+ }
+
+ ret = oshob_info->scu_ipc_write_osnib(
+ &data,
+ 1,
+ offsetof(struct scu_ipc_osnib, alarm));
+
+ break;
+ }
+ case INTEL_SCU_IPC_READ_VBATTCRIT:
+ {
+ u32 value = 0;
+
+ pr_info("cmd = INTEL_SCU_IPC_READ_VBATTCRIT");
+ ret = intel_scu_ipc_read_mip((u8 *)&value, 4, 0x318, 1);
+ if (ret < 0)
+ return ret;
+ pr_info("VBATTCRIT VALUE = %x\n", value);
+ ret = copy_to_user(argp, &value, 4);
+ break;
+ }
+ case INTEL_SCU_IPC_FW_REVISION_GET:
+ case INTEL_SCU_IPC_FW_REVISION_EXT_GET:
+ {
+ struct scu_ipc_version version;
+
+ if (copy_from_user(&version, argp, sizeof(u32)))
+ return -EFAULT;
+
+ if (version.count > 16)
+ return -EINVAL;
+
+ ret = rpmsg_send_command(ipcutil_instance, RP_GET_FW_REVISION,
+ cmd & 0x1, NULL, (u32 *)version.data, 0, 4);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(argp + sizeof(u32),
+ version.data, version.count))
+ ret = -EFAULT;
+ break;
+ }
+ case INTEL_SCU_IPC_OSC_CLK_CNTL:
+ {
+ struct osc_clk_t osc_clk;
+
+ if (copy_from_user(&osc_clk, argp, sizeof(struct osc_clk_t)))
+ return -EFAULT;
+
+ ret = intel_scu_ipc_osc_clk(osc_clk.id, osc_clk.khz);
+ if (ret)
+ pr_err("%s: failed to set osc clk\n", __func__);
+
+ break;
+ }
+ case INTEL_SCU_IPC_PMDB_ACCESS:
+ {
+ ret = do_pmdb_user_buf_access(argp);
+
+ break;
+ }
+ default:
+ if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
+ return -EFAULT;
+ ret = scu_reg_access(cmd, &data);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+ return -EFAULT;
+ return 0;
+ }
+
+ return ret;
+}
+
+int intel_scu_ipc_get_oshob_base(void)
+{
+	/* Return 0, not NULL: the function returns int, not a pointer. */
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->oshob_base;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_oshob_base);
+
+int intel_scu_ipc_get_oshob_size(void)
+{
+ if (oshob_info == NULL)
+ return 0;
+
+ return oshob_info->oshob_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_oshob_size);
+
+int intel_scu_ipc_read_oshob(u8 *data, int len, int offset)
+{
+ int ret = 0, i;
+ void __iomem *oshob_addr;
+ u8 *ptr = data;
+
+ oshob_addr = ioremap_nocache(
+ oshob_info->oshob_base,
+ oshob_info->oshob_size);
+
+ if (!oshob_addr) {
+ pr_err("ipc_read_oshob: addr ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ for (i = 0; i < len; i = i+1) {
+ *ptr = readb(oshob_addr + offset + i);
+ pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+ (u32)(oshob_addr + i),
+ offset + i, *ptr);
+ ptr++;
+ }
+
+ iounmap(oshob_addr);
+exit:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_oshob);
+
+/* This function is used for the default OSNIB. */
+int intel_scu_ipc_read_osnib(u8 *data, int len, int offset)
+{
+ int i, ret = 0;
+ u32 osnibw_ptr;
+ u8 *ptr, check = 0;
+ u16 struct_offs;
+ void __iomem *oshob_addr, *osnibr_addr, *osnibw_addr;
+
+ pr_debug("OSHOB base addr value is %x\n", oshob_info->oshob_base);
+ oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+ oshob_info->oshob_size);
+ if (!oshob_addr) {
+ pr_err("ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+ oshob_info->offs_add;
+ osnibr_addr = oshob_addr + struct_offs;
+
+ if (!osnibr_addr) {
+ pr_err("Bad osnib address!\n");
+ ret = -EFAULT;
+ iounmap(oshob_addr);
+ goto exit;
+ }
+
+ pr_debug("OSNIB read addr (remapped) is %x\n",
+ (unsigned int)osnibr_addr);
+
+ /* Make a chksum verification for osnib */
+ for (i = 0; i < oshob_info->osnib_size; i++)
+ check += readb(osnibr_addr + i);
+ if (check) {
+ pr_err("WARNING!!! osnib chksum verification faild, reset all osnib data!\n");
+ struct_offs = offsetof(struct scu_ipc_oshob, osnibw_ptr) +
+ oshob_info->offs_add;
+ osnibw_ptr = readl(oshob_addr + struct_offs);
+ osnibw_addr = ioremap_nocache(
+ osnibw_ptr, oshob_info->osnib_size);
+ if (osnibw_addr) {
+ for (i = 0; i < oshob_info->osnib_size; i++)
+ writeb(0, osnibw_addr + i);
+ rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OSNIB, 0,
+ NULL, NULL, 0, 0,
+ 0xFFFFFFFF, 0);
+ iounmap(osnibw_addr);
+ }
+ }
+
+ ptr = data;
+ for (i = 0; i < len; i++) {
+ *ptr = readb(osnibr_addr + offset + i);
+ pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+ (u32)(osnibr_addr+offset+i), offset+i, *ptr);
+ ptr++;
+ }
+
+ iounmap(oshob_addr);
+exit:
+ return ret;
+}
+
+/* This function is used for the default OSNIB. */
+int intel_scu_ipc_write_osnib(u8 *data, int len, int offset)
+{
+ int i;
+ int ret = 0;
+ u32 osnibw_ptr;
+ u8 osnib_data[oshob_info->osnib_size];
+ u8 check = 0, chksum = 0;
+ u16 struct_offs;
+ void __iomem *oshob_addr, *osnibw_addr, *osnibr_addr;
+
+ pr_debug("OSHOB base addr value is 0x%8x\n", oshob_info->oshob_base);
+
+ rpmsg_global_lock();
+
+ oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+ oshob_info->oshob_size);
+ if (!oshob_addr) {
+ pr_err("ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+	/* Dump OSNIB data to generate the chksum */
+ struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+ oshob_info->offs_add;
+ osnibr_addr = oshob_addr + struct_offs;
+
+ pr_debug("OSNIB read addr (remapped) in OSHOB at %x\n",
+ (unsigned int)osnibr_addr);
+
+ for (i = 0; i < oshob_info->osnib_size; i++) {
+ osnib_data[i] = readb(osnibr_addr + i);
+ check += osnib_data[i];
+ }
+ memcpy(osnib_data + offset, data, len);
+
+ if (check) {
+ pr_err("WARNING!!! OSNIB data chksum verification FAILED!\n");
+ } else {
+ /* generate chksum */
+ for (i = 0; i < oshob_info->osnib_size - 1; i++)
+ chksum += osnib_data[i];
+ osnib_data[oshob_info->osnib_size - 1] = ~chksum + 1;
+ }
+
+ struct_offs = offsetof(struct scu_ipc_oshob, osnibw_ptr) +
+ oshob_info->offs_add;
+ osnibw_ptr = readl(oshob_addr + struct_offs);
+ if (osnibw_ptr == 0) { /* workaround here for BZ 2914 */
+ osnibw_ptr = 0xFFFF3400;
+ pr_err("ERR: osnibw ptr from oshob is 0, manually set it here\n");
+ }
+
+ pr_debug("POSNIB write address: %x\n", osnibw_ptr);
+
+ osnibw_addr = ioremap_nocache(osnibw_ptr, oshob_info->osnib_size);
+ if (!osnibw_addr) {
+ pr_err("ioremap failed!\n");
+ ret = -ENOMEM;
+ goto unmap_oshob_addr;
+ }
+
+ for (i = 0; i < oshob_info->osnib_size; i++)
+ writeb(*(osnib_data + i), (osnibw_addr + i));
+
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OSNIB, 0,
+ NULL, NULL, 0, 0,
+ 0xFFFFFFFF, 0);
if (ret < 0)
- return ret;
- if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
- return -EFAULT;
- return 0;
+ pr_err("ipc_write_osnib failed!!\n");
+
+ iounmap(osnibw_addr);
+
+unmap_oshob_addr:
+ iounmap(oshob_addr);
+exit:
+ rpmsg_global_unlock();
+
+ return ret;
}
-static const struct file_operations scu_ipc_fops = {
- .unlocked_ioctl = scu_ipc_ioctl,
-};
+/* This function is used for the extended OSHOB/OSNIB. */
+int intel_scu_ipc_read_osnib_extend(u8 *data, int len, int offset)
+{
+ int i, ret = 0;
+ u8 *ptr, check = 0;
+ void __iomem *oshob_addr, *osnibr_addr, *osnibw_addr;
+ u32 sptr_dw_mask;
-static int __init ipc_module_init(void)
+ oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+ oshob_info->oshob_size);
+ if (!oshob_addr) {
+ pr_err("ipc_read_osnib_extend: ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ pr_debug(
+ "ipc_read_osnib_extend: remap OSNIB addr=0x%x size %d\n",
+ oshob_info->osnibr_ptr, oshob_info->osnib_size);
+
+ osnibr_addr = ioremap_nocache(oshob_info->osnibr_ptr,
+ oshob_info->osnib_size);
+
+ if (!osnibr_addr) {
+ pr_err("ipc_read_osnib_extend: ioremap of osnib failed!\n");
+ ret = -ENOMEM;
+ goto unmap_oshob_addr;
+ }
+
+ /* Make a chksum verification for osnib */
+ for (i = 0; i < oshob_info->osnib_size; i++)
+ check += readb(osnibr_addr + i);
+
+ if (check) {
+ pr_err("ipc_read_osnib_extend: WARNING!!! osnib chksum verification faild, reset all osnib data!\n");
+ pr_debug(
+ "ipc_read_osnib_extend: remap osnibw ptr addr=0x%x size %d\n",
+ oshob_info->osnibw_ptr, oshob_info->osnib_size);
+
+ osnibw_addr = ioremap_nocache(oshob_info->osnibw_ptr,
+ oshob_info->osnib_size);
+ if (!osnibw_addr) {
+ pr_err("ipc_read_osnib_extend: cannot remap osnib write ptr\n");
+ goto unmap_oshob_addr;
+ }
+
+ for (i = 0; i < oshob_info->osnib_size; i++)
+ writeb(0, osnibw_addr + i);
+
+ /* Send command. The mask to be written identifies which */
+ /* double words of the OSNIB osnib_size bytes will be written.*/
+ /* So the mask is coded on 4 bytes. */
+ sptr_dw_mask = 0xFFFFFFFF;
+ rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OSNIB,
+ 0, NULL, NULL, 0, 0, sptr_dw_mask, 0);
+ iounmap(osnibw_addr);
+ }
+
+ ptr = data;
+ pr_debug("ipc_read_osnib_extend: OSNIB content:\n");
+ for (i = 0; i < len; i++) {
+ *ptr = readb(osnibr_addr + offset + i);
+ pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+ (u32)(osnibr_addr+offset+i), offset+i, *ptr);
+ ptr++;
+ }
+
+ iounmap(osnibr_addr);
+
+unmap_oshob_addr:
+ iounmap(oshob_addr);
+exit:
+ return ret;
+}
+
+/* This function is used for the extended OSHOB/OSNIB. */
+int intel_scu_ipc_write_osnib_extend(u8 *data, int len, int offset)
{
- major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
- if (major < 0)
- return major;
+ int i;
+ int ret = 0;
+ u8 *posnib_data, *ptr;
+ u8 check = 0, chksum = 0;
+ void __iomem *oshob_addr, *osnibw_addr, *osnibr_addr;
+ u32 sptr_dw_mask;
- return 0;
+ rpmsg_global_lock();
+
+ pr_debug(
+ "ipc_write_osnib_extend: remap OSHOB addr 0x%8x size %d\n",
+ oshob_info->oshob_base, oshob_info->oshob_size);
+
+ oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+ oshob_info->oshob_size);
+ if (!oshob_addr) {
+ pr_err("ipc_write_osnib_extend: ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ osnibr_addr = ioremap_nocache(oshob_info->osnibr_ptr,
+ oshob_info->osnib_size);
+
+ if (!osnibr_addr) {
+ pr_err("ipc_write_osnib_extend: ioremap of osnib failed!\n");
+ ret = -ENOMEM;
+ goto unmap_oshob_addr;
+ }
+
+	/* Dump OSNIB data to generate the chksum */
+ posnib_data = kzalloc(oshob_info->osnib_size, GFP_KERNEL);
+
+ if (posnib_data == NULL) {
+ pr_err("ipc_write_osnib_extend: The buffer for getting OSNIB is NULL\n");
+ ret = -EFAULT;
+ iounmap(osnibr_addr);
+ goto unmap_oshob_addr;
+ }
+
+ ptr = posnib_data;
+ for (i = 0; i < oshob_info->osnib_size; i++) {
+ *ptr = readb(osnibr_addr + i);
+ check += *ptr;
+ ptr++;
+ }
+
+ memcpy(posnib_data + offset, data, len);
+
+ if (check) {
+ pr_err("ipc_write_osnib_extend: WARNING!!! OSNIB data chksum verification FAILED!\n");
+ } else {
+ /* generate chksum. */
+ pr_debug("ipc_write_osnib_extend: generating checksum\n");
+ for (i = 0; i < oshob_info->osnib_size - 1; i++)
+ chksum += *(posnib_data + i);
+ /* Fill checksum at the CHECKSUM offset place in OSNIB. */
+ *(posnib_data +
+ offsetof(struct scu_ipc_osnib, checksum)) = ~chksum + 1;
+ }
+
+ pr_debug(
+ "ipc_write_osnib_extend: remap osnibw ptr addr=0x%x size %d\n",
+ oshob_info->osnibw_ptr, oshob_info->osnib_size);
+
+ osnibw_addr = ioremap_nocache(oshob_info->osnibw_ptr,
+ oshob_info->osnib_size);
+ if (!osnibw_addr) {
+ pr_err("scu_ipc_write_osnib_extend: ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit_osnib;
+ }
+
+ for (i = 0; i < oshob_info->osnib_size; i++)
+ writeb(*(posnib_data + i), (osnibw_addr + i));
+
+ /* Send command. The mask to be written identifies which */
+ /* double words of the OSNIB osnib_size bytes will be written.*/
+ /* So the mask is coded on 4 bytes. */
+ sptr_dw_mask = 0xFFFFFFFF;
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OSNIB, 0, NULL, NULL,
+ 0, 0, sptr_dw_mask, 0);
+ if (ret < 0)
+ pr_err("scu_ipc_write_osnib_extend: ipc_write_osnib failed!!\n");
+
+ iounmap(osnibw_addr);
+
+exit_osnib:
+ iounmap(osnibr_addr);
+
+ kfree(posnib_data);
+
+unmap_oshob_addr:
+ iounmap(oshob_addr);
+exit:
+ rpmsg_global_unlock();
+
+ return ret;
+}
+
+/*
+ * This writes the reboot reason in the OSNIB (factored to avoid any overlap)
+ */
+int intel_scu_ipc_write_osnib_rr(u8 rr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(osnib_target_oses); i++) {
+ if (osnib_target_oses[i].id == rr) {
+ pr_info("intel_scu_ipc_write_osnib_rr: reboot reason: %s\n",
+ osnib_target_oses[i].target_os_name);
+ return oshob_info->scu_ipc_write_osnib(
+ &rr,
+ 1,
+ offsetof(struct scu_ipc_osnib, target_mode));
+ }
+ }
+
+ pr_warn("intel_scu_ipc_write_osnib_rr: reboot reason [0x%x] not found\n",
+ rr);
+ return -1;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_osnib_rr);
+
+/*
+ * This reads the reboot reason from the OSNIB (factored helper)
+ */
+int intel_scu_ipc_read_osnib_rr(u8 *rr)
+{
+ pr_debug("intel_scu_ipc_read_osnib_rr: read reboot reason\n");
+ return oshob_info->scu_ipc_read_osnib(
+ rr,
+ 1,
+ offsetof(struct scu_ipc_osnib, target_mode));
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_osnib_rr);
+
+
+int intel_scu_ipc_read_oshob_extend_param(void __iomem *poshob_addr)
+{
+ u16 struct_offs;
+ int buff_size;
+
+ /* Get defined OSNIB space size. */
+ oshob_info->osnib_size = readw(
+ poshob_addr +
+ offsetof(struct scu_ipc_oshob_extend, intel_size));
+
+ if (oshob_info->osnib_size == 0) {
+ pr_err("ipc_read_oshob_extend_param: OSNIB size is null!\n");
+ return -EFAULT;
+ }
+
+ /* Get defined OEM space size. */
+ oshob_info->oemnib_size = readw(
+ poshob_addr +
+ offsetof(struct scu_ipc_oshob_extend, oem_size));
+
+ if (oshob_info->oemnib_size == 0) {
+ pr_err("ipc_read_oshob_extend_param: OEMNIB size is null!\n");
+ return -EFAULT;
+ }
+
+ /* Set SCU and IA trace buffers. Size calculated in bytes here. */
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+ buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+ else
+ buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+ intel_scu_ipc_read_oshob(
+ (u8 *)(oshob_info->scu_trace),
+ buff_size,
+ offsetof(struct scu_ipc_oshob_extend, scutxl));
+
+ struct_offs = offsetof(struct scu_ipc_oshob_extend, iatxl) +
+ oshob_info->offs_add;
+ oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+ /* Set pointers */
+ struct_offs = offsetof(struct scu_ipc_oshob_extend, r_intel_ptr) +
+ oshob_info->offs_add;
+ oshob_info->osnibr_ptr = readl(poshob_addr + struct_offs);
+
+ if (!oshob_info->osnibr_ptr) {
+ pr_err("ipc_read_oshob_extend_param: R_INTEL_POINTER is NULL!\n");
+ return -ENOMEM;
+ }
+
+ struct_offs = offsetof(struct scu_ipc_oshob_extend, w_intel_ptr) +
+ oshob_info->offs_add;
+ oshob_info->osnibw_ptr = readl(poshob_addr + struct_offs);
+
+ if (oshob_info->osnibw_ptr == 0) {
+ /* workaround here for BZ 2914 */
+ oshob_info->osnibw_ptr = 0xFFFF3400;
+ pr_err(
+ "ipc_read_oshob_extend_param: ERR: osnibw from oshob is 0, manually set it here\n");
+ }
+
+ pr_info("(extend oshob) osnib read ptr = 0x%8x\n",
+ oshob_info->osnibr_ptr);
+ pr_info("(extend oshob) osnib write ptr = 0x%8x\n",
+ oshob_info->osnibw_ptr);
+
+ struct_offs = offsetof(struct scu_ipc_oshob_extend, r_oem_ptr) +
+ oshob_info->offs_add;
+ oshob_info->oemnibr_ptr = readl(poshob_addr + struct_offs);
+
+ if (!oshob_info->oemnibr_ptr) {
+ pr_err("ipc_read_oshob_extend_param: R_OEM_POINTER is NULL!\n");
+ return -ENOMEM;
+ }
+
+ struct_offs = offsetof(struct scu_ipc_oshob_extend, w_oem_ptr) +
+ oshob_info->offs_add;
+ oshob_info->oemnibw_ptr = readl(poshob_addr + struct_offs);
+
+ if (!oshob_info->oemnibw_ptr) {
+ pr_err("ipc_read_oshob_extend_param: W_OEM_POINTER is NULL!\n");
+ return -ENOMEM;
+ }
+
+ oshob_info->scu_ipc_write_osnib =
+ &intel_scu_ipc_write_osnib_extend;
+ oshob_info->scu_ipc_read_osnib =
+ &intel_scu_ipc_read_osnib_extend;
+
+ pr_info(
+ "Using extended oshob structure size = %d bytes\n",
+ oshob_info->oshob_size);
+ pr_info(
+ "OSNIB Intel size = %d bytes OEMNIB size = %d bytes\n",
+ oshob_info->osnib_size, oshob_info->oemnib_size);
+
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+ if ((oshob_info->oshob_majrev >= 1) &&
+ (oshob_info->oshob_minrev >= 1)) {
+ /* CLVP and correct version of the oshob. */
+ oshob_info->scu_trace_buf =
+ readl(poshob_addr +
+ offsetof(struct scu_ipc_oshob_extend,
+ sculogbufferaddr));
+ oshob_info->scu_trace_size =
+ readl(poshob_addr +
+ offsetof(struct scu_ipc_oshob_extend,
+ sculogbuffersize));
+ }
+ if ((oshob_info->oshob_majrev >= 1) &&
+ (oshob_info->oshob_minrev >= 3)) {
+ /* CLVP and correct version of the oshob. */
+ oshob_info->nvram_addr =
+ readl(poshob_addr +
+ offsetof(struct scu_ipc_oshob_extend,
+ nvram_addr));
+ oshob_info->nvram_size =
+ readl(poshob_addr +
+ offsetof(struct scu_ipc_oshob_extend,
+ nvram_size));
+ }
+ }
+ return 0;
+}
+
+int intel_scu_ipc_read_oshob_extend_param_v14(void __iomem *poshob_addr)
+{
+ u16 struct_offs;
+ int buff_size;
+
+ /* set intel OSNIB space size. */
+ oshob_info->osnib_size = OSNIB_SIZE;
+
+ /* set OEM OSNIB space size. */
+ oshob_info->oemnib_size = OSNIB_OEM_RSVD_SIZE;
+
+ /* Set SCU and IA trace buffers. Size calculated in bytes here. */
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+ buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+ else
+ buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+ intel_scu_ipc_read_oshob(
+ (u8 *)(oshob_info->scu_trace),
+ buff_size,
+ offsetof(struct scu_ipc_oshob_extend_v14, scutxl));
+
+ struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, iatxl) +
+ oshob_info->offs_add;
+ oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+ /* Set pointers */
+ struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, osnib_ptr) +
+ oshob_info->offs_add;
+ oshob_info->osnibr_ptr = readl(poshob_addr + struct_offs);
+
+ if (!oshob_info->osnibr_ptr) {
+ pr_err("ipc_read_oshob_extend_param_v14: R_INTEL_POINTER is NULL!\n");
+ return -ENOMEM;
+ }
+
+	/* write and read pointers are the same */
+ oshob_info->osnibw_ptr = oshob_info->osnibr_ptr;
+
+ pr_info("(latest extend oshob) osnib ptr = 0x%8x\n",
+ oshob_info->osnibr_ptr);
+
+ /* OEM NIB point at offset OSNIB_SIZE */
+ oshob_info->oemnibr_ptr = oshob_info->osnibr_ptr + OSNIB_SIZE;
+
+	/* write and read pointers are the same */
+ oshob_info->oemnibw_ptr = oshob_info->oemnibr_ptr;
+
+	/* we use the same function for all extended OSHOB structures */
+ oshob_info->scu_ipc_write_osnib =
+ &intel_scu_ipc_write_osnib_extend;
+ oshob_info->scu_ipc_read_osnib =
+ &intel_scu_ipc_read_osnib_extend;
+
+ pr_info(
+ "Using latest extended oshob structure size = %d bytes\n",
+ oshob_info->oshob_size);
+ pr_info(
+ "OSNIB Intel size = %d bytes OEMNIB size = %d bytes\n",
+ oshob_info->osnib_size, oshob_info->oemnib_size);
+
+ struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+ sculogbufferaddr) + oshob_info->offs_add;
+ oshob_info->scu_trace_buf = readl(poshob_addr + struct_offs);
+
+ struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+ sculogbuffersize) + oshob_info->offs_add;
+ oshob_info->scu_trace_size = readl(poshob_addr + struct_offs);
+
+ /* NVRAM after Intel and OEM OSNIB */
+ oshob_info->nvram_addr = oshob_info->oemnibr_ptr + OSNIB_OEM_RSVD_SIZE;
+ oshob_info->nvram_size = OSNIB_NVRAM_SIZE;
+
+ return 0;
+}
+
+int intel_scu_ipc_read_oshob_def_param(void __iomem *poshob_addr)
+{
+ u16 struct_offs;
+ int ret = 0;
+ int buff_size;
+
+ oshob_info->oshob_majrev = OSHOB_REV_MAJ_DEFAULT;
+ oshob_info->oshob_minrev = OSHOB_REV_MIN_DEFAULT;
+ oshob_info->osnib_size = OSNIB_SIZE;
+ oshob_info->oemnib_size = 0;
+
+ /* Set OSHOB total size */
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+ oshob_info->oshob_size = OSHOB_MRFLD_SIZE;
+ else
+ oshob_info->oshob_size = OSHOB_SIZE;
+
+ /* Set SCU and IA trace buffers. Size calculated in bytes here. */
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+ buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+ else
+ buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+ ret = intel_scu_ipc_read_oshob(
+ (u8 *)(oshob_info->scu_trace),
+ buff_size,
+ offsetof(struct scu_ipc_oshob, scutxl));
+
+ if (ret != 0) {
+ pr_err("Cannot get scutxl data from OSHOB\n");
+ return ret;
+ }
+
+ struct_offs = offsetof(struct scu_ipc_oshob, iatxl) +
+ oshob_info->offs_add;
+ oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+ oshob_info->scu_ipc_write_osnib =
+ &intel_scu_ipc_write_osnib;
+ oshob_info->scu_ipc_read_osnib =
+ &intel_scu_ipc_read_osnib;
+
+ struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+ oshob_info->offs_add;
+ oshob_info->osnibr_ptr = (unsigned long)(poshob_addr + struct_offs);
+
+ pr_info("Using default oshob structure size = %d bytes\n",
+ oshob_info->oshob_size);
+
+ pr_debug("Using default oshob structure OSNIB read ptr %x\n",
+ oshob_info->osnibr_ptr);
+
+ return ret;
+}
+
+int intel_scu_ipc_read_oshob_info(void)
+{
+ int i, ret = 0;
+ u32 oshob_base = 0;
+ void __iomem *oshob_addr;
+ unsigned char oshob_magic[4];
+
+ ret = rpmsg_send_command(ipcutil_instance,
+ RP_GET_HOBADDR, 0, NULL, &oshob_base, 0, 1);
+
+ if (ret < 0) {
+ pr_err("ipc_read_oshob cmd failed!!\n");
+ goto exit;
+ }
+
+ /* At this stage, we still don't know which OSHOB type (default or */
+ /* extended) can be used, and the size of resource to be remapped */
+ /* depends on the type of OSHOB structure to be used. */
+ /* So just remap the minimum size to get the needed bytes of the */
+ /* OSHOB zone. */
+ oshob_addr = ioremap_nocache(oshob_base, OSHOB_EXTEND_DESC_SIZE);
+
+ if (!oshob_addr) {
+ pr_err("oshob addr ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ pr_info("(oshob) base addr = 0x%8x\n", oshob_base);
+
+ /* Store base address. */
+ oshob_info->oshob_base = oshob_base;
+
+ oshob_info->platform_type = intel_mid_identify_cpu();
+
+ /*
+ * Buffer is allocated using kmalloc. Memory is not initialized and
+ * these fields are not updated in all the branches.
+ */
+ oshob_info->scu_trace_buf = 0;
+ oshob_info->scu_trace_size = 0;
+ oshob_info->nvram_addr = 0;
+ oshob_info->nvram_size = 0;
+
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+ pr_info("(oshob) identified platform = INTEL_MID_CPU_CHIP_TANGIER\n");
+
+ /* By default we already have 1 dword reserved in the OSHOB */
+ /* structures for SCU buffer. For Merrifield, SCU size to */
+ /* consider is OSHOB_SCU_BUF_MRFLD_DW_SIZE dwords. So with */
+ /* Merrifield, when calculating structures offsets, we have */
+ /* to add (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) dwords, with */
+ /* the offsets calculated in bytes. */
+ oshob_info->offs_add = (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1)*4;
+ } else
+ oshob_info->offs_add = 0;
+
+ pr_debug("(oshob) additional offset = 0x%x\n", oshob_info->offs_add);
+
+ /* Extract magic number that will help identifying the good OSHOB */
+ /* that is going to be used. */
+ for (i = 0; i < OSHOB_HEADER_MAGIC_SIZE; i = i+1)
+ oshob_magic[i] = readb(oshob_addr + i);
+
+ pr_debug("(oshob) OSHOB magic = %x %x %x %x\n",
+ oshob_magic[0], oshob_magic[1], oshob_magic[2], oshob_magic[3]);
+
+ if (strncmp(oshob_magic, OSHOB_MAGIC_NUMBER,
+ OSHOB_HEADER_MAGIC_SIZE) == 0) {
+		/* Get OSHOB version and size, which are common to all */
+		/* extended OSHOB structures. */
+ oshob_info->oshob_majrev = readb(oshob_addr +
+ offsetof(struct scu_ipc_oshob_extend, rev_major));
+ oshob_info->oshob_minrev = readb(oshob_addr +
+ offsetof(struct scu_ipc_oshob_extend, rev_minor));
+ oshob_info->oshob_size = readw(oshob_addr +
+ offsetof(struct scu_ipc_oshob_extend, oshob_size));
+
+ pr_info("(oshob) oshob version = %x.%x\n",
+ oshob_info->oshob_majrev, oshob_info->oshob_minrev);
+
+ if ((oshob_info->oshob_majrev >= 1) &&
+ (oshob_info->oshob_minrev >= 4)) {
+ if (intel_scu_ipc_read_oshob_extend_param_v14(
+ oshob_addr) != 0) {
+ ret = -EFAULT;
+ goto unmap_oshob;
+ }
+ } else {
+ if (intel_scu_ipc_read_oshob_extend_param(
+ oshob_addr) != 0) {
+ ret = -EFAULT;
+ goto unmap_oshob;
+ }
+ }
+
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+ pr_info("(extend oshob) SCU buffer size is %d bytes\n",
+ OSHOB_SCU_BUF_MRFLD_DW_SIZE*4);
+ } else {
+ pr_debug("(extend oshob) SCU buffer size is %d bytes\n",
+ OSHOB_SCU_BUF_BASE_DW_SIZE*4);
+ }
+ } else {
+ ret = intel_scu_ipc_read_oshob_def_param(oshob_addr);
+
+ if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+ pr_debug("(default oshob) SCU buffer size is %d bytes\n",
+ OSHOB_SCU_BUF_MRFLD_DW_SIZE*4);
+ } else {
+ pr_debug("(default oshob) SCU buffer size is %d bytes\n",
+ OSHOB_SCU_BUF_BASE_DW_SIZE*4);
+ }
+ }
+
+unmap_oshob:
+ iounmap(oshob_addr);
+
+exit:
+ return ret;
+}
+
+/*
+ * This writes the OEMNIB buffer in the internal RAM of the SCU.
+ */
+int intel_scu_ipc_write_oemnib(u8 *oemnib, int len, int offset)
+{
+ int i;
+ int ret = 0;
+ void __iomem *oshob_addr, *oemnibw_addr;
+ u32 sptr_dw_mask;
+
+ if (oemnib == NULL) {
+ pr_err("ipc_write_oemnib: passed buffer for writting OEMNIB is NULL\n");
+ return -EINVAL;
+ }
+
+ rpmsg_global_lock();
+
+ pr_debug("ipc_write_oemnib: remap OSHOB addr 0x%8x size %d\n",
+ oshob_info->oshob_base, oshob_info->oshob_size);
+
+ oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+ oshob_info->oshob_size);
+ if (!oshob_addr) {
+ pr_err("ipc_write_oemnib: ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if ((len == 0) || (len > oshob_info->oemnib_size)) {
+ pr_err(
+ "ipc_write_oemnib: bad OEMNIB data length (%d) to write (max=%d bytes)\n",
+ len, oshob_info->oemnib_size);
+ ret = -EINVAL;
+ goto unmap_oshob_addr;
+ }
+
+ /* offset shall start at 0 from the OEMNIB base address and shall */
+ /* not exceed the OEMNIB allowed size. */
+ if ((offset < 0) || (offset >= oshob_info->oemnib_size) ||
+ (len + offset > oshob_info->oemnib_size)) {
+ pr_err(
+ "ipc_write_oemnib: Bad OEMNIB data offset/len for writing (offset=%d , len=%d)\n",
+ offset, len);
+ ret = -EINVAL;
+ goto unmap_oshob_addr;
+ }
+
+ pr_debug("ipc_write_oemnib: POEMNIB remap oemnibw ptr 0x%x size %d\n",
+ oshob_info->oemnibw_ptr, oshob_info->oemnib_size);
+
+ oemnibw_addr = ioremap_nocache(oshob_info->oemnibw_ptr,
+ oshob_info->oemnib_size);
+ if (!oemnibw_addr) {
+ pr_err("ipc_write_oemnib: ioremap failed!\n");
+ ret = -ENOMEM;
+ goto unmap_oshob_addr;
+ }
+
+ for (i = 0; i < len; i++)
+ writeb(*(oemnib + i), (oemnibw_addr + offset + i));
+
+ /* Send command. The mask to be written identifies which double */
+ /* words of the OSNIB oemnib_size bytes will be written. */
+ /* So the mask is coded on 4 bytes. */
+ sptr_dw_mask = 0xFFFFFFFF;
+ if ((oshob_info->oshob_majrev >= 1) &&
+ (oshob_info->oshob_minrev >= 4)) {
+ sptr_dw_mask = 0xFFFFFFFF;
+ /* OEM NIB lies on region 1, 2, and 3 */
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OSNIB, 0, NULL, NULL,
+ 0, 0, sptr_dw_mask, 1);
+ if (ret < 0) {
+ pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+ goto unmap_oemnibw_addr;
+ }
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OSNIB, 0, NULL, NULL,
+ 0, 0, sptr_dw_mask, 2);
+ if (ret < 0) {
+ pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+ goto unmap_oemnibw_addr;
+ }
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OSNIB, 0, NULL, NULL,
+ 0, 0, sptr_dw_mask, 3);
+ if (ret < 0) {
+ pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+ goto unmap_oemnibw_addr;
+ }
+ } else {
+ ret = rpmsg_send_raw_command(ipcutil_instance,
+ RP_WRITE_OEMNIB, 0, NULL, NULL,
+ 0, 0, sptr_dw_mask, 0);
+ if (ret < 0) {
+ pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+ goto unmap_oemnibw_addr;
+ }
+ }
+
+unmap_oemnibw_addr:
+ iounmap(oemnibw_addr);
+
+unmap_oshob_addr:
+ iounmap(oshob_addr);
+exit:
+ rpmsg_global_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_oemnib);
+
+/*
+ * This reads the OEMNIB from the internal RAM of the SCU.
+ */
+static int intel_scu_ipc_read_oemnib(u8 *oemnib, int len, int offset)
+{
+ int i, ret = 0;
+ u8 *ptr;
+ void __iomem *oshob_addr, *oemnibr_addr;
+
+ if (oemnib == NULL) {
+ pr_err("ipc_read_oemnib: passed buffer for reading OEMNIB is NULL\n");
+ return -EINVAL;
+ }
+
+ pr_debug("ipc_read_oemnib: remap OSHOB base addr 0x%x size %d\n",
+ oshob_info->oshob_base, oshob_info->oshob_size);
+
+ oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+ oshob_info->oshob_size);
+ if (!oshob_addr) {
+ pr_err("ipc_read_oemnib: ioremap failed!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if ((len == 0) || (len > oshob_info->oemnib_size)) {
+ pr_err("ipc_read_oemnib: Bad OEMNIB data length (%d) to be read (max=%d bytes)\n",
+ len, oshob_info->oemnib_size);
+ ret = -EINVAL;
+ goto unmap_oshob_addr;
+ }
+
+ /* offset shall start at 0 from the OEMNIB base address and shall */
+ /* not exceed the OEMNIB allowed size. */
+ if ((offset < 0) || (offset >= oshob_info->oemnib_size) ||
+ (len + offset > oshob_info->oemnib_size)) {
+ pr_err(
+ "ipc_read_oemnib: Bad OEMNIB data offset/len to read (offset=%d ,len=%d)\n",
+ offset, len);
+ ret = -EINVAL;
+ goto unmap_oshob_addr;
+ }
+
+ pr_debug("ipc_read_oemnib: POEMNIB remap oemnibr ptr 0x%x size %d\n",
+ oshob_info->oemnibr_ptr, oshob_info->oemnib_size);
+
+ oemnibr_addr = ioremap_nocache(oshob_info->oemnibr_ptr,
+ oshob_info->oemnib_size);
+
+ if (!oemnibr_addr) {
+ pr_err("ipc_read_oemnib: ioremap of oemnib failed!\n");
+ ret = -ENOMEM;
+ goto unmap_oshob_addr;
+ }
+
+ ptr = oemnib;
+ pr_debug("ipc_read_oemnib: OEMNIB content:\n");
+ for (i = 0; i < len; i++) {
+ *ptr = readb(oemnibr_addr + offset + i);
+ pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+ (u32)(oemnibr_addr+offset+i), offset+i, *ptr);
+ ptr++;
+ }
+
+ iounmap(oemnibr_addr);
+
+unmap_oshob_addr:
+ iounmap(oshob_addr);
+exit:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_oemnib);
+
+#ifdef DUMP_OSNIB
+/*
+ * This reads the PMIT from the OSHOB (pointer to interrupt tree)
+ */
+static int intel_scu_ipc_read_oshob_it_tree(u32 *ptr)
+{
+	u16 struct_offs;
+
+	pr_debug("intel_scu_ipc_read_oshob_it_tree: read IT tree\n");
+
+	/* The PMIT pointer lives at a different offset depending on the
+	 * OSHOB layout in use: default, extended v1.4+, or plain extended. */
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		struct_offs = offsetof(struct scu_ipc_oshob, pmit) +
+			    oshob_info->offs_add;
+	} else if ((oshob_info->oshob_majrev >= 1) &&
+		   (oshob_info->oshob_minrev >= 4)) {
+		struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, pmit) +
+			    oshob_info->offs_add;
+	} else {
+		struct_offs = offsetof(struct scu_ipc_oshob_extend, pmit) +
+			    oshob_info->offs_add;
+	}
+	/* PMIT is read as a single 4-byte value out of the OSHOB. */
+	return intel_scu_ipc_read_oshob(
+			(u8 *) ptr,
+			4,
+			struct_offs);
+}
+#endif
+
+/*
+ * This reads the RESETIRQ1 or RESETSRC0 from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_reset_ev1(u8 *rev1)
+{
+	int i;
+
+	/* The reset-event byte name differs per chip; look up the entry
+	 * matching the detected platform before reading the OSNIB byte. */
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_debug(
+			"intel_scu_ipc_read_osnib_rst_ev1: read %s\n",
+				chip_reset_events[i].reset_ev1_name);
+
+			return oshob_info->scu_ipc_read_osnib(
+					rev1,
+					1,
+					offsetof(struct scu_ipc_osnib, reset_ev1));
+		}
+	}
+
+	/* Platform not present in the table: nothing sensible to read. */
+	pr_err("intel_scu_ipc_read_osnib_reset_ev1: param not found\n");
+	return -EFAULT;
+}
+#endif
+
+/*
+ * This reads the RESETIRQ2 or RESETSRC1 from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_reset_ev2(u8 *rev2)
+{
+	int i;
+
+	/* Same platform lookup as reset_ev1, but for the second event byte. */
+	for (i = 0 ; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_debug(
+			"intel_scu_ipc_read_osnib_rst_ev2: read %s\n",
+				chip_reset_events[i].reset_ev2_name);
+
+			return oshob_info->scu_ipc_read_osnib(
+				rev2,
+				1,
+				offsetof(struct scu_ipc_osnib, reset_ev2));
+		}
+	}
+
+	pr_err("intel_scu_ipc_read_osnib_reset_ev2: param not found\n");
+	return -EFAULT;
+}
+
+/*
+ * This reads the WD from the OSNIB
+ */
+/* Read the 1-byte watchdog counter from the OSNIB. */
+int intel_scu_ipc_read_osnib_wd(u8 *wd)
+{
+	pr_debug("intel_scu_ipc_read_osnib_wd: read WATCHDOG\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			wd,
+			1,
+			offsetof(struct scu_ipc_osnib, wd_count));
+}
+
+/*
+ * This writes the WD in the OSNIB
+ */
+/* Write the 1-byte watchdog counter into the OSNIB. */
+int intel_scu_ipc_write_osnib_wd(u8 *wd)
+{
+	pr_info("intel_scu_ipc_write_osnib_wd: write WATCHDOG %x\n", *wd);
+
+	return oshob_info->scu_ipc_write_osnib(
+			wd,
+			1,
+			offsetof(struct scu_ipc_osnib, wd_count));
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_osnib_wd);
+
+/*
+ * Get SCU trace buffer physical address if available
+ */
+/* Return the SCU trace buffer physical address, or 0 if OSHOB
+ * information has not been initialized yet. */
+u32 intel_scu_ipc_get_scu_trace_buffer(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->scu_trace_buf;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_scu_trace_buffer);
+
+/*
+ * Get SCU trace buffer size
+ */
+/* Return the SCU trace buffer size in bytes, or 0 if uninitialized. */
+u32 intel_scu_ipc_get_scu_trace_buffer_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->scu_trace_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_scu_trace_buffer_size);
+
+/*
+ * Get nvram size
+ */
+/* Return the NVRAM size recorded in the OSHOB, or 0 if uninitialized. */
+u32 intel_scu_ipc_get_nvram_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->nvram_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_nvram_size);
+
+/*
+ * Get nvram addr
+ */
+/* Return the NVRAM address recorded in the OSHOB, or 0 if uninitialized. */
+u32 intel_scu_ipc_get_nvram_addr(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->nvram_addr;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_nvram_addr);
+
+/*
+ * Get SCU fabric error buffer1 offset
+ */
+/* Return the OSHOB offset of the first fabric-error log for the current
+ * platform, or 0 if the platform is unknown or OSHOB is uninitialized. */
+u32 intel_scu_ipc_get_fabricerror_buf1_offset(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		return offsetof(struct scu_ipc_oshob_extend, fabricerrlog1);
+	else if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		/* NOTE(review): on Tangier the pre-v1.4 branch falls back to
+		 * the *default* oshob layout (fab_err_log), not the extended
+		 * one — looks intentional but worth confirming. */
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			return offsetof(struct scu_ipc_oshob_extend_v14,
+					fabricerrlog) + oshob_info->offs_add;
+		} else {
+			return offsetof(struct scu_ipc_oshob,
+					fab_err_log) + oshob_info->offs_add;
+		}
+	else {
+		pr_err("scu_ipc_get_fabricerror_buf_offset: platform not recognized!\n");
+		return 0;
+	}
+}
+
+/*
+ * Get SCU fabric error buffer2 offset
+ */
+/* Return the OSHOB offset of the second fabric-error log; only
+ * Cloverview provides one, all other platforms return 0. */
+u32 intel_scu_ipc_get_fabricerror_buf2_offset(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		return offsetof(struct scu_ipc_oshob_extend, fabricerrlog2);
+	else {
+		pr_warn("scu_ipc_get_fabricerror_buf2_offset: not supported for this platform!\n");
+		return 0;
+	}
+}
+
+
+/*
+ * This reads the ALARM from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+/* Read the 1-byte ALARM field from the OSNIB (debug dump only). */
+static int intel_scu_ipc_read_osnib_alarm(u8 *alarm)
+{
+	pr_debug("intel_scu_ipc_read_osnib_alarm: read ALARM\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			alarm,
+			1,
+			offsetof(struct scu_ipc_osnib, alarm));
+}
+#endif
+
+/*
+ * This reads the WAKESRC from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+/* Read the 1-byte wake-source field from the OSNIB (debug dump only). */
+static int intel_scu_ipc_read_osnib_wakesrc(u8 *wksrc)
+{
+	pr_debug("intel_scu_ipc_read_osnib_wakesrc: read WAKESRC\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			wksrc,
+			1,
+			offsetof(struct scu_ipc_osnib, wakesrc));
+}
+
+
+#define OEMNIB_BUF_DESC_LEN 4096
+
+#ifdef CONFIG_DEBUG_FS
+/* seq_file show handler: dump OSHOB header info and, for the extended
+ * layout, the full OSNIB content word by word. */
+static int intel_scu_ipc_oshob_stat(struct seq_file *m, void *unused)
+{
+	void __iomem *osnib;
+	int i, count;
+	int ret = 0;
+
+	u32 value;
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		seq_printf(m, "DEFAULT OSHOB\n");
+		seq_printf(m, "OSHOB size : %d\n", oshob_info->oshob_size);
+		/* Tangier keeps a multi-dword SCU trace; others a single dword. */
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+			seq_printf(m, "SCU trace : ");
+
+			for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+				seq_printf(m, "%x ", oshob_info->scu_trace[i]);
+
+			seq_printf(m, "\n");
+		} else
+			seq_printf(m, "SCU trace : %x\n",
+					oshob_info->scu_trace[0]);
+
+		seq_printf(m, "IA trace  : %x\n", oshob_info->ia_trace);
+	} else {
+		seq_printf(m, "EXTENDED OSHOB v%d.%d\n",
+						oshob_info->oshob_majrev,
+						oshob_info->oshob_minrev);
+		seq_printf(m, "OSHOB size : %d\n\n", oshob_info->oshob_size);
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+			seq_printf(m, "SCU trace : ");
+
+			for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+				seq_printf(m, "%x ", oshob_info->scu_trace[i]);
+
+			seq_printf(m, "\n");
+		} else
+			seq_printf(m, "SCU trace : %x\n",
+					oshob_info->scu_trace[0]);
+
+		seq_printf(m, "IA trace  : %x\n\n", oshob_info->ia_trace);
+
+		seq_printf(m, "OSNIB size : %d\n", oshob_info->osnib_size);
+		seq_printf(m, "OSNIB  read address  : %x\n",
+						oshob_info->osnibr_ptr);
+		seq_printf(m, "OSNIB  write address : %x\n",
+						oshob_info->osnibw_ptr);
+		/* Dump OSNIB */
+		osnib = ioremap_nocache(oshob_info->osnibr_ptr,
+						oshob_info->osnib_size);
+		if (!osnib) {
+			pr_err("Cannot remap OSNIB\n");
+			ret = -ENOMEM;
+			return ret;
+		}
+
+		i = 0;
+		count = 0; /* used for fancy presentation */
+		while (i < oshob_info->osnib_size) {
+			/* Four 32-bit words per printed row. */
+			if (count%4 == 0)
+				seq_printf(m, "\nOSNIB[%08x] ",
+					    oshob_info->osnibr_ptr+i);
+
+			value = readl(osnib+i);
+			seq_printf(m, "%08x ", value);
+			i += 4;
+			count++;
+		}
+		seq_printf(m, "\n\n");
+		iounmap(osnib);
+
+		seq_printf(m, "OEMNIB size : %d\n",
+						oshob_info->oemnib_size);
+		seq_printf(m, "OEMNIB read address  : %x\n",
+						oshob_info->oemnibr_ptr);
+		seq_printf(m, "OEMNIB write address : %x\n",
+						oshob_info->oemnibw_ptr);
+		seq_printf(m, "\n\n");
+	}
+	return 0;
+}
+
+/* seq_file show handler: dump the raw OEMNIB area word by word. */
+static int intel_scu_ipc_oemnib_stat(struct seq_file *m, void *unused)
+{
+	void __iomem *oemnib;
+	int i, count;
+	u32 value;
+
+	/* Dump OEMNIB */
+	oemnib = ioremap_nocache(oshob_info->oemnibr_ptr,
+				oshob_info->oemnib_size);
+
+	if (!oemnib) {
+		pr_err("Cannot remap OEMNIB\n");
+		return -ENOMEM;
+	}
+
+	i = 0;
+	count = 0; /* used for fancy presentation */
+	while (i < oshob_info->oemnib_size) {
+		/* Four 32-bit words per printed row. */
+		if (count%4 == 0)
+			seq_printf(m, "\nOEMNIB[%08x] ",
+				    oshob_info->oemnibr_ptr+i);
+
+		value = readl(oemnib+i);
+		seq_printf(m, "%08x ", value);
+		i += 4;
+		count++;
+	}
+	seq_printf(m, "\n\n");
+	iounmap(oemnib);
+
+	return 0;
+}
+
+/* debugfs open handlers: hook the seq_file single_open helpers.
+ * Note: struct file_operations.open returns int, not ssize_t; the
+ * previous ssize_t declarations were an incompatible function-pointer
+ * type for the .open member. */
+static int intel_scu_ipc_oshob_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_oshob_stat, NULL);
+}
+
+static int intel_scu_ipc_oemnib_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_oemnib_stat, NULL);
+}
+
+
+/*
+* debugfs interface: the "oemnib_write" stores the OEMNIB part of OSNIB,
+* starting at offset ppos.
+*/
+/*
+ * debugfs write handler: parse user-supplied hex digits and store them
+ * into the OEMNIB at offset *ppos.  The trailing newline from the
+ * debugfs write is not part of the payload, so count - 1 bytes are used.
+ *
+ * Fixes vs previous version: the conversion loop ran "i <= count - 1"
+ * (count iterations) over buffers of count - 1 bytes, writing one byte
+ * past the end; 'temp' leaked on every error path after its allocation;
+ * size_t was printed with %d.
+ */
+static ssize_t intel_scu_ipc_oemnib_write(struct file *file,
+					  const char __user *buf,
+					    size_t count, loff_t *ppos)
+{
+	int ret, i;
+	u8 *posnib_data, *ptr;
+	char *ptrchar, *temp;
+
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		pr_err(
+		"Write OEMNIB: OEMNIB only usable with extended OSHOB structure.\n");
+		return -EFAULT;
+	}
+
+	pr_info("Write OEMNIB: number bytes = %zu\n", count);
+
+	/* Note: when the string is passed through debugfs interface, the */
+	/* real count value includes the end of line \n. So we must take  */
+	/* care to consider count - 1 as the real number of OEM bytes.    */
+
+	if (buf == NULL) {
+		pr_err("Write OEMNIB: The passed OEMNIB buffer is NULL\n");
+		return -EINVAL;
+	}
+
+	if (count == 0) {
+		pr_err("Write OEMNIB: The OEMNIB data length to write is NULL\n");
+		return -EINVAL;
+	}
+
+	/* kzalloc() zero-fills; the old extra memset() calls were redundant. */
+	posnib_data = kzalloc(count - 1, GFP_KERNEL);
+	if (posnib_data == NULL) {
+		pr_err("Write OEMNIB: Cannot allocate buffer for writting OEMNIB\n");
+		return -ENOMEM;
+	}
+
+	temp = kzalloc(count - 1, GFP_KERNEL);
+	if (temp == NULL) {
+		pr_err(
+		"Write OEMNIB: Cannot allocate temp buffer for writting OEMNIB\n");
+		kfree(posnib_data);	/* was leaked here before */
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(temp, buf, count - 1)) {
+		pr_err(
+		"Write OEMNIB: Cannot transfer from user buf to OEMNIB buf\n");
+		kfree(posnib_data);
+		kfree(temp);		/* was leaked here before */
+		return -EFAULT;
+	}
+
+	ptrchar = temp;
+	ptr = posnib_data;
+
+	/* Convert each ASCII hex digit to its nibble value.  Both buffers
+	 * hold exactly count - 1 bytes, so iterate i < count - 1. */
+	for (i = 0; i < count - 1; i++) {
+		if (*ptrchar >= '0' && *ptrchar <= '9')
+			*ptr = *ptrchar - '0';
+		if (*ptrchar >= 'A' && *ptrchar <= 'F')
+			*ptr = *ptrchar - 'A' + 10;
+		if (*ptrchar >= 'a' && *ptrchar <= 'f')
+			*ptr = *ptrchar - 'a' + 10;
+
+		ptrchar++;
+		ptr++;
+	}
+
+	ret = intel_scu_ipc_write_oemnib(posnib_data, count - 1, *ppos);
+
+	kfree(posnib_data);
+	kfree(temp);
+
+	if (ret < 0) {
+		pr_err("Write OEMNIB: ipc write of OEMNIB failed!!\n");
+		return ret;
+	}
+
+	pr_info("Write OEMNIB: OEMNIB updated: count=%zu bytes\n", count);
+
+	return count;
+}
+
+/* Attach the debugfs operations methods */
+/* debugfs file ops: oemnib_debug is read/write, oshob_dump read-only. */
+static const struct file_operations scu_ipc_oemnib_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_oemnib_open,
+	.read = seq_read,
+	.write = intel_scu_ipc_oemnib_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations scu_ipc_oshob_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_oshob_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *scu_ipc_oemnib_dir;
+static struct dentry *scu_ipc_oemnib_file;
+static struct dentry *scu_ipc_oshob_file;
+
+/*
+* debugfs interface: init interface.
+*/
+/* Create /sys/kernel/debug/intel_scu_oshob with the oemnib_debug and
+ * oshob_dump entries.  NOTE(review): failures return -1 rather than a
+ * proper -errno — callers only test non-zero, but -ENOMEM/-ENODEV would
+ * be more conventional. */
+static int intel_mid_scu_ipc_oemnib_debugfs_init(void)
+{
+	/* Create debugfs directory /sys/kernel/debug/intel_scu_oshob */
+	scu_ipc_oemnib_dir = debugfs_create_dir("intel_scu_oshob", NULL);
+
+	if (!scu_ipc_oemnib_dir) {
+		pr_err("cannot create OSHOB debugfs directory\n");
+		return -1;
+	}
+
+	/* Add operations /sys/kernel/debug/intel_scu_oshob to control */
+	/* the OEM.                                                    */
+	scu_ipc_oemnib_file = debugfs_create_file("oemnib_debug",
+				S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				scu_ipc_oemnib_dir,
+				NULL, &scu_ipc_oemnib_fops);
+
+	if (!scu_ipc_oemnib_file) {
+		pr_err("cannot create OEMNIB debugfs file\n");
+		debugfs_remove(scu_ipc_oemnib_dir);
+		return -1;
+	}
+
+	/* Add operations /sys/kernel/debug/intel_scu_oshob to debug OSHOB */
+	/* content.                                                        */
+	scu_ipc_oshob_file = debugfs_create_file("oshob_dump",
+				S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				scu_ipc_oemnib_dir, NULL, &scu_ipc_oshob_fops);
+
+	if (!scu_ipc_oshob_file) {
+		pr_err("cannot create OSHOB debugfs file\n");
+		debugfs_remove_recursive(scu_ipc_oemnib_dir);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+* debugfs interface: exit interface.
+*/
+/* Tear down the whole intel_scu_oshob debugfs directory. */
+static void intel_mid_scu_ipc_oemnib_debugfs_exit(void)
+{
+	debugfs_remove_recursive(scu_ipc_oemnib_dir);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/* /dev/mid_ipc character device: ioctl-only interface.
+ * NOTE(review): .compat_ioctl points at the native handler directly —
+ * confirm all ioctl arguments are compat-safe. */
+static const struct file_operations scu_ipc_fops = {
+	.unlocked_ioctl = scu_ipc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = scu_ipc_ioctl,
+#endif
+};
+
+static struct miscdevice scu_ipcutil = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "mid_ipc",
+	.fops = &scu_ipc_fops,
+};
+
+/* Identify the OSHOB layout in use, and (when DUMP_OSNIB is set) dump
+ * reset events, SCU/IA traces and OSNIB fields to the kernel log.
+ * Registers the OEMNIB debugfs interface for extended OSHOB layouts. */
+static int oshob_init(void)
+{
+	int ret, i;
+	u16 struct_offs;
+
+#ifdef DUMP_OSNIB
+	u8 rr, reset_ev1, reset_ev2, wd, alarm, wakesrc, *ptr;
+	int rr_found = 0, wksrc_found = 0;
+	/* NOTE(review): scu_trace is sized OSHOB_SCU_BUF_BASE_DW_SIZE*4
+	 * dwords; confirm the Tangier read of OSHOB_SCU_BUF_MRFLD_DW_SIZE*4
+	 * bytes always fits. */
+	u32 pmit, scu_trace[OSHOB_SCU_BUF_BASE_DW_SIZE*4], ia_trace;
+	int buff_size;
+#endif
+
+	/* Identify the type and size of OSHOB to be used. */
+	ret = intel_scu_ipc_read_oshob_info();
+
+	if (ret != 0) {
+		pr_err("Cannot init ipc module: oshob info not read\n");
+		goto exit;
+	}
+
+#ifdef DUMP_OSNIB
+	/* Dumping reset events from the interrupt tree */
+	ret = intel_scu_ipc_read_oshob_it_tree(&pmit);
+
+	if (ret != 0) {
+		pr_err("Cannot read interrupt tree\n");
+		goto exit;
+	}
+
+	/* Two reset-event bytes live right after PMIT_RESET1_OFFSET. */
+	ptr = ioremap_nocache(pmit + PMIT_RESET1_OFFSET, 2);
+
+	if (!ptr) {
+		pr_err("Cannot remap PMIT\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_debug("PMIT addr 0x%8x remapped to 0x%8x\n", pmit, (u32)ptr);
+
+	reset_ev1 = readb(ptr);
+	reset_ev2 = readb(ptr+1);
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_warn("[BOOT] %s=0x%02x %s=0x%02x (PMIT interrupt tree)\n",
+				chip_reset_events[i].reset_ev1_name,
+				reset_ev1,
+				chip_reset_events[i].reset_ev2_name,
+				reset_ev2);
+		}
+	}
+	iounmap(ptr);
+
+	/* Dumping OSHOB content */
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		/* Use default OSHOB here. Calculate in bytes here. */
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+			buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+		else
+			buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+		ret = intel_scu_ipc_read_oshob(
+			(u8 *)(scu_trace),
+			buff_size,
+			offsetof(struct scu_ipc_oshob, scutxl));
+
+		if (ret != 0) {
+			pr_err("Cannot read SCU data\n");
+			goto exit;
+		}
+
+		struct_offs = offsetof(struct scu_ipc_oshob, iatxl) +
+				    oshob_info->offs_add;
+		ret = intel_scu_ipc_read_oshob(
+				(u8 *)(&ia_trace),
+				4,
+				struct_offs);
+
+		if (ret != 0) {
+			pr_err("Cannot read IA data\n");
+			goto exit;
+		}
+	} else {
+		/* Use extended OSHOB here. Calculate in bytes here. */
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+			buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+		else
+			buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+		/* The scutxl field moved between extended layouts v1.4+ and
+		 * older ones, hence the two offsetof() variants below. */
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			ret = intel_scu_ipc_read_oshob(
+				(u8 *)(scu_trace),
+				buff_size,
+				offsetof(struct scu_ipc_oshob_extend_v14,
+					 scutxl));
+		} else {
+			ret = intel_scu_ipc_read_oshob(
+				(u8 *)(scu_trace),
+				buff_size,
+				offsetof(struct scu_ipc_oshob_extend, scutxl));
+		}
+
+		if (ret != 0) {
+			pr_err("Cannot read SCU data\n");
+			goto exit;
+		}
+
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+						iatxl) + oshob_info->offs_add;
+		} else {
+			struct_offs = offsetof(struct scu_ipc_oshob_extend,
+						iatxl) + oshob_info->offs_add;
+		}
+
+		ret = intel_scu_ipc_read_oshob(
+			(u8 *)(&ia_trace),
+			4,
+			struct_offs);
+
+		if (ret != 0) {
+			pr_err("Cannot read IA data\n");
+			goto exit;
+		}
+	}
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+		for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+			pr_warn("[BOOT] SCU_TR[%d]=0x%08x\n", i, scu_trace[i]);
+	} else
+		pr_warn("[BOOT] SCU_TR=0x%08x (oshob)\n", scu_trace[0]);
+
+	pr_warn("[BOOT] IA_TR=0x%08x (oshob)\n", ia_trace);
+
+	/* Dumping OSNIB content */
+	ret = 0;
+	ret |= intel_scu_ipc_read_osnib_rr(&rr);
+	ret |= intel_scu_ipc_read_osnib_reset_ev1(&reset_ev1);
+	ret |= intel_scu_ipc_read_osnib_reset_ev2(&reset_ev2);
+	ret |= intel_scu_ipc_read_osnib_wd(&wd);
+	ret |= intel_scu_ipc_read_osnib_alarm(&alarm);
+	ret |= intel_scu_ipc_read_osnib_wakesrc(&wakesrc);
+
+	if (ret) {
+		pr_err("Cannot read OSNIB content\n");
+		goto exit;
+	}
+
+	/* Pretty-print the reboot-reason byte if it matches a known OS id. */
+	for (i = 0; i < ARRAY_SIZE(osnib_target_oses); i++) {
+		if (osnib_target_oses[i].id == rr) {
+			pr_warn("[BOOT] RR=[%s] WD=0x%02x ALARM=0x%02x (osnib)\n",
+				osnib_target_oses[i].target_os_name, wd, alarm);
+			rr_found++;
+			break;
+		}
+	}
+
+	if (!rr_found)
+		pr_warn("[BOOT] RR=[UNKNOWN 0x%02x] WD=0x%02x ALARM=0x%02x (osnib)\n",
+			rr, wd, alarm);
+
+	for (i = 0; i < ARRAY_SIZE(osnib_wake_srcs); i++) {
+		if (osnib_wake_srcs[i].id == wakesrc) {
+			pr_warn("[BOOT] WAKESRC=[%s] (osnib)\n",
+				osnib_wake_srcs[i].wakesrc_name);
+			wksrc_found++;
+			break;
+		}
+	}
+
+	if (!wksrc_found)
+		pr_warn("[BOOT] WAKESRC=[UNKNOWN 0x%02x] (osnib)\n", wakesrc);
+
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_warn("[BOOT] %s=0x%02x %s=0x%02x (osnib)\n",
+				chip_reset_events[i].reset_ev1_name,
+				reset_ev1,
+				chip_reset_events[i].reset_ev2_name,
+				reset_ev2);
+			break;
+		}
+	}
+
+#endif /* DUMP_OSNIB */
+
+#ifdef CONFIG_DEBUG_FS
+	if (oshob_info->oshob_majrev != OSHOB_REV_MAJ_DEFAULT) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		ret = intel_mid_scu_ipc_oemnib_debugfs_init();
+
+		if (ret != 0) {
+			pr_err("Cannot register OEMNIB interface to debugfs\n");
+			goto exit;
+		} else {
+			pr_info("OEMNIB interface registered to debugfs\n");
+		}
+	}
+#endif /* CONFIG_DEBUG_FS */
+
+exit:
+	return ret;
+}
+
+/* rpmsg probe: allocate the OSHOB info block, set up the rpmsg
+ * instance, read OSHOB data and register the /dev/mid_ipc misc device. */
+static int ipcutil_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	oshob_info = kmalloc(sizeof(struct scu_ipc_oshob_info), GFP_KERNEL);
+	if (oshob_info == NULL) {
+		pr_err(
+		"Cannot init ipc module: oshob info struct not allocated\n");
+		return -ENOMEM;
+	}
+
+	if (rpdev == NULL) {
+		pr_err("ipcutil rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed ipcutil rpmsg device\n");
+
+	/* Allocate rpmsg instance for mip*/
+	/* NOTE(review): on failure 'ret' is whatever alloc_rpmsg_instance()
+	 * returned — confirm it is always non-zero when the instance is
+	 * left NULL, otherwise probe would report success here. */
+	ret = alloc_rpmsg_instance(rpdev, &ipcutil_instance);
+	if (!ipcutil_instance) {
+		dev_err(&rpdev->dev, "kzalloc ipcutil instance failed\n");
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(ipcutil_instance);
+
+	ret = oshob_init();
+	if (ret)
+		goto misc_err;
+
+	ret = misc_register(&scu_ipcutil);
+	if (ret) {
+		pr_err("misc register failed\n");
+		goto misc_err;
+	}
+
+	return ret;
+
+misc_err:
+	free_rpmsg_instance(rpdev, &ipcutil_instance);
+out:
+	kfree(oshob_info);
+	return ret;
+}
+
+/* rpmsg remove: tear down in the reverse order of probe.
+ * The misc device is deregistered FIRST so no new ioctl can run while
+ * (or after) oshob_info is freed — the previous version freed
+ * oshob_info while /dev/mid_ipc was still registered, leaving a window
+ * for a use-after-free from an in-flight ioctl. */
+static void ipcutil_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	/* unregister scu_ipc_ioctl from sysfs. */
+	misc_deregister(&scu_ipcutil);
+
+#ifdef CONFIG_DEBUG_FS
+	if (oshob_info->oshob_majrev != OSHOB_REV_MAJ_DEFAULT) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		/* unregister from debugfs. */
+		intel_mid_scu_ipc_oemnib_debugfs_exit();
+	}
+#endif /* CONFIG_DEBUG_FS */
+
+	free_rpmsg_instance(rpdev, &ipcutil_instance);
+
+	kfree(oshob_info);
+}
+
+/* rpmsg receive callback: this driver never expects inbound messages,
+ * so anything received is logged and hex-dumped for debugging. */
+static void ipcutil_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+/* Match table: binds this driver to the "rpmsg_ipc_util" channel. */
+static struct rpmsg_device_id ipcutil_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_ipc_util" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, ipcutil_rpmsg_id_table);
+
+static struct rpmsg_driver ipcutil_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= ipcutil_rpmsg_id_table,
+	.probe		= ipcutil_rpmsg_probe,
+	.callback	= ipcutil_rpmsg_cb,
+	.remove		= ipcutil_rpmsg_remove,
+};
+
+static int __init ipcutil_rpmsg_init(void)
+{
+ return register_rpmsg_driver(&ipcutil_rpmsg);
}
-static void __exit ipc_module_exit(void)
+static void __exit ipcutil_rpmsg_exit(void)
{
- unregister_chrdev(major, "intel_mid_scu");
+ unregister_rpmsg_driver(&ipcutil_rpmsg);
}
-module_init(ipc_module_init);
-module_exit(ipc_module_exit);
+rootfs_initcall(ipcutil_rpmsg_init);
+module_exit(ipcutil_rpmsg_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Utility driver for intel scu ipc");
--- /dev/null
+/*
+ * intel_scu_mip.c: Driver for the Intel scu mip and umip access
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Shijie Zhang (shijie.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/rpmsg.h>
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mip.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define DRIVER_NAME "intel_scu_mip"
+
+#define IPC_MIP_BASE     0xFFFD8000	/* sram base address for mip accessing*/
+#define IPC_MIP_MAX_ADDR 0x1000
+
+/* Helper to declare a sysfs kobj_attribute with show/store handlers. */
+#define KOBJ_MIP_ATTR(_name, _mode, _show, _store) \
+	struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+static struct kobject *scu_mip_kobj;
+static struct rpmsg_instance *mip_instance;
+static struct scu_mip_platform_data *pdata;
+
+/* ioremap'ed view of the MIP SRAM window (IPC_MIP_BASE). */
+static void __iomem *intel_mip_base;
+/* UMIP-on-eMMC layout constants (Cloverview): the UMIP header occupies
+ * the first two sectors; per-sector checksums start at byte 8. */
+#define SECTOR_SIZE			512
+#define UMIP_TOTAL_CHKSUM_ENTRY		126
+#define UMIP_HEADER_HEADROOM_SECTOR	1
+#define UMIP_HEADER_SECTOR		0
+#define UMIP_HEADER_CHKSUM_ADDR		7
+#define UMIP_START_CHKSUM_ADDR		8
+#define UMIP_TOTAL_HEADER_SECTOR_NO	2
+
+#define UMIP_BLKDEVICE			"mmcblk0boot0"
+
+/* XOR-fold a buffer of 32-bit words into one checksum word.
+ * 'size' is in bytes; only whole dwords are folded. */
+static int xorblock(u32 *buf, u32 size)
+{
+	u32 acc = 0;
+	u32 idx, words;
+
+	words = size >> 2;
+	for (idx = 0; idx < words; idx++)
+		acc ^= buf[idx];
+
+	return acc;
+}
+
+/* XOR the four bytes of a dword together into a single byte checksum. */
+static u8 dword_to_byte_chksum(u32 dw)
+{
+	u32 folded;
+
+	folded = dw ^ (dw >> 8) ^ (dw >> 16) ^ (dw >> 24);
+
+	return (u8)folded;
+}
+
+/* Byte-wise XOR checksum over 'size' bytes starting at '_buf'. */
+static u8 calc_checksum(void *_buf, int size)
+{
+	u8 *bytes = _buf;
+	u8 acc = 0;
+	int idx;
+
+	for (idx = 0; idx < size; idx++)
+		acc ^= bytes[idx];
+
+	return acc;
+}
+
+/* class_find_device() match callback: true for the eMMC boot0 device. */
+static int mmcblk0boot0_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), UMIP_BLKDEVICE) == 0)
+		return 1;
+
+	return 0;
+}
+
+/* Locate the mmcblk0boot0 block device holding the UMIP copy on
+ * Cloverview.  Returns NULL if the device or disk cannot be found. */
+static struct block_device *get_emmc_bdev(void)
+{
+	struct block_device *bdev;
+	struct device *emmc_disk;
+
+	/* NOTE(review): class_find_device() takes a reference on the
+	 * returned device; no put_device() is visible here — confirm the
+	 * reference is intentionally kept (or dropped elsewhere). */
+	emmc_disk = class_find_device(&block_class, NULL, NULL,
+					mmcblk0boot0_match);
+	if (emmc_disk == 0) {
+		pr_err("emmc not found!\n");
+		return NULL;
+	}
+
+	/* partition 0 means raw disk */
+	bdev = bdget_disk(dev_to_disk(emmc_disk), 0);
+	if (bdev == NULL) {
+		dev_err(emmc_disk, "unable to get disk\n");
+		return NULL;
+	}
+
+	/* Note: this bdev ref will be freed after first
+	 * bdev_get/bdev_put cycle
+	 */
+
+	return bdev;
+}
+
+
+/* Low-level MIP read over rpmsg: ask the SCU to stage 'len' bytes at
+ * 'offset' into the shared SRAM window, then copy them out.  Retries
+ * while the SCU reports -EIO (busy). */
+static int read_mip(u8 *data, int len, int offset, int issigned)
+{
+	int ret;
+	u32 sptr, dptr, cmd, cmdid, data_off;
+
+	dptr = offset;
+	sptr = (len + 3) / 4;	/* length in dwords, rounded up */
+
+	cmdid = issigned ? IPC_CMD_SMIP_RD : IPC_CMD_UMIP_RD;
+	cmd = 4 << 16 | cmdid << 12 | IPCMSG_MIP_ACCESS;
+
+	do {
+		ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL,
+				(u32 *)&data_off, 0, 1, sptr, dptr);
+
+		if (ret == -EIO)
+			msleep(20);
+	} while (ret == -EIO);
+
+	if (!ret)
+		memcpy(data, intel_mip_base + data_off, len);
+
+	return ret;
+}
+
+/*
+ * Read 'len' bytes of (U/S)MIP data at 'offset'.  On Cloverview the
+ * UMIP copy is read from the eMMC boot0 partition; everywhere else the
+ * read goes through the SCU via the shared SRAM window.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix vs previous version: the blkdev_get() failure path returned
+ * "-ret", turning the negative errno into a positive value and
+ * breaking callers that test for ret < 0.
+ */
+int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned)
+{
+	int ret = 0;
+	Sector sect;
+	struct block_device *bdev;
+	char *buffer = NULL;
+	int *holderId = NULL;
+	int sect_no, remainder;
+
+	/* Only SMIP read for Cloverview is supported */
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		&& (issigned != 1)) { /* CTP read UMIP from eMMC */
+
+		/* Opening the mmcblk0boot0 */
+		bdev = get_emmc_bdev();
+		if (bdev == NULL) {
+			pr_err("%s: get_emmc failed!\n", __func__);
+			return -ENODEV;
+		}
+
+		/* make sure the block device is open read only */
+		ret = blkdev_get(bdev, FMODE_READ, holderId);
+		if (ret < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			return ret;	/* was "-ret": flipped the errno sign */
+		}
+
+		/* Get sector number of where data located */
+		sect_no = offset / SECTOR_SIZE;
+		remainder = offset % SECTOR_SIZE;
+		buffer = read_dev_sector(bdev, sect_no +
+				UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		/* Shouldn't need to access UMIP sector 0/1 */
+		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
+			pr_err("invalid umip offset\n");
+			ret = -EINVAL;
+			goto bd_put;
+		} else if (data == NULL || buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		} else if (len > (SECTOR_SIZE - remainder)) {
+			pr_err("not enough data to read\n");
+			ret = -EINVAL;
+			goto bd_put;
+		}
+
+		memcpy(data, buffer + remainder, len);
+bd_put:
+		if (buffer)
+			put_dev_sector(sect);
+
+		blkdev_put(bdev, FMODE_READ);
+		return ret;
+	} else {
+
+		if (!intel_mip_base)
+			return -ENODEV;
+
+		if (offset + len > IPC_MIP_MAX_ADDR)
+			return -EINVAL;
+
+		rpmsg_global_lock();
+		ret = read_mip(data, len, offset, issigned);
+		rpmsg_global_unlock();
+
+		return ret;
+	}
+}
+EXPORT_SYMBOL(intel_scu_ipc_read_mip);
+
+/* Read the SMIP property described by platform data entry 'pp' and
+ * return it as an int (big-endian byte assembly); bit-field properties
+ * are masked.  Returns a negative errno on failure.
+ * NOTE(review): 'pdata' itself is dereferenced without a NULL check —
+ * confirm probe ordering guarantees it is set before any caller runs. */
+int get_smip_property_by_name(enum platform_prop pp)
+{
+	u8 data[SMIP_MAX_PROP_LEN];
+	int i, val, ret;
+	struct smip_platform_prop prop[SMIP_NUM_CONFIG_PROPS];
+
+	if (!pdata->smip_prop)
+		return -EINVAL;
+
+	for (i = 0; i < SMIP_NUM_CONFIG_PROPS; i++)
+		prop[i] = pdata->smip_prop[i];
+
+	/* Read the property requested by the caller */
+	ret = intel_scu_ipc_read_mip(data, prop[pp].len, prop[pp].offset, 1);
+	if (ret)
+		return ret;
+
+	/* Adjust the bytes according to the length and return the int */
+	val = data[0];
+	for (i = 1; i < prop[pp].len; i++)
+		val = val << 8 | data[i];
+
+	/* If the requested property is a bit field, return that bit value */
+	if (prop[pp].is_bit_field)
+		val &= prop[pp].mask;
+
+	return val;
+}
+EXPORT_SYMBOL(get_smip_property_by_name);
+
+/*
+ * Write 'len' bytes of UMIP data at 'offset'.  On Cloverview the UMIP
+ * copy lives on the eMMC boot0 partition, and the per-sector checksum
+ * table plus the header prologue checksum must be updated after the
+ * data write.  On other platforms the write goes through the SCU via
+ * the shared SRAM window (read-modify-write when the range is not
+ * dword aligned).  Returns 0 on success or a negative errno.
+ *
+ * Fix vs previous version: the blkdev_get() failure path returned
+ * "-ret", turning the negative errno into a positive value and
+ * breaking callers that test for ret < 0.
+ */
+int intel_scu_ipc_write_umip(u8 *data, int len, int offset)
+{
+	int i, ret = 0, offset_align;
+	int remainder, len_align = 0;
+	u32 dptr, sptr, cmd;
+	u8 cs, tbl_cs = 0, *buf = NULL;
+	Sector sect;
+	struct block_device *bdev;
+	char *buffer = NULL;
+	int *holderId = NULL;
+	int sect_no;
+	u8 checksum;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+
+		/* Opening the mmcblk0boot0 */
+		bdev = get_emmc_bdev();
+		if (bdev == NULL) {
+			pr_err("%s: get_emmc failed!\n", __func__);
+			return -ENODEV;
+		}
+
+		/* make sure the block device is open rw */
+		ret = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, holderId);
+		if (ret < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			return ret;	/* was "-ret": flipped the errno sign */
+		}
+
+		/* get memmap of the UMIP header */
+		sect_no = offset / SECTOR_SIZE;
+		remainder = offset % SECTOR_SIZE;
+		buffer = read_dev_sector(bdev, sect_no +
+				UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		/* Shouldn't need to access UMIP sector 0/1 */
+		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
+			pr_err("invalid umip offset\n");
+			ret = -EINVAL;
+			goto bd_put;
+		} else if (data == NULL || buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		} else if (len > (SECTOR_SIZE - remainder)) {
+			pr_err("too much data to write\n");
+			ret = -EINVAL;
+			goto bd_put;
+		}
+
+		/* Patch the data sector and remember its new checksum. */
+		lock_page(sect.v);
+		memcpy(buffer + remainder, data, len);
+		checksum = calc_checksum(buffer, SECTOR_SIZE);
+
+		set_page_dirty(sect.v);
+		unlock_page(sect.v);
+		sync_blockdev(bdev);
+		put_dev_sector(sect);
+
+		/*
+		 * Updating the checksum, sector 0 (starting from UMIP
+		 * offset 0x08), we maintains 4 bytes for tracking each of
+		 * sector changes individually. For example, the dword at
+		 * offset 0x08 is used to checksum data integrity of sector
+		 * number 2, and so on so forth. It's worthnoting that only
+		 * the first byte in each 4 bytes stores checksum.
+		 * For detail, please check CTP FAS UMIP header definition
+		 */
+
+		buffer = read_dev_sector(bdev, UMIP_HEADER_SECTOR +
+				UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		if (buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		}
+
+		lock_page(sect.v);
+		memcpy(buffer + 4 * (sect_no - UMIP_TOTAL_HEADER_SECTOR_NO) +
+			UMIP_START_CHKSUM_ADDR, &checksum, 1/* one byte */);
+
+		/* Change UMIP prologue chksum to zero */
+		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = 0;
+
+		for (i = 0; i < UMIP_TOTAL_CHKSUM_ENTRY; i++) {
+			tbl_cs ^= *(u8 *)(buffer + 4 * i +
+					UMIP_START_CHKSUM_ADDR);
+		}
+
+		/* Finish up with re-calcuating UMIP prologue checksum */
+		cs = dword_to_byte_chksum(xorblock((u32 *)buffer,
+						SECTOR_SIZE));
+
+		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = tbl_cs ^ cs;
+
+		set_page_dirty(sect.v);
+		unlock_page(sect.v);
+		sync_blockdev(bdev);
+bd_put:
+		if (buffer)
+			put_dev_sector(sect);
+
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+		return ret;
+	} else {
+
+		if (!intel_mip_base)
+			return -ENODEV;
+
+		if (offset + len > IPC_MIP_MAX_ADDR)
+			return -EINVAL;
+
+		rpmsg_global_lock();
+
+		/* Align the write range to dword boundaries; unaligned
+		 * writes are done read-modify-write via a bounce buffer. */
+		offset_align = offset & (~0x3);
+		len_align = (len + (offset - offset_align) + 3) & (~0x3);
+
+		if (len != len_align) {
+			buf = kzalloc(len_align, GFP_KERNEL);
+			if (!buf) {
+				pr_err("Alloc memory failed\n");
+				ret = -ENOMEM;
+				goto fail;
+			}
+			ret = read_mip(buf, len_align, offset_align, 0);
+			if (ret)
+				goto fail;
+			memcpy(buf + offset - offset_align, data, len);
+		} else {
+			buf = data;
+		}
+
+		dptr = offset_align;
+		sptr = len_align / 4;
+		cmd = IPC_CMD_UMIP_WR << 12 | IPCMSG_MIP_ACCESS;
+
+		memcpy(intel_mip_base, buf, len_align);
+
+		do {
+			ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL,
+					NULL, 0, 0, sptr, dptr);
+			if (ret == -EIO)
+				msleep(20);
+		} while (ret == -EIO);
+
+fail:
+		if (buf && len_align != len)
+			kfree(buf);
+
+		rpmsg_global_unlock();
+
+		return ret;
+	}
+}
+EXPORT_SYMBOL(intel_scu_ipc_write_umip);
+
+
+#define MAX_DATA_NR 8
+#define MIP_CMD_LEN 11
+
+/* Field selectors shared by the generic sysfs show/store helpers. */
+enum {
+	MIP_DBG_DATA,
+	MIP_DBG_LEN,
+	MIP_DBG_OFFSET,
+	MIP_DBG_ISSIGNED,
+	MIP_DBG_ERROR,
+};
+
+/* State backing the sysfs debug interface: pending data bytes, request
+ * parameters, last error and last command string. */
+static u8 mip_data[MAX_DATA_NR];
+static int valid_data_nr;
+static int mip_len;
+static int mip_offset;
+static int mip_issigned;
+static int mip_dbg_error;
+static char mip_cmd[MIP_CMD_LEN];
+
+/* Format one debug field (selected by 'type') into 'buf' for sysfs.
+ * MIP_DBG_DATA ignores 'data' and prints the valid mip_data bytes. */
+static ssize_t mip_generic_show(char *buf, int type, int *data)
+{
+	int i;
+	ssize_t ret = 0;
+
+	switch (type) {
+	case MIP_DBG_DATA:
+		for (i = 0; i < valid_data_nr; i++) {
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+				"data[%d]: %#x\n",
+				i, mip_data[i]);
+		}
+		break;
+	case MIP_DBG_LEN:
+		ret = snprintf(buf, PAGE_SIZE, "len: %d\n", *data);
+		break;
+	case MIP_DBG_OFFSET:
+		ret = snprintf(buf, PAGE_SIZE, "offset: %#x\n", *data);
+		break;
+	case MIP_DBG_ISSIGNED:
+		ret = snprintf(buf, PAGE_SIZE, "issigned: %d\n", *data);
+		break;
+	case MIP_DBG_ERROR:
+		ret = snprintf(buf, PAGE_SIZE, "error: %d\n", *data);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Parse a sysfs store into the debug field selected by 'type', setting
+ * mip_dbg_error to 0 on success or -EINVAL on parse failure.
+ *
+ * Fix vs previous version: sscanf() returns EOF (-1) on input failure
+ * and the default case used ret = -1, both of which are truthy, so the
+ * old "if (ret)" check reported success for failed parses.  Only a
+ * positive conversion count is success. */
+static void mip_generic_store(const char *buf, int type, int *data)
+{
+	int i, ret;
+
+	if (type == MIP_DBG_DATA) {
+		u32 t[MAX_DATA_NR];
+
+		valid_data_nr = 0;
+		memset(mip_data, 0, sizeof(mip_data));
+
+		ret = sscanf(buf, "%x %x %x %x %x %x %x %x", &t[0], &t[1],
+				&t[2], &t[3], &t[4], &t[5], &t[6], &t[7]);
+		/* ret <= 0 also catches sscanf() returning EOF. */
+		if (ret <= 0 || ret > MAX_DATA_NR) {
+			mip_dbg_error = -EINVAL;
+			return;
+		} else {
+			for (i = 0; i < ret; i++)
+				mip_data[i] = (u8)t[i];
+			valid_data_nr = ret;
+		}
+	} else {
+		*data = 0;
+		switch (type) {
+		case MIP_DBG_OFFSET:
+			ret = sscanf(buf, "%x", data);
+			break;
+		case MIP_DBG_LEN:
+		case MIP_DBG_ISSIGNED:
+			ret = sscanf(buf, "%d", data);
+			break;
+		default:
+			ret = -1;
+			break;
+		}
+	}
+
+	if (ret > 0)
+		mip_dbg_error = 0;
+	else
+		mip_dbg_error = -EINVAL;
+
+	return;
+}
+
+/* Thin sysfs wrappers: each show/store pair forwards one debug field
+ * to mip_generic_show()/mip_generic_store(). */
+static ssize_t mip_data_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_DATA, NULL);
+}
+
+static ssize_t mip_data_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_DATA, NULL);
+	return size;
+}
+
+static ssize_t mip_len_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_LEN, &mip_len);
+}
+
+static ssize_t mip_len_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_LEN, &mip_len);
+	return size;
+}
+
+static ssize_t mip_offset_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_OFFSET, &mip_offset);
+}
+
+static ssize_t mip_offset_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_OFFSET, &mip_offset);
+	return size;
+}
+
+static ssize_t mip_issigned_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_ISSIGNED, &mip_issigned);
+}
+
+static ssize_t mip_issigned_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_ISSIGNED, &mip_issigned);
+	return size;
+}
+
+/* Read-only: reports the last parse/command error code. */
+static ssize_t mip_error_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_ERROR, &mip_dbg_error);
+}
+
+/*
+ * mip_cmd_store - execute a MIP debug command written to the "cmd" file.
+ *
+ * Accepted commands:
+ *   "read_mip"   - read mip_len bytes at mip_offset into mip_data[]
+ *   "write_umip" - write the previously staged mip_data[] (mip_len must
+ *                  match the number of bytes staged via "data")
+ *
+ * The result is latched in mip_dbg_error. Unlike the previous revision,
+ * a successful command now clears any stale error, and an IPC failure
+ * stores the real error code instead of collapsing it to -EINVAL.
+ * Always returns @size, per sysfs store convention.
+ */
+static ssize_t mip_cmd_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(mip_cmd, 0, sizeof(mip_cmd));
+
+	/* NOTE(review): "%10s" stores up to 11 bytes incl. NUL; assumes
+	 * MIP_CMD_LEN >= 11 -- confirm against its definition. */
+	ret = sscanf(buf, "%10s", mip_cmd);
+	if (ret != 1) {
+		mip_dbg_error = -EINVAL;
+		return size;
+	}
+
+	if (!strncmp("read_mip", mip_cmd, MIP_CMD_LEN)) {
+		memset(mip_data, 0, sizeof(mip_data));
+		ret = intel_scu_ipc_read_mip(mip_data, mip_len, mip_offset,
+						mip_issigned);
+		if (!ret)
+			valid_data_nr = mip_len;
+	} else if (!strncmp("write_umip", mip_cmd, MIP_CMD_LEN)) {
+		if (mip_len == valid_data_nr)
+			ret = intel_scu_ipc_write_umip(mip_data, mip_len,
+							mip_offset);
+		else
+			ret = -EINVAL;
+	} else {
+		ret = -EINVAL;	/* unrecognized command string */
+	}
+
+	mip_dbg_error = ret;
+	return size;
+}
+
+static KOBJ_MIP_ATTR(data, S_IRUGO|S_IWUSR, mip_data_show, mip_data_store);
+static KOBJ_MIP_ATTR(len, S_IRUGO|S_IWUSR, mip_len_show, mip_len_store);
+static KOBJ_MIP_ATTR(offset, S_IRUGO|S_IWUSR, mip_offset_show,
+ mip_offset_store);
+static KOBJ_MIP_ATTR(issigned, S_IRUGO|S_IWUSR, mip_issigned_show,
+ mip_issigned_store);
+static KOBJ_MIP_ATTR(cmd, S_IWUSR, NULL, mip_cmd_store);
+static KOBJ_MIP_ATTR(error, S_IRUGO, mip_error_show, NULL);
+
+static struct attribute *mip_attrs[] = {
+ &data_attr.attr,
+ &len_attr.attr,
+ &offset_attr.attr,
+ &issigned_attr.attr,
+ &cmd_attr.attr,
+ &error_attr.attr,
+ NULL,
+};
+
+static struct attribute_group mip_attr_group = {
+ .name = "mip_debug",
+ .attrs = mip_attrs,
+};
+
+/*
+ * scu_mip_probe - platform probe hook.
+ *
+ * On non-Penwell parts the driver requires platform data; the pointer is
+ * stashed in the file-scope 'pdata' (declared earlier in this file,
+ * outside this hunk). Penwell needs none.
+ */
+static int scu_mip_probe(struct platform_device *pdev)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_PENWELL) {
+		if (!pdev->dev.platform_data)
+			return -EINVAL;
+		pdata =
+			(struct scu_mip_platform_data *)pdev->dev.platform_data;
+	}
+	return 0;
+}
+
+/* Nothing to tear down beyond clearing driver data. */
+static int scu_mip_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+/*
+ * Device ID table. Kernel ID tables are walked until an all-zero entry,
+ * so the terminating sentinel is mandatory -- without it the match loop
+ * reads past the end of the array.
+ */
+static const struct platform_device_id scu_mip_table[] = {
+	{DRIVER_NAME, 1 },
+	{ },
+};
+
+static struct platform_driver scu_mip_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = scu_mip_probe,
+	.remove = scu_mip_remove,
+	.id_table = scu_mip_table,
+};
+
+/* Registered from mip_rpmsg_probe() once the rpmsg channel is up. */
+static int __init scu_mip_init(void)
+{
+	return platform_driver_register(&scu_mip_driver);
+}
+
+/* Not __exit: also called from the rpmsg remove path at runtime. */
+static void scu_mip_exit(void)
+{
+	platform_driver_unregister(&scu_mip_driver);
+}
+
+/*
+ * mip_rpmsg_probe - bring up the MIP debug interface when the rpmsg
+ * channel appears: allocate the rpmsg instance, map the MIP window,
+ * publish /sys/kernel/mip_debug, then register the platform driver.
+ *
+ * Fix vs. previous revision: a scu_mip_init() failure used to return
+ * without undoing the sysfs group, kobject, iomap or rpmsg instance;
+ * the unwind ladder now covers that path. (The old comment also said
+ * "debugfs" where sysfs is created.)
+ */
+static int mip_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed mip rpmsg device\n");
+
+	/* Allocate rpmsg instance for mip */
+	ret = alloc_rpmsg_instance(rpdev, &mip_instance);
+	if (!mip_instance) {
+		dev_err(&rpdev->dev, "kzalloc mip instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(mip_instance);
+
+	/* Map the MIP region */
+	intel_mip_base = ioremap_nocache(IPC_MIP_BASE, IPC_MIP_MAX_ADDR);
+	if (!intel_mip_base) {
+		ret = -ENOMEM;
+		goto rpmsg_err;
+	}
+
+	/* Create the sysfs group for the mip debug attributes */
+	scu_mip_kobj = kobject_create_and_add(mip_attr_group.name,
+						kernel_kobj);
+	if (!scu_mip_kobj) {
+		ret = -ENOMEM;
+		goto mip_base_err;
+	}
+
+	ret = sysfs_create_group(scu_mip_kobj, &mip_attr_group);
+	if (ret) {
+		kobject_put(scu_mip_kobj);
+		goto mip_base_err;
+	}
+
+	ret = scu_mip_init();
+	if (ret)
+		goto sysfs_err;
+	goto out;
+
+sysfs_err:
+	sysfs_remove_group(scu_mip_kobj, &mip_attr_group);
+	kobject_put(scu_mip_kobj);
+mip_base_err:
+	iounmap(intel_mip_base);
+rpmsg_err:
+	free_rpmsg_instance(rpdev, &mip_instance);
+out:
+	return ret;
+}
+
+/* Tear down in reverse order of mip_rpmsg_probe(). */
+static void mip_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	scu_mip_exit();
+	iounmap(intel_mip_base);
+	free_rpmsg_instance(rpdev, &mip_instance);
+	sysfs_remove_group(scu_mip_kobj, &mip_attr_group);
+	kobject_put(scu_mip_kobj);
+	dev_info(&rpdev->dev, "Removed mip rpmsg device\n");
+}
+
+/* This driver only sends commands; unsolicited messages are just logged. */
+static void mip_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+			int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len, true);
+}
+
+static struct rpmsg_device_id mip_rpmsg_id_table[] = {
+	{ .name = "rpmsg_mip" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, mip_rpmsg_id_table);
+
+static struct rpmsg_driver mip_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= mip_rpmsg_id_table,
+	.probe		= mip_rpmsg_probe,
+	.callback	= mip_rpmsg_cb,
+	.remove		= mip_rpmsg_remove,
+};
+
+/* MIP access is only meaningful on Penwell/Cloverview SoCs. */
+static int __init mip_rpmsg_init(void)
+{
+	if ((intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_PENWELL)
+		&& (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_CLOVERVIEW))
+		return -EINVAL;
+
+	return register_rpmsg_driver(&mip_rpmsg);
+}
+
+/* Built-in: init early (fs_initcall_sync) so MIP users can depend on it. */
+#ifdef MODULE
+module_init(mip_rpmsg_init);
+#else
+fs_initcall_sync(mip_rpmsg_init);
+#endif
+
+static void __exit mip_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&mip_rpmsg);
+}
+module_exit(mip_rpmsg_exit);
+
+MODULE_AUTHOR("Shijie Zhang <shijie.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel SCU MIP driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * pmic.c - Intel MSIC Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/rpmsg.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define IPC_WWBUF_SIZE 20
+#define IPC_RWBUF_SIZE 20
+
+static struct kobject *scu_pmic_kobj;
+static struct rpmsg_instance *pmic_instance;
+
+/*
+ * pwr_reg_rdwr - issue a PMIC register read/write/modify over rpmsg IPC.
+ * @addr:  array of @count 16-bit register addresses
+ * @data:  in/out byte array (values to write, or buffer filled on read);
+ *         for IPC_CMD_PCNTRL_M, data[0] = bits and data[1] = mask
+ * @count: number of registers
+ * @cmd:   IPC message id (IPCMSG_PCNTRL)
+ * @sub:   sub-command (IPC_CMD_PCNTRL_R / _W / _M)
+ *
+ * Returns 0 on success or a negative error from rpmsg_send_command().
+ *
+ * Fixes vs. previous revision: an unsupported @sub now fails with
+ * -EINVAL instead of being sent to the SCU anyway, and the redundant
+ * memset() of the already zero-initialized wbuf was dropped.
+ */
+static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 cmd, u32 sub)
+{
+	int i, err, inlen = 0, outlen = 0;
+
+	u8 wbuf[IPC_WWBUF_SIZE] = {};
+	u8 rbuf[IPC_RWBUF_SIZE] = {};
+
+	/* Pack each address little-endian, two bytes per register. */
+	for (i = 0; i < count; i++) {
+		wbuf[inlen++] = addr[i] & 0xff;
+		wbuf[inlen++] = (addr[i] >> 8) & 0xff;
+	}
+
+	if (sub == IPC_CMD_PCNTRL_R) {
+		/* SCU returns one 32-bit word per 4 registers read. */
+		outlen = count > 0 ? ((count - 1) / 4) + 1 : 0;
+	} else if (sub == IPC_CMD_PCNTRL_W) {
+		/*
+		 * NOTE(review): for count == 3 the data bytes are placed
+		 * 2 bytes past the packed addresses while inlen itself is
+		 * restored -- presumably an SCU word-alignment quirk;
+		 * confirm against the SCU firmware interface spec.
+		 */
+		if (count == 3)
+			inlen += 2;
+
+		for (i = 0; i < count; i++)
+			wbuf[inlen++] = data[i] & 0xff;
+
+		if (count == 3)
+			inlen -= 2;
+
+		outlen = 0;
+	} else if (sub == IPC_CMD_PCNTRL_M) {
+		wbuf[inlen++] = data[0] & 0xff;	/* bits to set */
+		wbuf[inlen++] = data[1] & 0xff;	/* mask */
+		outlen = 0;
+	} else {
+		pr_err("IPC command not supported\n");
+		return -EINVAL;
+	}
+
+	err = rpmsg_send_command(pmic_instance, cmd, sub, wbuf,
+			(u32 *)rbuf, inlen, outlen);
+
+	if (sub == IPC_CMD_PCNTRL_R) {
+		for (i = 0; i < count; i++)
+			data[i] = rbuf[i];
+	}
+
+	return err;
+}
+
+/* Read a single 8-bit PMIC register. */
+int intel_scu_ipc_ioread8(u16 addr, u8 *data)
+{
+	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_ioread8);
+
+/* Write a single 8-bit PMIC register. */
+int intel_scu_ipc_iowrite8(u16 addr, u8 data)
+{
+	return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
+
+/*
+ * Write 32 bits as four consecutive byte-wide registers starting at @addr.
+ * NOTE(review): the byte order written is the CPU's in-memory layout of
+ * @data (little-endian on x86) -- confirm this matches the PMIC layout.
+ */
+int intel_scu_ipc_iowrite32(u16 addr, u32 data)
+{
+	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
+	return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
+
+/* Read up to 8 scattered registers in one IPC transaction. */
+int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
+{
+	if (len < 1 || len > 8)
+		return -EINVAL;
+
+	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_readv);
+
+/* Write up to 4 scattered registers in one IPC transaction. */
+int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
+{
+	if (len < 1 || len > 4)
+		return -EINVAL;
+
+	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_writev);
+
+/* Read-modify-write: set @bits under @mask in register @addr (SCU-side). */
+int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
+{
+	u8 data[2] = { bits, mask };
+	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
+}
+EXPORT_SYMBOL(intel_scu_ipc_update_register);
+
+/* pmic sysfs for debug */
+
+#define MAX_PMIC_REG_NR 4
+#define PMIC_OPS_LEN 10
+
+enum {
+ PMIC_DBG_ADDR,
+ PMIC_DBG_BITS,
+ PMIC_DBG_DATA,
+ PMIC_DBG_MASK,
+};
+
+static char *pmic_msg_format[] = {
+ "addr[%d]: %#x\n",
+ "bits[%d]: %#x\n",
+ "data[%d]: %#x\n",
+ "mask[%d]: %#x\n",
+};
+
+static u16 pmic_reg_addr[MAX_PMIC_REG_NR];
+static u8 pmic_reg_bits[MAX_PMIC_REG_NR];
+static u8 pmic_reg_data[MAX_PMIC_REG_NR];
+static u8 pmic_reg_mask[MAX_PMIC_REG_NR];
+static int valid_addr_nr;
+static int valid_bits_nr;
+static int valid_data_nr;
+static int valid_mask_nr;
+static char pmic_ops[PMIC_OPS_LEN];
+
+static int pmic_dbg_error;
+
+/*
+ * pmic_generic_show - print one "<name>[i]: 0x.." line per valid entry
+ * of the selected register-shadow array into a sysfs buffer.
+ * @buf:   PAGE_SIZE sysfs output buffer
+ * @valid: number of entries staged by the matching store
+ * @array: backing u8 array (ignored for PMIC_DBG_ADDR, which reads the
+ *         u16 pmic_reg_addr[] directly)
+ * @type:  attribute selector, indexes pmic_msg_format[]
+ *
+ * Returns the number of bytes written (0 for an unknown @type).
+ */
+static ssize_t pmic_generic_show(char *buf, int valid, u8 *array, int type)
+{
+	ssize_t used = 0;
+	int i;
+
+	if (type < PMIC_DBG_ADDR || type > PMIC_DBG_MASK)
+		return 0;
+
+	for (i = 0; i < valid; i++) {
+		unsigned int val = (type == PMIC_DBG_ADDR) ?
+					pmic_reg_addr[i] : array[i];
+
+		used += snprintf(buf + used, PAGE_SIZE - used,
+				pmic_msg_format[type], i, val);
+	}
+
+	return used;
+}
+
+/*
+ * pmic_generic_store - parse up to MAX_PMIC_REG_NR hex values from a
+ * sysfs write into the selected register-shadow array, recording the
+ * number parsed in @valid.
+ *
+ * Fix vs. previous revision: sscanf() returns EOF (-1) on an empty
+ * buffer; the old "ret == 0" test let that through and stored a
+ * negative count in *valid.
+ */
+static void pmic_generic_store(const char *buf, int *valid, u8 *array, int type)
+{
+	u32 tmp[MAX_PMIC_REG_NR];
+	int i, ret;
+
+	ret = sscanf(buf, "%x %x %x %x", &tmp[0], &tmp[1], &tmp[2], &tmp[3]);
+	if (ret <= 0 || ret > MAX_PMIC_REG_NR) {
+		*valid = 0;
+		pmic_dbg_error = -EINVAL;
+		return;
+	}
+
+	*valid = ret;
+
+	switch (type) {
+	case PMIC_DBG_ADDR:
+		/* addresses are 16-bit; stored in their own u16 array */
+		memset(pmic_reg_addr, 0, sizeof(pmic_reg_addr));
+		for (i = 0; i < ret; i++)
+			pmic_reg_addr[i] = (u16)tmp[i];
+		break;
+	case PMIC_DBG_BITS:
+	case PMIC_DBG_DATA:
+	case PMIC_DBG_MASK:
+		memset(array, 0, sizeof(*array) * MAX_PMIC_REG_NR);
+		for (i = 0; i < ret; i++)
+			array[i] = (u8)tmp[i];
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Thin sysfs accessors: each pair forwards to pmic_generic_show()/
+ * pmic_generic_store() with the attribute's type tag, valid-count and
+ * backing array.
+ */
+static ssize_t pmic_addr_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_addr_nr, NULL, PMIC_DBG_ADDR);
+}
+
+static ssize_t pmic_addr_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_addr_nr, NULL, PMIC_DBG_ADDR);
+	return size;
+}
+
+static ssize_t pmic_bits_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_bits_nr, pmic_reg_bits,
+				PMIC_DBG_BITS);
+}
+static ssize_t pmic_bits_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_bits_nr, pmic_reg_bits, PMIC_DBG_BITS);
+	return size;
+}
+
+static ssize_t pmic_data_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_data_nr, pmic_reg_data,
+				PMIC_DBG_DATA);
+}
+
+static ssize_t pmic_data_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_data_nr, pmic_reg_data, PMIC_DBG_DATA);
+	return size;
+}
+
+static ssize_t pmic_mask_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_mask_nr, pmic_reg_mask,
+				PMIC_DBG_MASK);
+}
+
+static ssize_t pmic_mask_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_mask_nr, pmic_reg_mask, PMIC_DBG_MASK);
+	return size;
+}
+
+/*
+ * pmic_ops_store - execute a PMIC debug command written to "ops".
+ *
+ * Commands operate on the previously staged addr/bits/data/mask arrays:
+ *   "read"   - ioread8 each staged address into pmic_reg_data[]
+ *   "write"  - iowrite8 each staged data byte (counts must match)
+ *   "update" - read-modify-write bits under mask (all counts must match)
+ *
+ * The outcome (0 or a negative errno) is latched in pmic_dbg_error for
+ * readback via the "error" file. Always returns @size, per sysfs store
+ * convention.
+ */
+static ssize_t pmic_ops_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	int i, ret;
+
+	memset(pmic_ops, 0, sizeof(pmic_ops));
+
+	/* "%9s" bounds the copy: 9 chars + NUL fits PMIC_OPS_LEN (10). */
+	ret = sscanf(buf, "%9s", pmic_ops);
+	if (ret == 0) {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	/* every command needs at least one staged address */
+	if (valid_addr_nr <= 0) {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	if (!strncmp("read", pmic_ops, PMIC_OPS_LEN)) {
+		valid_data_nr = valid_addr_nr;
+		for (i = 0; i < valid_addr_nr; i++) {
+			ret = intel_scu_ipc_ioread8(pmic_reg_addr[i],
+					&pmic_reg_data[i]);
+			if (ret) {
+				pmic_dbg_error = ret;
+				goto end;
+			}
+		}
+	} else if (!strncmp("write", pmic_ops, PMIC_OPS_LEN)) {
+		if (valid_addr_nr == valid_data_nr) {
+			for (i = 0; i < valid_addr_nr; i++) {
+				ret = intel_scu_ipc_iowrite8(pmic_reg_addr[i],
+						pmic_reg_data[i]);
+				if (ret) {
+					pmic_dbg_error = ret;
+					goto end;
+				}
+			}
+		} else {
+			pmic_dbg_error = -EINVAL;
+			goto end;
+		}
+	} else if (!strncmp("update", pmic_ops, PMIC_OPS_LEN)) {
+		if (valid_addr_nr == valid_mask_nr &&
+			valid_mask_nr == valid_bits_nr) {
+			for (i = 0; i < valid_addr_nr; i++) {
+				ret = intel_scu_ipc_update_register(
+						pmic_reg_addr[i],
+						pmic_reg_bits[i],
+						pmic_reg_mask[i]);
+				if (ret) {
+					pmic_dbg_error = ret;
+					goto end;
+				}
+			}
+		} else {
+			pmic_dbg_error = -EINVAL;
+			goto end;
+		}
+	} else {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	pmic_dbg_error = 0;
+
+end:
+	return size;
+}
+
+/* "error" file: report the latched result of the last "ops" command. */
+static ssize_t pmic_show_error(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", pmic_dbg_error);
+}
+
+/* /sys/kernel/pmic_debug/{addr,bits,data,mask,ops,error} */
+static KOBJ_PMIC_ATTR(addr, S_IRUGO|S_IWUSR, pmic_addr_show, pmic_addr_store);
+static KOBJ_PMIC_ATTR(bits, S_IRUGO|S_IWUSR, pmic_bits_show, pmic_bits_store);
+static KOBJ_PMIC_ATTR(data, S_IRUGO|S_IWUSR, pmic_data_show, pmic_data_store);
+static KOBJ_PMIC_ATTR(mask, S_IRUGO|S_IWUSR, pmic_mask_show, pmic_mask_store);
+static KOBJ_PMIC_ATTR(ops, S_IWUSR, NULL, pmic_ops_store);
+static KOBJ_PMIC_ATTR(error, S_IRUGO, pmic_show_error, NULL);
+
+static struct attribute *pmic_attrs[] = {
+	&addr_attr.attr,
+	&bits_attr.attr,
+	&data_attr.attr,
+	&mask_attr.attr,
+	&ops_attr.attr,
+	&error_attr.attr,
+	NULL,
+};
+
+static struct attribute_group pmic_attr_group = {
+	.name = "pmic_debug",
+	.attrs = pmic_attrs,
+};
+
+/*
+ * pmic_rpmsg_probe - bring up the PMIC IPC path when the rpmsg channel
+ * appears: allocate the rpmsg instance and publish the
+ * /sys/kernel/pmic_debug attribute group.
+ */
+static int pmic_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic rpmsg device\n");
+
+	/* Allocate rpmsg instance for pmic*/
+	ret = alloc_rpmsg_instance(rpdev, &pmic_instance);
+	if (!pmic_instance) {
+		/* NOTE(review): relies on alloc_rpmsg_instance() returning a
+		 * non-zero ret whenever it leaves the instance NULL -- confirm. */
+		dev_err(&rpdev->dev, "kzalloc pmic instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(pmic_instance);
+
+	/* Create the sysfs group for pmic regs (not debugfs, despite the
+	 * old comment): /sys/kernel/pmic_debug */
+	scu_pmic_kobj = kobject_create_and_add(pmic_attr_group.name,
+						kernel_kobj);
+
+	if (!scu_pmic_kobj) {
+		ret = -ENOMEM;
+		goto rpmsg_err;
+	}
+
+	ret = sysfs_create_group(scu_pmic_kobj, &pmic_attr_group);
+
+	if (ret) {
+		kobject_put(scu_pmic_kobj);
+		goto rpmsg_err;
+	}
+
+	goto out;
+
+rpmsg_err:
+	free_rpmsg_instance(rpdev, &pmic_instance);
+out:
+	return ret;
+}
+
+/* Tear down in reverse order of pmic_rpmsg_probe(). */
+static void pmic_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	free_rpmsg_instance(rpdev, &pmic_instance);
+	sysfs_remove_group(scu_pmic_kobj, &pmic_attr_group);
+	kobject_put(scu_pmic_kobj);
+	dev_info(&rpdev->dev, "Removed pmic rpmsg device\n");
+}
+
+/* This driver only sends commands; unsolicited messages are just logged. */
+static void pmic_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+			int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len, true);
+}
+
+static struct rpmsg_device_id pmic_rpmsg_id_table[] = {
+	{ .name = "rpmsg_pmic" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_rpmsg_id_table,
+	.probe		= pmic_rpmsg_probe,
+	.callback	= pmic_rpmsg_cb,
+	.remove		= pmic_rpmsg_remove,
+};
+
+static int __init pmic_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_rpmsg);
+}
+
+/* Built-in: init early (fs_initcall_sync) so PMIC users can depend on it. */
+#ifdef MODULE
+module_init(pmic_rpmsg_init);
+#else
+fs_initcall_sync(pmic_rpmsg_init);
+#endif
+
+static void __exit pmic_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&pmic_rpmsg);
+}
+module_exit(pmic_rpmsg_exit);
+
+MODULE_AUTHOR("Bin Yang<bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel PMIC Driver");
+MODULE_LICENSE("GPL v2");
Say Y here to enable debugging messages for power supply class
and drivers.
+config PMIC_CCSM
+ tristate "PMIC CCSM driver"
+ select POWER_SUPPLY_BATTID
+ depends on INTEL_SCU_IPC && IIO
+	help
+	  Say Y to include support for the PMIC Charger Control State
+	  Machine (CCSM) driver, which initializes and monitors the CCSM
+	  in the PMIC. This driver sets the CCSM registers and handles
+	  the PMIC charger interrupts.
+
+config BQ24261_CHARGER
+ tristate "BQ24261 charger driver"
+ select POWER_SUPPLY_CHARGER
+ depends on I2C
+ help
+ Say Y to include support for BQ24261 Charger driver. This driver
+ makes use of power supply charging framework. So the driver gives
+ the charger hardware abstraction only. Charging logic is abstracted
+ in the charging framework.
+
config PDA_POWER
tristate "Generic PDA/phone power driver"
depends on !S390
ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
-power_supply-y := power_supply_core.o
-power_supply-$(CONFIG_SYSFS) += power_supply_sysfs.o
-power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
+power_supply-y := power_supply_core.o
+power_supply-$(CONFIG_SYSFS) += power_supply_sysfs.o
+power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
+power_supply-$(CONFIG_POWER_SUPPLY_CHARGER) += power_supply_charger.o
+power_supply-$(CONFIG_POWER_SUPPLY_BATTID) += battery_id.o
obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
obj-$(CONFIG_GENERIC_ADC_BATTERY) += generic-adc-battery.o
+obj-$(CONFIG_POWER_SUPPLY_CHARGING_ALGO_PSE) += charging_algo_pse.o
obj-$(CONFIG_PDA_POWER) += pda_power.o
obj-$(CONFIG_APM_POWER) += apm_power.o
obj-$(CONFIG_MAX8925_POWER) += max8925_power.o
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
obj-$(CONFIG_POWER_RESET) += reset/
+
+obj-$(CONFIG_BQ24261_CHARGER) += bq24261_charger.o
+obj-$(CONFIG_PMIC_CCSM) += pmic_ccsm.o
--- /dev/null
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/power/battery_id.h>
+
+ATOMIC_NOTIFIER_HEAD(batt_id_notifier);
+
+static struct ps_batt_chg_prof *batt_property;
+static int batt_status;
+
+/* Register for battery insertion/removal notifications. */
+int batt_id_reg_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&batt_id_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(batt_id_reg_notifier);
+
+/* Unregister a notifier previously added with batt_id_reg_notifier(). */
+void batt_id_unreg_notifier(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&batt_id_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(batt_id_unreg_notifier);
+
+
+/**
+ * battery_prop_changed - Update properties when battery connection status
+ * changes
+ * @battery_conn_stat : The current connection status of battery
+ * @batt_prop : Address of the ps_batt_chg_prof structure with the updated
+ * values passed from the calling function
+ *
+ * Whenever the battery connection status changes this function will be called
+ * to indicate a change in the status and to update the status and value of
+ * properties
+ *
+ * NOTE(review): the caller's @batt_prop pointer is retained, not copied --
+ * it must stay valid while the battery is marked inserted. The notifier
+ * fires on every call, even when the status did not change, and its
+ * payload is &batt_property (pointer to the pointer) -- confirm listeners
+ * expect the extra indirection.
+ */
+void battery_prop_changed(int battery_conn_stat,
+				struct ps_batt_chg_prof *batt_prop)
+{
+	if (batt_status != battery_conn_stat) {
+		if (battery_conn_stat == POWER_SUPPLY_BATTERY_INSERTED)
+			batt_property = batt_prop;
+		else
+			batt_property = NULL;
+
+		batt_status = battery_conn_stat;
+	}
+
+	atomic_notifier_call_chain(&batt_id_notifier,
+		0, &(batt_property));
+
+}
+EXPORT_SYMBOL_GPL(battery_prop_changed);
+
+/**
+ * get_batt_prop - Get the battery connection status and updated properties
+ * @batt_prop : battery properties structure copied to this address
+ *
+ * Returns 0 on success. Returns -ENODEV when no battery property set is
+ * currently registered (no battery inserted); the previous -ENOMEM was
+ * misleading, since no allocation is involved here.
+ */
+int get_batt_prop(struct ps_batt_chg_prof *batt_prop)
+{
+	if (!batt_property)
+		return -ENODEV;
+
+	memcpy(batt_prop, batt_property,
+			sizeof(struct ps_batt_chg_prof));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_batt_prop);
--- /dev/null
+/*
+ * bq24261_charger.c - BQ24261 Charger I2C client driver
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+#include <linux/power/bq24261_charger.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <asm/intel_scu_ipc.h>
+
+#define DEV_NAME "bq24261_charger"
+#define DEV_MANUFACTURER "TI"
+#define MODEL_NAME_SIZE 8
+#define DEV_MANUFACTURER_NAME_SIZE 4
+
+#define CHRG_TERM_WORKER_DELAY (30 * HZ)
+#define EXCEPTION_MONITOR_DELAY (60 * HZ)
+#define WDT_RESET_DELAY (15 * HZ)
+
+/* BQ24261 registers */
+#define BQ24261_STAT_CTRL0_ADDR 0x00
+#define BQ24261_CTRL_ADDR 0x01
+#define BQ24261_BATT_VOL_CTRL_ADDR 0x02
+#define BQ24261_VENDOR_REV_ADDR 0x03
+#define BQ24261_TERM_FCC_ADDR 0x04
+#define BQ24261_VINDPM_STAT_ADDR 0x05
+#define BQ24261_ST_NTC_MON_ADDR 0x06
+
+#define BQ24261_RESET_MASK (0x01 << 7)
+#define BQ24261_RESET_ENABLE (0x01 << 7)
+
+#define BQ24261_FAULT_MASK 0x07
+#define BQ24261_STAT_MASK (0x03 << 4)
+#define BQ24261_BOOST_MASK (0x01 << 6)
+#define BQ24261_TMR_RST_MASK (0x01 << 7)
+#define BQ24261_TMR_RST (0x01 << 7)
+
+#define BQ24261_ENABLE_BOOST (0x01 << 6)
+
+#define BQ24261_VOVP 0x01
+#define BQ24261_LOW_SUPPLY 0x02
+#define BQ24261_THERMAL_SHUTDOWN 0x03
+#define BQ24261_BATT_TEMP_FAULT 0x04
+#define BQ24261_TIMER_FAULT 0x05
+#define BQ24261_BATT_OVP 0x06
+#define BQ24261_NO_BATTERY 0x07
+#define BQ24261_STAT_READY 0x00
+
+#define BQ24261_STAT_CHRG_PRGRSS (0x01 << 4)
+#define BQ24261_STAT_CHRG_DONE (0x02 << 4)
+#define BQ24261_STAT_FAULT (0x03 << 4)
+
+#define BQ24261_CE_MASK (0x01 << 1)
+#define BQ24261_CE_DISABLE (0x01 << 1)
+
+#define BQ24261_HZ_MASK (0x01)
+#define BQ24261_HZ_ENABLE (0x01)
+
+#define BQ24261_ICHRG_MASK (0x1F << 3)
+#define BQ24261_ICHRG_100ma (0x01 << 3)
+#define BQ24261_ICHRG_200ma (0x01 << 4)
+#define BQ24261_ICHRG_400ma (0x01 << 5)
+#define BQ24261_ICHRG_800ma (0x01 << 6)
+#define BQ24261_ICHRG_1600ma (0x01 << 7)
+
+#define BQ24261_ITERM_MASK (0x03)
+#define BQ24261_ITERM_50ma (0x01 << 0)
+#define BQ24261_ITERM_100ma (0x01 << 1)
+#define BQ24261_ITERM_200ma (0x01 << 2)
+
+#define BQ24261_VBREG_MASK (0x3F << 2)
+
+/*
+ * Input current limit (IN_ILIM) is a 3-bit field in bits 6:4: the codes
+ * below go up to (0x06 << 4), which cannot fit under the previous 2-bit
+ * mask (0x03 << 4) -- masked updates would have clobbered bit 6.
+ */
+#define BQ24261_INLMT_MASK (0x07 << 4)
+#define BQ24261_INLMT_100 0x00
+#define BQ24261_INLMT_150 (0x01 << 4)
+#define BQ24261_INLMT_500 (0x02 << 4)
+#define BQ24261_INLMT_900 (0x03 << 4)
+#define BQ24261_INLMT_1500 (0x04 << 4)
+#define BQ24261_INLMT_2500 (0x06 << 4)
+
+#define BQ24261_TE_MASK (0x01 << 2)
+#define BQ24261_TE_ENABLE (0x01 << 2)
+#define BQ24261_STAT_ENABLE_MASK (0x01 << 3)
+#define BQ24261_STAT_ENABLE (0x01 << 3)
+
+#define BQ24261_VENDOR_MASK (0x07 << 5)
+#define BQ24261_VENDOR (0x02 << 5)
+#define BQ24261_REV_MASK (0x07)
+#define BQ24261_2_3_REV (0x06)
+#define BQ24261_REV (0x02)
+#define BQ24260_REV (0x01)
+
+#define BQ24261_TS_MASK (0x01 << 3)
+#define BQ24261_TS_ENABLED (0x01 << 3)
+#define BQ24261_BOOST_ILIM_MASK (0x01 << 4)
+#define BQ24261_BOOST_ILIM_500ma (0x0)
+#define BQ24261_BOOST_ILIM_1A (0x01 << 4)
+
+#define BQ24261_SAFETY_TIMER_MASK (0x03 << 5)
+#define BQ24261_SAFETY_TIMER_40MIN 0x00
+#define BQ24261_SAFETY_TIMER_6HR (0x01 << 5)
+#define BQ24261_SAFETY_TIMER_9HR (0x02 << 5)
+#define BQ24261_SAFETY_TIMER_DISABLED (0x03 << 5)
+
+/* 1% above voltage max design to report over voltage */
+#define BQ24261_OVP_MULTIPLIER 1010
+#define BQ24261_OVP_RECOVER_MULTIPLIER 990
+#define BQ24261_DEF_BAT_VOLT_MAX_DESIGN 4200000
+
+/* Settings for Voltage / DPPM Register (05) */
+#define BQ24261_VBATT_LEVEL1 3700000
+#define BQ24261_VBATT_LEVEL2 3960000
+#define BQ24261_VINDPM_MASK (0x07)
+#define BQ24261_VINDPM_320MV (0x01 << 2)
+#define BQ24261_VINDPM_160MV (0x01 << 1)
+#define BQ24261_VINDPM_80MV (0x01 << 0)
+#define BQ24261_CD_STATUS_MASK (0x01 << 3)
+#define BQ24261_DPM_EN_MASK (0x01 << 4)
+#define BQ24261_DPM_EN_FORCE (0x01 << 4)
+#define BQ24261_LOW_CHG_MASK (0x01 << 5)
+#define BQ24261_LOW_CHG_EN (0x01 << 5)
+#define BQ24261_LOW_CHG_DIS (~BQ24261_LOW_CHG_EN)
+#define BQ24261_DPM_STAT_MASK (0x01 << 6)
+#define BQ24261_MINSYS_STAT_MASK (0x01 << 7)
+
+#define BQ24261_MIN_CC 500
+
+/*
+ * {threshold, register code} tables, ascending by threshold.
+ * lookup_regval() selects the largest entry not exceeding the request.
+ */
+
+/* safety timer: minutes -> TMR field */
+u16 bq24261_sfty_tmr[][2] = {
+	{0,   BQ24261_SAFETY_TIMER_DISABLED},
+	{40,  BQ24261_SAFETY_TIMER_40MIN},
+	{360, BQ24261_SAFETY_TIMER_6HR},
+	{540, BQ24261_SAFETY_TIMER_9HR},
+};
+
+/* input current limit: mA -> IN_ILIM field */
+u16 bq24261_inlmt[][2] = {
+	{100,  BQ24261_INLMT_100},
+	{150,  BQ24261_INLMT_150},
+	{500,  BQ24261_INLMT_500},
+	{900,  BQ24261_INLMT_900},
+	{1500, BQ24261_INLMT_1500},
+	{2500, BQ24261_INLMT_2500},
+};
+
+/* termination current: mA -> ITERM field (50 mA steps) */
+u16 bq24261_iterm[][2] = {
+	{0,   0x00},
+	{50,  BQ24261_ITERM_50ma},
+	{100, BQ24261_ITERM_100ma},
+	{150, BQ24261_ITERM_100ma | BQ24261_ITERM_50ma},
+	{200, BQ24261_ITERM_200ma},
+	{250, BQ24261_ITERM_200ma | BQ24261_ITERM_50ma},
+	{300, BQ24261_ITERM_200ma | BQ24261_ITERM_100ma},
+	{350, BQ24261_ITERM_200ma | BQ24261_ITERM_100ma | BQ24261_ITERM_50ma},
+};
+
+/* charge current: mA -> ICHRG field (100 mA steps above 500 mA base) */
+u16 bq24261_cc[][2] = {
+	{500,  0x00},
+	{600,  BQ24261_ICHRG_100ma},
+	{700,  BQ24261_ICHRG_200ma},
+	{800,  BQ24261_ICHRG_100ma | BQ24261_ICHRG_200ma},
+	{900,  BQ24261_ICHRG_400ma},
+	{1000, BQ24261_ICHRG_400ma | BQ24261_ICHRG_100ma},
+	{1100, BQ24261_ICHRG_400ma | BQ24261_ICHRG_200ma},
+	{1200, BQ24261_ICHRG_400ma | BQ24261_ICHRG_200ma | BQ24261_ICHRG_100ma},
+	{1300, BQ24261_ICHRG_800ma},
+	{1400, BQ24261_ICHRG_800ma | BQ24261_ICHRG_100ma},
+	{1500, BQ24261_ICHRG_800ma | BQ24261_ICHRG_200ma},
+};
+
+#define BQ24261_MIN_CV 3500
+#define BQ24261_MAX_CV 4440
+#define BQ24261_CV_DIV 20
+#define BQ24261_CV_BIT_POS 2
+
+static enum power_supply_property bq24261_usb_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_INLMT,
+ POWER_SUPPLY_PROP_ENABLE_CHARGING,
+ POWER_SUPPLY_PROP_ENABLE_CHARGER,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CUR,
+ POWER_SUPPLY_PROP_CABLE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MAX_TEMP,
+ POWER_SUPPLY_PROP_MIN_TEMP,
+};
+
+enum bq24261_chrgr_stat {
+ BQ24261_CHRGR_STAT_UNKNOWN,
+ BQ24261_CHRGR_STAT_READY,
+ BQ24261_CHRGR_STAT_CHARGING,
+ BQ24261_CHRGR_STAT_BAT_FULL,
+ BQ24261_CHRGR_STAT_FAULT,
+};
+
+struct bq24261_otg_event {
+ struct list_head node;
+ bool is_enable;
+};
+
+struct bq24261_charger {
+
+ struct mutex lock;
+ struct i2c_client *client;
+ struct bq24261_plat_data *pdata;
+ struct power_supply psy_usb;
+ struct delayed_work sw_term_work;
+ struct delayed_work wdt_work;
+ struct delayed_work low_supply_fault_work;
+ struct delayed_work exception_mon_work;
+ struct notifier_block otg_nb;
+ struct usb_phy *transceiver;
+ struct work_struct otg_work;
+ struct work_struct irq_work;
+ struct list_head otg_queue;
+ struct list_head irq_queue;
+ wait_queue_head_t wait_ready;
+ spinlock_t otg_queue_lock;
+ void __iomem *irq_iomap;
+
+ int chrgr_health;
+ int bat_health;
+ int cc;
+ int cv;
+ int inlmt;
+ int max_cc;
+ int max_cv;
+ int iterm;
+ int cable_type;
+ int cntl_state;
+ int max_temp;
+ int min_temp;
+ int revision;
+ enum bq24261_chrgr_stat chrgr_stat;
+ bool online;
+ bool present;
+ bool is_charging_enabled;
+ bool is_charger_enabled;
+ bool is_vsys_on;
+ bool boost_mode;
+ bool is_hw_chrg_term;
+ char model_name[MODEL_NAME_SIZE];
+ char manufacturer[DEV_MANUFACTURER_NAME_SIZE];
+};
+
+enum bq2426x_model_num {
+ BQ2426X = 0,
+ BQ24260,
+ BQ24261,
+};
+
+struct bq2426x_model {
+ char model_name[MODEL_NAME_SIZE];
+ enum bq2426x_model_num model;
+};
+
+static struct bq2426x_model bq24261_model_name[] = {
+ { "bq2426x", BQ2426X },
+ { "bq24260", BQ24260 },
+ { "bq24261", BQ24261 },
+};
+
+struct i2c_client *bq24261_client;
+static inline int get_battery_voltage(int *volt);
+static inline int get_battery_current(int *cur);
+static int bq24261_handle_irq(struct bq24261_charger *chip, u8 stat_reg);
+static inline int bq24261_set_iterm(struct bq24261_charger *chip, int iterm);
+
+/*
+ * get_power_supply_type - map a charger cable type to the power-supply
+ * type reported to userspace. Unknown/SDP/no-cable fall back to
+ * POWER_SUPPLY_TYPE_USB.
+ *
+ * Fix: every switch path returns, so the trailing return after the
+ * switch was unreachable dead code and has been removed.
+ */
+enum power_supply_type get_power_supply_type(
+		enum power_supply_charger_cable_type cable)
+{
+	switch (cable) {
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		return POWER_SUPPLY_TYPE_USB_DCP;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		return POWER_SUPPLY_TYPE_USB_CDP;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		return POWER_SUPPLY_TYPE_USB_ACA;
+	case POWER_SUPPLY_CHARGER_TYPE_AC:
+		return POWER_SUPPLY_TYPE_MAINS;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	default:
+		return POWER_SUPPLY_TYPE_USB;
+	}
+}
+
+/*
+ * lookup_regval - map a requested value to the register code of the
+ * largest table entry whose threshold does not exceed it.
+ * @tbl:     {threshold, code} pairs sorted ascending by threshold
+ * @size:    number of entries in @tbl
+ * @in_val:  requested value
+ * @out_val: receives the selected register code
+ */
+static void lookup_regval(u16 tbl[][2], size_t size, u16 in_val, u8 *out_val)
+{
+	size_t idx = size - 1;
+
+	while (idx > 0 && in_val < tbl[idx][0])
+		idx--;
+
+	*out_val = (u8)tbl[idx][1];
+}
+
+/* Translate a charge current setting into the ICHRG register code. */
+void bq24261_cc_to_reg(int cc, u8 *reg_val)
+{
+	return lookup_regval(bq24261_cc, ARRAY_SIZE(bq24261_cc), cc, reg_val);
+
+}
+
+/*
+ * bq24261_cv_to_reg - translate a charge voltage into the VBREG field
+ *
+ * Clamps to [BQ24261_MIN_CV, BQ24261_MAX_CV] and encodes the offset
+ * above the minimum in BQ24261_CV_DIV steps at BQ24261_CV_BIT_POS.
+ */
+void bq24261_cv_to_reg(int cv, u8 *reg_val)
+{
+	int val;
+
+	val = clamp_t(int, cv, BQ24261_MIN_CV, BQ24261_MAX_CV);
+	*reg_val =
+		(((val - BQ24261_MIN_CV) / BQ24261_CV_DIV)
+			<< BQ24261_CV_BIT_POS);
+}
+
+/* Translate an input current limit into its register code. */
+void bq24261_inlmt_to_reg(int inlmt, u8 *regval)
+{
+	return lookup_regval(bq24261_inlmt, ARRAY_SIZE(bq24261_inlmt),
+			     inlmt, regval);
+}
+
+/* Translate a termination current into its register code. */
+static inline void bq24261_iterm_to_reg(int iterm, u8 *regval)
+{
+	return lookup_regval(bq24261_iterm, ARRAY_SIZE(bq24261_iterm),
+			     iterm, regval);
+}
+
+/* Translate a safety-timer setting into its register code. */
+static inline void bq24261_sfty_tmr_to_reg(int tmr, u8 *regval)
+{
+	return lookup_regval(bq24261_sfty_tmr, ARRAY_SIZE(bq24261_sfty_tmr),
+			     tmr, regval);
+}
+
+/* Read a register over SMBus; returns the value or a negative errno. */
+static inline int bq24261_read_reg(struct i2c_client *client, u8 reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		dev_err(&client->dev, "Error(%d) in reading reg %d\n", ret,
+			reg);
+
+	return ret;
+}
+
+
+/*
+ * bq24261_dump_regs - log battery readings and registers 0x00-0x06
+ * @dump_master: also invoke the platform hook that dumps the master
+ *               charger registers, when one is provided.
+ *
+ * Best-effort debug aid: read failures are logged and skipped.
+ * Fixes the "VOlatge"/"ma" typos in the log messages.
+ */
+static inline void bq24261_dump_regs(bool dump_master)
+{
+	int i;
+	int ret;
+	int bat_cur, bat_volt;
+	struct bq24261_charger *chip;
+
+	if (!bq24261_client)
+		return;
+
+	chip = i2c_get_clientdata(bq24261_client);
+
+	ret = get_battery_current(&bat_cur);
+	if (ret)
+		dev_err(&bq24261_client->dev,
+			"%s: Error in getting battery current", __func__);
+	else
+		dev_info(&bq24261_client->dev, "Battery Current=%dmA\n",
+			(bat_cur/1000));
+
+	ret = get_battery_voltage(&bat_volt);
+	if (ret)
+		dev_err(&bq24261_client->dev,
+			"%s: Error in getting battery voltage", __func__);
+	else
+		dev_info(&bq24261_client->dev, "Battery Voltage=%dmV\n",
+			(bat_volt/1000));
+
+
+	dev_info(&bq24261_client->dev, "BQ24261 Register dump\n");
+
+	dev_info(&bq24261_client->dev, "*======================*\n");
+	/* Registers 0x00..0x06 cover the whole BQ24261 register map */
+	for (i = 0; i < 7; ++i) {
+		ret = bq24261_read_reg(bq24261_client, i);
+		if (ret < 0)
+			dev_err(&bq24261_client->dev,
+				"Error in reading REG 0x%X\n", i);
+		else
+			dev_info(&bq24261_client->dev,
+				"0x%X=0x%X ", i, ret);
+	}
+	dev_info(&bq24261_client->dev, "*======================*\n");
+
+	if (chip->pdata->dump_master_regs && dump_master)
+		chip->pdata->dump_master_regs();
+
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/* Show one register's value; the register address is the file private. */
+static int bq24261_reg_show(struct seq_file *seq, void *unused)
+{
+	int val;
+	u8 reg;
+
+	reg = *((u8 *)seq->private);
+	val = bq24261_read_reg(bq24261_client, reg);
+
+	seq_printf(seq, "0x%02x\n", val);
+	return 0;
+}
+
+static int bq24261_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, bq24261_reg_show, inode->i_private);
+}
+
+/* Registers exposed through debugfs, one read-only file per register. */
+static u32 bq24261_register_set[] = {
+	BQ24261_STAT_CTRL0_ADDR,
+	BQ24261_CTRL_ADDR,
+	BQ24261_BATT_VOL_CTRL_ADDR,
+	BQ24261_VENDOR_REV_ADDR,
+	BQ24261_TERM_FCC_ADDR,
+	BQ24261_VINDPM_STAT_ADDR,
+	BQ24261_ST_NTC_MON_ADDR,
+};
+
+static struct dentry *bq24261_dbgfs_dir;
+
+static const struct file_operations bq24261_dbg_fops = {
+	.open = bq24261_dbgfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Create <debugfs>/DEV_NAME/<reg> files; best effort, errors only logged. */
+static void bq24261_debugfs_init(void)
+{
+	struct dentry *fentry;
+	u32 count = ARRAY_SIZE(bq24261_register_set);
+	u32 i;
+	char name[6] = {0};
+
+	bq24261_dbgfs_dir = debugfs_create_dir(DEV_NAME, NULL);
+	if (bq24261_dbgfs_dir == NULL)
+		goto debugfs_root_exit;
+
+	for (i = 0; i < count; i++) {
+		snprintf(name, 6, "%02x", bq24261_register_set[i]);
+		fentry = debugfs_create_file(name, S_IRUGO,
+					     bq24261_dbgfs_dir,
+					     &bq24261_register_set[i],
+					     &bq24261_dbg_fops);
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+	/* Success message demoted from dev_err to dev_info */
+	dev_info(&bq24261_client->dev, "Debugfs created successfully!!\n");
+	return;
+
+debugfs_err_exit:
+	debugfs_remove_recursive(bq24261_dbgfs_dir);
+debugfs_root_exit:
+	dev_err(&bq24261_client->dev, "Error Creating debugfs!!\n");
+}
+
+static void bq24261_debugfs_exit(void)
+{
+	if (bq24261_dbgfs_dir)
+		debugfs_remove_recursive(bq24261_dbgfs_dir);
+}
+
+#else
+/* Debugfs disabled: keep empty stubs so callers need no #ifdefs. */
+static void bq24261_debugfs_init(void)
+{
+}
+
+static void bq24261_debugfs_exit(void)
+{
+}
+#endif
+
+/* Write @data to @reg over SMBus; logs and returns a negative errno on failure. */
+static inline int bq24261_write_reg(struct i2c_client *client, u8 reg, u8 data)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, data);
+	if (ret < 0)
+		dev_err(&client->dev, "Error(%d) in writing %d to reg %d\n",
+			ret, data, reg);
+
+	return ret;
+}
+
+/*
+ * Read @reg, replace the bits selected by @mask with @val, write it back.
+ * Returns the write result, or the negative error from the read.
+ */
+static inline int bq24261_read_modify_reg(struct i2c_client *client, u8 reg,
+					  u8 mask, u8 val)
+{
+	int ret;
+
+	ret = bq24261_read_reg(client, reg);
+	if (ret < 0)
+		return ret;
+	ret = (ret & ~mask) | (mask & val);
+	return bq24261_write_reg(client, reg, ret);
+}
+
+/*
+ * bq24261_tmr_ntc_init - program safety timer, TS monitoring and boost
+ * current limit in the ST/NTC monitor register.
+ *
+ * Fixes the mojibake "&reg_val" (previously corrupted to an HTML
+ * entity) in the safety-timer conversion call.
+ */
+static inline int bq24261_tmr_ntc_init(struct bq24261_charger *chip)
+{
+	u8 reg_val;
+	int ret;
+
+	bq24261_sfty_tmr_to_reg(chip->pdata->safety_timer, &reg_val);
+
+	if (chip->pdata->is_ts_enabled)
+		reg_val |= BQ24261_TS_ENABLED;
+
+	/* Check if boost mode current configuration is above 1A*/
+	if (chip->pdata->boost_mode_ma >= 1000)
+		reg_val |= BQ24261_BOOST_ILIM_1A;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_ST_NTC_MON_ADDR,
+			BQ24261_TS_MASK|BQ24261_SAFETY_TIMER_MASK|
+			BQ24261_BOOST_ILIM_MASK, reg_val);
+
+	return ret;
+}
+
+/*
+ * bq24261_enable_charging - enable or disable battery charging
+ * @chip: charger context
+ * @val: true to enable charging, false to disable
+ *
+ * If the charger currently reports FAULT, wait (up to 1s) for the IRQ
+ * path to report READY before touching the charge-enable bits.  When
+ * enabling, the termination current and timer/NTC configuration are
+ * reprogrammed as well.  Returns 0 on success or a negative errno.
+ *
+ * Bug fix: the wait condition used "!=" READY, which is true
+ * immediately while the charger is still in FAULT, so the wait never
+ * actually waited; it must be "==" READY.
+ */
+static inline int bq24261_enable_charging(
+	struct bq24261_charger *chip, bool val)
+{
+	int ret;
+	u8 reg_val;
+	bool is_ready;
+
+	ret = bq24261_read_reg(chip->client,
+			BQ24261_STAT_CTRL0_ADDR);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"Error(%d) in reading BQ24261_STAT_CTRL0_ADDR\n", ret);
+	}
+
+	is_ready = (ret & BQ24261_STAT_MASK) != BQ24261_STAT_FAULT;
+
+	/* If status is fault, wait for READY before enabling the charging */
+
+	if (!is_ready) {
+		ret = wait_event_timeout(chip->wait_ready,
+			(chip->chrgr_stat == BQ24261_CHRGR_STAT_READY),
+				HZ);
+		dev_info(&chip->client->dev,
+			"chrgr_stat=%x\n", chip->chrgr_stat);
+		if (ret == 0) {
+			dev_err(&chip->client->dev,
+				"Waiting for Charger Ready Failed.Enabling charging anyway\n");
+		}
+	}
+
+	if (chip->pdata->enable_charging)
+		chip->pdata->enable_charging(val);
+
+	if (val) {
+		reg_val = (~BQ24261_CE_DISABLE & BQ24261_CE_MASK);
+		if (chip->is_hw_chrg_term)
+			reg_val |= BQ24261_TE_ENABLE;
+	} else {
+		reg_val = BQ24261_CE_DISABLE;
+	}
+
+	reg_val |= BQ24261_STAT_ENABLE;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+			BQ24261_STAT_ENABLE_MASK|BQ24261_RESET_MASK|
+				BQ24261_CE_MASK|BQ24261_TE_MASK,
+					reg_val);
+	if (ret || !val)
+		return ret;
+
+	bq24261_set_iterm(chip, chip->iterm);
+	return bq24261_tmr_ntc_init(chip);
+}
+
+/* Kick the charger safety/watchdog timer by writing TMR_RST. */
+static inline int bq24261_reset_timer(struct bq24261_charger *chip)
+{
+	return bq24261_read_modify_reg(chip->client, BQ24261_STAT_CTRL0_ADDR,
+			BQ24261_TMR_RST_MASK, BQ24261_TMR_RST);
+}
+
+/*
+ * bq24261_enable_charger - connect/disconnect the charger input
+ * @chip: charger context
+ * @val: non-zero to enable, zero to disable
+ *
+ * Enabling clears Hi-Z mode, disabling sets it; the safety timer is
+ * kicked afterwards.  Returns 0 on success or a negative error code.
+ */
+static inline int bq24261_enable_charger(
+	struct bq24261_charger *chip, int val)
+{
+
+	/* TODO: Implement enable/disable HiZ mode to enable/
+	 * disable charger
+	 */
+	u8 reg_val;
+	int ret;
+
+	reg_val = val ? (~BQ24261_HZ_ENABLE & BQ24261_HZ_MASK) :
+			BQ24261_HZ_ENABLE;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+			BQ24261_HZ_MASK|BQ24261_RESET_MASK, reg_val);
+	if (ret)
+		return ret;
+
+	return bq24261_reset_timer(chip);
+}
+
+/*
+ * bq24261_set_cc - program the charge current
+ *
+ * Non-zero currents below BQ24261_MIN_CC are handled via the LOW_CHG
+ * bit.  If the platform supplies a set_cc hook, the actual ICHRG write
+ * is delegated to it.  Fixes the mojibake "&reg_val" in the
+ * conversion call.
+ */
+static inline int bq24261_set_cc(struct bq24261_charger *chip, int cc)
+{
+	u8 reg_val;
+	int ret;
+
+	dev_dbg(&chip->client->dev, "cc=%d\n", cc);
+	if (chip->pdata->set_cc) {
+		ret = chip->pdata->set_cc(cc);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (cc && (cc < BQ24261_MIN_CC)) {
+		dev_dbg(&chip->client->dev, "Set LOW_CHG bit\n");
+		reg_val = BQ24261_LOW_CHG_EN;
+		ret = bq24261_read_modify_reg(chip->client,
+				BQ24261_VINDPM_STAT_ADDR,
+				BQ24261_LOW_CHG_MASK, reg_val);
+	} else {
+		dev_dbg(&chip->client->dev, "Clear LOW_CHG bit\n");
+		reg_val = BQ24261_LOW_CHG_DIS;
+		ret = bq24261_read_modify_reg(chip->client,
+				BQ24261_VINDPM_STAT_ADDR,
+				BQ24261_LOW_CHG_MASK, reg_val);
+	}
+
+	/* Return from here since the cc setting will be done
+	   by platform specific hardware */
+	if (chip->pdata->set_cc)
+		return ret;
+
+	bq24261_cc_to_reg(cc, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_TERM_FCC_ADDR,
+			BQ24261_ICHRG_MASK, reg_val);
+}
+
+/*
+ * bq24261_set_cv - program the charge voltage
+ *
+ * Adjusts the VINDPM threshold based on the present battery voltage
+ * before writing VBREG (or delegating to the platform set_cv hook).
+ * Fixes the mojibake "&reg_val" in the conversion call.
+ */
+static inline int bq24261_set_cv(struct bq24261_charger *chip, int cv)
+{
+	int bat_volt;
+	int ret;
+	u8 reg_val;
+	u8 vindpm_val = 0x0;
+
+	/*
+	 * Setting VINDPM value as per the battery voltage
+	 *  VBatt           Vindpm     Register Setting
+	 *  < 3.7v           4.2v       0x0 (default)
+	 *  3.71v - 3.96v    4.36v      0x2
+	 *  > 3.96v          4.6v       0x5
+	 */
+	ret = get_battery_voltage(&bat_volt);
+	if (ret) {
+		dev_err(&chip->client->dev,
+			"Error getting battery voltage!!\n");
+	} else {
+		if (bat_volt > BQ24261_VBATT_LEVEL2)
+			vindpm_val =
+				(BQ24261_VINDPM_320MV | BQ24261_VINDPM_80MV);
+		else if (bat_volt > BQ24261_VBATT_LEVEL1)
+			vindpm_val = BQ24261_VINDPM_160MV;
+	}
+
+	ret = bq24261_read_modify_reg(chip->client,
+			BQ24261_VINDPM_STAT_ADDR,
+			BQ24261_VINDPM_MASK,
+			vindpm_val);
+	if (ret) {
+		dev_err(&chip->client->dev,
+			"Error setting VINDPM setting!!\n");
+		return ret;
+	}
+
+	if (chip->pdata->set_cv)
+		return chip->pdata->set_cv(cv);
+
+	bq24261_cv_to_reg(cv, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_BATT_VOL_CTRL_ADDR,
+			BQ24261_VBREG_MASK, reg_val);
+}
+
+/*
+ * Program the input current limit, or delegate to the platform hook.
+ * Fixes the mojibake "&reg_val" in the conversion call.
+ */
+static inline int bq24261_set_inlmt(struct bq24261_charger *chip, int inlmt)
+{
+	u8 reg_val;
+
+	if (chip->pdata->set_inlmt)
+		return chip->pdata->set_inlmt(inlmt);
+
+	bq24261_inlmt_to_reg(inlmt, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+			BQ24261_RESET_MASK|BQ24261_INLMT_MASK, reg_val);
+
+}
+
+/*
+ * resume_charging - re-apply the cached charger configuration
+ *
+ * Restores charger enable, input limit, CC, CV and charging enable from
+ * the values cached in @chip; used after fault recovery.
+ */
+static inline void resume_charging(struct bq24261_charger *chip)
+{
+
+	if (chip->is_charger_enabled)
+		bq24261_enable_charger(chip, true);
+	if (chip->inlmt)
+		bq24261_set_inlmt(chip, chip->inlmt);
+	if (chip->cc)
+		bq24261_set_cc(chip, chip->cc);
+	if (chip->cv)
+		bq24261_set_cv(chip, chip->cv);
+	if (chip->is_charging_enabled)
+		bq24261_enable_charging(chip, true);
+}
+
+/*
+ * Program the charge termination current, or delegate to the platform
+ * hook.  Fixes the mojibake "&reg_val" in the conversion call.
+ */
+static inline int bq24261_set_iterm(struct bq24261_charger *chip, int iterm)
+{
+	u8 reg_val;
+
+	if (chip->pdata->set_iterm)
+		return chip->pdata->set_iterm(iterm);
+
+	bq24261_iterm_to_reg(iterm, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_TERM_FCC_ADDR,
+			BQ24261_ITERM_MASK, reg_val);
+}
+
+/*
+ * bq24261_enable_hw_charge_term - toggle hardware charge termination
+ *
+ * Sets/clears the TE bit and, on success, mirrors the new state in
+ * chip->is_hw_chrg_term.  Returns 0 on success.
+ */
+static inline int bq24261_enable_hw_charge_term(
+	struct bq24261_charger *chip, bool val)
+{
+	u8 data;
+	int ret;
+
+	data = val ? BQ24261_TE_ENABLE : (~BQ24261_TE_ENABLE & BQ24261_TE_MASK);
+
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+			BQ24261_RESET_MASK|BQ24261_TE_MASK, data);
+
+	if (ret)
+		return ret;
+
+	chip->is_hw_chrg_term = val ? true : false;
+
+	return ret;
+}
+
+/*
+ * bq24261_enable_boost_mode - enter/exit VBUS boost (OTG host) mode
+ * @chip: charger context
+ * @val: non-zero to enable boost mode, zero to disable
+ *
+ * On the BQ24261_REV revision the platform VBUS hook and a periodic
+ * watchdog-reset worker are also managed around the boost bit.  On
+ * disable, the power-supply core is notified so charging can resume if
+ * another supply (e.g. a DC adapter) is present.  Returns 0 on success.
+ */
+static inline int bq24261_enable_boost_mode(
+	struct bq24261_charger *chip, int val)
+{
+	int ret = 0;
+
+
+	if (val) {
+
+		if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+			if (chip->pdata->enable_vbus)
+				chip->pdata->enable_vbus(true);
+		}
+
+		/* TODO: Support different Host Mode Current limits */
+
+		bq24261_enable_charger(chip, true);
+		ret =
+		    bq24261_read_modify_reg(chip->client,
+					    BQ24261_STAT_CTRL0_ADDR,
+					    BQ24261_BOOST_MASK,
+					    BQ24261_ENABLE_BOOST);
+		if (unlikely(ret))
+			return ret;
+
+		ret = bq24261_tmr_ntc_init(chip);
+		if (unlikely(ret))
+			return ret;
+		chip->boost_mode = true;
+
+		/* This revision needs periodic WDT kicks while boosting */
+		if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV)
+			schedule_delayed_work(&chip->wdt_work, 0);
+
+		dev_info(&chip->client->dev, "Boost Mode enabled\n");
+	} else {
+
+		ret =
+		    bq24261_read_modify_reg(chip->client,
+					    BQ24261_STAT_CTRL0_ADDR,
+					    BQ24261_BOOST_MASK,
+					    ~BQ24261_ENABLE_BOOST);
+
+		if (unlikely(ret))
+			return ret;
+		/* if charging need not to be enabled, disable
+		 * the charger else keep the charger on
+		 */
+		if (!chip->is_charging_enabled)
+			bq24261_enable_charger(chip, false);
+		chip->boost_mode = false;
+		dev_info(&chip->client->dev, "Boost Mode disabled\n");
+
+		if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+			cancel_delayed_work_sync(&chip->wdt_work);
+
+			if (chip->pdata->enable_vbus)
+				chip->pdata->enable_vbus(false);
+		}
+
+		/* Notify power supply subsystem to enable charging
+		 * if needed. Eg. if DC adapter is connected
+		 */
+		power_supply_changed(&chip->psy_usb);
+	}
+
+	return ret;
+}
+
+/*
+ * bq24261_is_vsys_on - query whether the charger can supply VSYS
+ *
+ * Returns false if the charger is in Hi-Z mode (while supposedly
+ * enabled), if the CD line is asserted, or if the registers cannot be
+ * read; otherwise true.  Dumps registers on the failure paths.
+ */
+static inline bool bq24261_is_vsys_on(struct bq24261_charger *chip)
+{
+	int ret;
+	struct i2c_client *client = chip->client;
+
+	ret = bq24261_read_reg(client, BQ24261_CTRL_ADDR);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"Error(%d) in reading BQ24261_CTRL_ADDR\n", ret);
+		return false;
+	}
+
+	if (((ret & BQ24261_HZ_MASK) == BQ24261_HZ_ENABLE) &&
+	chip->is_charger_enabled) {
+		dev_err(&client->dev, "Charger in Hi Z Mode\n");
+		bq24261_dump_regs(true);
+		return false;
+	}
+
+	ret = bq24261_read_reg(client, BQ24261_VINDPM_STAT_ADDR);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"Error(%d) in reading BQ24261_VINDPM_STAT_ADDR\n", ret);
+		return false;
+	}
+
+	if (ret & BQ24261_CD_STATUS_MASK) {
+		dev_err(&client->dev, "CD line asserted\n");
+		bq24261_dump_regs(true);
+		return false;
+	}
+
+	return true;
+}
+
+
+/*
+ * bq24261_is_online - report whether the charger input is usable
+ *
+ * Uses cached state when charging is active; otherwise (fault, or
+ * charging stopped) queries the hardware, since the part only
+ * interrupts on stop/resume.
+ */
+static inline bool bq24261_is_online(struct bq24261_charger *chip)
+{
+	if (chip->cable_type == POWER_SUPPLY_CHARGER_TYPE_NONE)
+		return false;
+	else if (!chip->is_charger_enabled)
+		return false;
+	/* BQ24261 gives interrupt only on stop/resume charging.
+	 * If charging is already stopped, we need to query the hardware
+	 * to see charger is still active and can supply vsys or not.
+	 */
+	else if ((chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT) ||
+		 (!chip->is_charging_enabled))
+		return bq24261_is_vsys_on(chip);
+	else
+		return chip->is_vsys_on;
+}
+
+/*
+ * bq24261_usb_set_property - power_supply set_property callback
+ * @psy: the psy_usb supply embedded in struct bq24261_charger
+ * @psp: property being written
+ * @val: new value
+ *
+ * Applies charging-framework requests (enable charger/charging, CC, CV,
+ * input limit, termination current, cable type, throttle state and
+ * temperature window) under chip->lock.  Returns 0 on success, -ENODATA
+ * for unsupported properties, or a negative error from the hardware
+ * accessors.
+ */
+static int bq24261_usb_set_property(struct power_supply *psy,
+				    enum power_supply_property psp,
+				    const union power_supply_propval *val)
+{
+	struct bq24261_charger *chip = container_of(psy,
+						    struct bq24261_charger,
+						    psy_usb);
+	int ret = 0;
+
+
+	mutex_lock(&chip->lock);
+
+
+	switch (psp) {
+
+	case POWER_SUPPLY_PROP_PRESENT:
+		chip->present = val->intval;
+		/*If charging capable cable is present, then
+		hold the charger wakelock so that the target
+		does not enter suspend mode when charging is
+		in progress.
+		If charging cable has been removed, then
+		unlock the wakelock to allow the target to
+		enter the sleep mode*/
+/*		if (!wake_lock_active(&chip->chrgr_en_wakelock) &&
+			val->intval)
+			wake_lock(&chip->chrgr_en_wakelock);
+		else if (wake_lock_active(&chip->chrgr_en_wakelock) &&
+				!val->intval)
+			wake_unlock(&chip->chrgr_en_wakelock);
+*/
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		chip->online = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGING:
+
+		ret = bq24261_enable_charging(chip, val->intval);
+
+		if (ret)
+			dev_err(&chip->client->dev,
+				"Error(%d) in %s charging", ret,
+				(val->intval ? "enable" : "disable"));
+		else
+			chip->is_charging_enabled = val->intval;
+
+		/* HW termination only while charging; stop the SW
+		 * termination poll when charging is disabled. */
+		if (val->intval)
+			bq24261_enable_hw_charge_term(chip, true);
+		else
+			cancel_delayed_work_sync(&chip->sw_term_work);
+
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGER:
+
+		/* Don't enable the charger unless overvoltage is recovered */
+
+		if (chip->bat_health != POWER_SUPPLY_HEALTH_OVERVOLTAGE) {
+			ret = bq24261_enable_charger(chip, val->intval);
+
+			if (ret)
+				dev_err(&chip->client->dev,
+					"Error(%d) in %s charger", ret,
+					(val->intval ? "enable" : "disable"));
+			else
+				chip->is_charger_enabled = val->intval;
+		} else {
+			dev_info(&chip->client->dev, "Battery Over Voltage. Charger will be disabled\n");
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CURRENT:
+		ret = bq24261_set_cc(chip, val->intval);
+		if (!ret)
+			chip->cc = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_VOLTAGE:
+		ret = bq24261_set_cv(chip, val->intval);
+		if (!ret)
+			chip->cv = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT:
+		chip->max_cc = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE:
+		chip->max_cv = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CUR:
+		ret = bq24261_set_iterm(chip, val->intval);
+		if (!ret)
+			chip->iterm = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CABLE_TYPE:
+
+		chip->cable_type = val->intval;
+		chip->psy_usb.type = get_power_supply_type(chip->cable_type);
+		if (chip->cable_type != POWER_SUPPLY_CHARGER_TYPE_NONE) {
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+			chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+
+			/* Adding this processing in order to check
+			for any faults during connect */
+
+			ret = bq24261_read_reg(chip->client,
+					       BQ24261_STAT_CTRL0_ADDR);
+			if (ret < 0)
+				dev_err(&chip->client->dev,
+					"Error (%d) in reading status register(0x00)\n",
+					ret);
+			else
+				bq24261_handle_irq(chip, ret);
+		} else {
+			chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+			cancel_delayed_work_sync(&chip->low_supply_fault_work);
+		}
+
+
+		break;
+	case POWER_SUPPLY_PROP_INLMT:
+		ret = bq24261_set_inlmt(chip, val->intval);
+		if (!ret)
+			chip->inlmt = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+		chip->cntl_state = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_TEMP:
+		chip->max_temp = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MIN_TEMP:
+		chip->min_temp = val->intval;
+		break;
+	default:
+		ret = -ENODATA;
+	}
+
+	mutex_unlock(&chip->lock);
+	return ret;
+}
+
+/*
+ * bq24261_usb_get_property - power_supply get_property callback
+ *
+ * Reports cached driver state under chip->lock; ENABLE_CHARGER
+ * additionally queries the hardware via bq24261_is_online().
+ * Returns -EINVAL for unsupported properties.
+ */
+static int bq24261_usb_get_property(struct power_supply *psy,
+				    enum power_supply_property psp,
+				    union power_supply_propval *val)
+{
+	struct bq24261_charger *chip = container_of(psy,
+						    struct bq24261_charger,
+						    psy_usb);
+
+	mutex_lock(&chip->lock);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = chip->present;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = chip->online;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = chip->chrgr_health;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT:
+		val->intval = chip->max_cc;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE:
+		val->intval = chip->max_cv;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CURRENT:
+		val->intval = chip->cc;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_VOLTAGE:
+		val->intval = chip->cv;
+		break;
+	case POWER_SUPPLY_PROP_INLMT:
+		val->intval = chip->inlmt;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CUR:
+		val->intval = chip->iterm;
+		break;
+	case POWER_SUPPLY_PROP_CABLE_TYPE:
+		val->intval = chip->cable_type;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGING:
+		/* Boost (OTG) mode sources VBUS, so charging is off */
+		if (chip->boost_mode)
+			val->intval = false;
+		else
+			val->intval = (chip->is_charging_enabled &&
+			(chip->chrgr_stat == BQ24261_CHRGR_STAT_CHARGING));
+
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGER:
+		val->intval = bq24261_is_online(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+		val->intval = chip->cntl_state;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+		val->intval = chip->pdata->num_throttle_states;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = chip->model_name;
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURER:
+		val->strval = chip->manufacturer;
+		break;
+	case POWER_SUPPLY_PROP_MAX_TEMP:
+		val->intval = chip->max_temp;
+		break;
+	case POWER_SUPPLY_PROP_MIN_TEMP:
+		val->intval = chip->min_temp;
+		break;
+	default:
+		mutex_unlock(&chip->lock);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&chip->lock);
+	return 0;
+}
+
+/*
+ * get_psy_battery - find the first registered battery power supply
+ *
+ * Iterates the power_supply class devices and returns the first supply
+ * of type POWER_SUPPLY_TYPE_BATTERY, or NULL if none is registered.
+ * The pointless "static" on the local pointer (which kept a stale
+ * pointer across calls and was not thread-safe) has been removed.
+ */
+static inline struct power_supply *get_psy_battery(void)
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (pst->type == POWER_SUPPLY_TYPE_BATTERY) {
+			class_dev_iter_exit(&iter);
+			return pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return NULL;
+}
+
+/*
+ * get_battery_voltage - read VOLTAGE_NOW from the battery supply
+ * @volt: output, in the fuel gauge's native unit (callers divide by
+ *        1000 to log mV, so presumably uV — TODO confirm)
+ *
+ * Returns 0 on success, -EINVAL when no battery supply is registered,
+ * or the error from the supply's get_property.
+ */
+static inline int get_battery_voltage(int *volt)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+	if (!ret)
+		*volt = (val.intval);
+
+	return ret;
+}
+
+/*
+ * get_battery_volt_max_design - read VOLTAGE_MAX_DESIGN from the battery
+ * @volt: output, same unit as the battery driver reports
+ *
+ * Returns 0 on success, -EINVAL when no battery supply is registered.
+ */
+static inline int get_battery_volt_max_design(int *volt)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy,
+		POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &val);
+	if (!ret)
+		(*volt = val.intval);
+	return ret;
+}
+
+/*
+ * get_battery_current - read CURRENT_NOW from the battery supply
+ * @cur: output (callers divide by 1000 to log mA — presumably uA)
+ *
+ * Returns 0 on success, -EINVAL when no battery supply is registered.
+ */
+static inline int get_battery_current(int *cur)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW, &val);
+	if (!ret)
+		*cur = val.intval;
+
+	return ret;
+}
+
+/*
+ * bq24261_wdt_reset_worker - periodic watchdog kick while in boost mode
+ *
+ * Re-arms itself every WDT_RESET_DELAY; cancelled when boost mode is
+ * left.  Fixes the dev_err() call whose "%d" had no matching argument.
+ */
+static void bq24261_wdt_reset_worker(struct work_struct *work)
+{
+
+	struct bq24261_charger *chip = container_of(work,
+		    struct bq24261_charger, wdt_work.work);
+	int ret;
+	ret = bq24261_reset_timer(chip);
+
+	if (ret)
+		dev_err(&chip->client->dev, "Error (%d) in WDT reset\n", ret);
+	else
+		dev_info(&chip->client->dev, "WDT reset\n");
+
+	schedule_delayed_work(&chip->wdt_work, WDT_RESET_DELAY);
+}
+
+/*
+ * bq24261_sw_charge_term_worker - software charge-termination poll
+ *
+ * Re-arms itself every CHRG_TERM_WORKER_DELAY and pokes the power
+ * supply core so charge state is re-evaluated while hardware
+ * termination is disabled.
+ * NOTE(review): power_supply_changed(NULL) issues a supply-less
+ * notification — verify this is intended rather than &chip->psy_usb.
+ */
+static void bq24261_sw_charge_term_worker(struct work_struct *work)
+{
+
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    sw_term_work.work);
+
+	power_supply_changed(NULL);
+
+	schedule_delayed_work(&chip->sw_term_work,
+			      CHRG_TERM_WORKER_DELAY);
+
+}
+
+/*
+ * bq24261_get_bat_health - exported accessor for the cached battery health
+ *
+ * Returns the POWER_SUPPLY_HEALTH_* value last recorded by the IRQ
+ * path, or -ENODEV if the driver has not probed yet.
+ */
+int bq24261_get_bat_health(void)
+{
+
+	struct bq24261_charger *chip;
+
+	if (!bq24261_client)
+		return -ENODEV;
+
+	chip = i2c_get_clientdata(bq24261_client);
+
+	return chip->bat_health;
+}
+
+
+/*
+ * bq24261_low_supply_fault_work - delayed low-supply fault confirmation
+ *
+ * Scheduled ~5s after a LOW_SUPPLY fault; if the charger is still in
+ * FAULT by then, mark the charger health DEAD and notify userspace.
+ * Transient glitches that recovered in the meantime are ignored.
+ */
+static void bq24261_low_supply_fault_work(struct work_struct *work)
+{
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    low_supply_fault_work.work);
+
+	if (chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT) {
+		dev_err(&chip->client->dev, "Low Supply Fault detected!!\n");
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_DEAD;
+		power_supply_changed(&chip->psy_usb);
+		bq24261_dump_regs(true);
+	}
+	return;
+}
+
+
+/* is_bat_over_voltage: check battery is over voltage or not
+* @chip: bq24261_charger context
+*
+* This function is used to verify the over voltage condition.
+* In some scenarios, HW generates Over Voltage exceptions when
+* battery voltage is normal. This function uses the over voltage
+* condition (voltage_max_design * 1.01) to verify battery is really
+* over charged or not.
+*/
+
+static bool is_bat_over_voltage(struct bq24261_charger *chip,
+				bool verify_recovery)
+{
+
+	int bat_volt, bat_volt_max_des, ret;
+
+	ret = get_battery_voltage(&bat_volt);
+	if (ret)
+		/* Voltage unknown: report "still faulty" in either mode */
+		return verify_recovery ? false : true;
+
+	ret = get_battery_volt_max_design(&bat_volt_max_des);
+	if (ret)
+		bat_volt_max_des = BQ24261_DEF_BAT_VOLT_MAX_DESIGN;
+
+	dev_info(&chip->client->dev, "bat_volt=%d Voltage Max Design=%d OVP_VOLT=%d OVP recover volt=%d\n",
+			bat_volt, bat_volt_max_des,
+			(bat_volt_max_des/1000 * BQ24261_OVP_MULTIPLIER),
+			(bat_volt_max_des/1000 *
+			BQ24261_OVP_RECOVER_MULTIPLIER));
+	/* Return the comparison directly; the old ?true:false chains and
+	 * the unreachable trailing "return false" have been removed. */
+	if (verify_recovery)
+		return bat_volt <= (bat_volt_max_des / 1000 *
+				BQ24261_OVP_RECOVER_MULTIPLIER);
+
+	return bat_volt >= (bat_volt_max_des / 1000 *
+			BQ24261_OVP_MULTIPLIER);
+}
+
+/* Convenience wrappers for the two is_bat_over_voltage() modes
+ * (fixed checkpatch "space prohibited before comma"). */
+#define IS_BATTERY_OVER_VOLTAGE(chip) \
+	is_bat_over_voltage(chip, false)
+
+#define IS_BATTERY_OVER_VOLTAGE_RECOVERED(chip) \
+	is_bat_over_voltage(chip, true)
+
+/*
+ * handle_battery_over_voltage - react to a confirmed battery OVP fault
+ *
+ * Marks the battery health OVERVOLTAGE, disables the charger so the
+ * battery can discharge, and (re)schedules the recovery monitor.
+ */
+static void handle_battery_over_voltage(struct bq24261_charger *chip)
+{
+	/* Set Health to Over Voltage. Disable charger to discharge
+	 * battery to reduce the battery voltage.
+	 */
+	chip->bat_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	bq24261_enable_charger(chip, false);
+	chip->is_charger_enabled = false;
+	cancel_delayed_work_sync(&chip->exception_mon_work);
+	schedule_delayed_work(&chip->exception_mon_work,
+			      EXCEPTION_MONITOR_DELAY);
+}
+
+/*
+ * bq24261_exception_mon_work - poll for battery OVP recovery
+ *
+ * Re-enables the charger and restores the cached configuration once
+ * the battery voltage has dropped below the recovery threshold;
+ * otherwise re-arms itself.
+ */
+static void bq24261_exception_mon_work(struct work_struct *work)
+{
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    exception_mon_work.work);
+	/* Only overvoltage exception need to monitor.*/
+	if (IS_BATTERY_OVER_VOLTAGE_RECOVERED(chip)) {
+		dev_info(&chip->client->dev, "Over Voltage Exception Recovered\n");
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		bq24261_enable_charger(chip, true);
+		chip->is_charger_enabled = true;
+		resume_charging(chip);
+	} else {
+		schedule_delayed_work(&chip->exception_mon_work,
+				      EXCEPTION_MONITOR_DELAY);
+	}
+}
+
+/*
+ * bq24261_handle_irq - decode the status/fault register and update state
+ * @chip: charger context
+ * @stat_reg: raw value read from BQ24261_STAT_CTRL0_ADDR
+ *
+ * Updates chrgr_stat plus charger/battery health, schedules the
+ * low-supply, software-termination and OVP workers as required, wakes
+ * any thread blocked on wait_ready, and (unless @notify was cleared for
+ * internally-handled events) signals the power-supply core.  Always
+ * returns 0.  Caller must hold chip->lock.
+ */
+static int bq24261_handle_irq(struct bq24261_charger *chip, u8 stat_reg)
+{
+	struct i2c_client *client = chip->client;
+	bool notify = true;
+
+	dev_info(&client->dev, "%s:%d stat=0x%x\n",
+			__func__, __LINE__, stat_reg);
+
+	switch (stat_reg & BQ24261_STAT_MASK) {
+	case BQ24261_STAT_READY:
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_READY;
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Ready\n");
+		notify = false;
+		break;
+	case BQ24261_STAT_CHRG_PRGRSS:
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_CHARGING;
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Charge Progress\n");
+		bq24261_dump_regs(false);
+		break;
+	case BQ24261_STAT_CHRG_DONE:
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Charge Done\n");
+
+		/* Switch to software termination polling once HW says done */
+		bq24261_enable_hw_charge_term(chip, false);
+		resume_charging(chip);
+		schedule_delayed_work(&chip->sw_term_work, 0);
+		break;
+
+	case BQ24261_STAT_FAULT:
+		break;
+	}
+
+	if (stat_reg & BQ24261_BOOST_MASK)
+		dev_info(&client->dev, "Boost Mode\n");
+
+	if ((stat_reg & BQ24261_STAT_MASK) == BQ24261_STAT_FAULT) {
+		bool dump_master = true;
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_FAULT;
+
+		switch (stat_reg & BQ24261_FAULT_MASK) {
+		case BQ24261_VOVP:
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+			dev_err(&client->dev, "Charger OVP Fault\n");
+			break;
+
+		case BQ24261_LOW_SUPPLY:
+			notify = false;
+
+			if (chip->pdata->handle_low_supply)
+				chip->pdata->handle_low_supply();
+
+			/* Confirm after 5s; transient dips self-recover */
+			if (chip->cable_type !=
+					POWER_SUPPLY_CHARGER_TYPE_NONE) {
+				schedule_delayed_work
+					(&chip->low_supply_fault_work,
+						5*HZ);
+				dev_dbg(&client->dev,
+					"Schedule Low Supply Fault work!!\n");
+			}
+			break;
+
+		case BQ24261_THERMAL_SHUTDOWN:
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			dev_err(&client->dev, "Charger Thermal Fault\n");
+			break;
+
+		case BQ24261_BATT_TEMP_FAULT:
+			chip->bat_health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			dev_err(&client->dev, "Battery Temperature Fault\n");
+			break;
+
+		case BQ24261_TIMER_FAULT:
+			chip->bat_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			dev_err(&client->dev, "Charger Timer Fault\n");
+			break;
+
+		case BQ24261_BATT_OVP:
+			notify = false;
+			/* Cross-check with the fuel gauge: the part can
+			 * raise spurious battery-OVP faults */
+			if (chip->bat_health !=
+					POWER_SUPPLY_HEALTH_OVERVOLTAGE) {
+				if (!IS_BATTERY_OVER_VOLTAGE(chip)) {
+					chip->chrgr_stat =
+						BQ24261_CHRGR_STAT_UNKNOWN;
+					resume_charging(chip);
+				} else {
+					dev_err(&client->dev, "Battery Over Voltage Fault\n");
+					handle_battery_over_voltage(chip);
+					notify = true;
+				}
+			}
+			break;
+		case BQ24261_NO_BATTERY:
+			dev_err(&client->dev, "No Battery Connected\n");
+			break;
+
+		}
+
+		if (chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT && notify)
+			bq24261_dump_regs(dump_master);
+	}
+
+	wake_up(&chip->wait_ready);
+
+	chip->is_vsys_on = bq24261_is_vsys_on(chip);
+	if (notify)
+		power_supply_changed(&chip->psy_usb);
+
+	return 0;
+}
+
+/*
+ * bq24261_irq_worker - workqueue half of the interrupt path
+ *
+ * Reads the status register and feeds it to bq24261_handle_irq() under
+ * chip->lock.  (Fixed the non-kernel-style "}" / "else {" split and the
+ * comment typos; behavior unchanged.)
+ */
+static void bq24261_irq_worker(struct work_struct *work)
+{
+	struct bq24261_charger *chip =
+	    container_of(work, struct bq24261_charger, irq_work);
+	int ret;
+
+	/* Lock to ensure that interrupt register readings are done
+	 * and processed sequentially. The interrupt Fault registers
+	 * are read-on-clear and without sequential processing double
+	 * fault interrupts or fault recovery cannot be handled properly
+	 */
+	mutex_lock(&chip->lock);
+
+	dev_dbg(&chip->client->dev, "%s\n", __func__);
+
+	ret = bq24261_read_reg(chip->client, BQ24261_STAT_CTRL0_ADDR);
+	if (ret < 0)
+		dev_err(&chip->client->dev,
+			"Error (%d) in reading BQ24261_STAT_CTRL0_ADDR\n", ret);
+	else
+		bq24261_handle_irq(chip, ret);
+
+	mutex_unlock(&chip->lock);
+}
+
+/* Threaded IRQ handler: defer the i2c register reads to a workqueue. */
+static irqreturn_t bq24261_thread_handler(int id, void *data)
+{
+	struct bq24261_charger *chip = (struct bq24261_charger *)data;
+
+	queue_work(system_nrt_wq, &chip->irq_work);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Hard IRQ handler for a (potentially shared) line: check the
+ * memory-mapped interrupt status and wake the threaded handler only
+ * when our mask bit is set; otherwise report IRQ_NONE.
+ */
+static irqreturn_t bq24261_irq_handler(int irq, void *data)
+{
+	struct bq24261_charger *chip = (struct bq24261_charger *)data;
+	u8 intr_stat;
+
+	if (chip->irq_iomap) {
+		intr_stat = ioread8(chip->irq_iomap);
+		if ((intr_stat & chip->pdata->irq_mask)) {
+			dev_dbg(&chip->client->dev, "%s\n", __func__);
+			return IRQ_WAKE_THREAD;
+		}
+	}
+
+	return IRQ_NONE;
+}
+
+/*
+ * bq24261_boostmode_worker - drain queued OTG VBUS on/off events
+ *
+ * Events are popped under otg_queue_lock, but the spinlock is dropped
+ * around bq24261_enable_boost_mode() because that path sleeps
+ * (i2c + mutex); the lock is retaken before freeing and re-checking
+ * the list.
+ */
+static void bq24261_boostmode_worker(struct work_struct *work)
+{
+	struct bq24261_charger *chip =
+	    container_of(work, struct bq24261_charger, otg_work);
+	struct bq24261_otg_event *evt, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->otg_queue_lock, flags);
+	list_for_each_entry_safe(evt, tmp, &chip->otg_queue, node) {
+		list_del(&evt->node);
+		spin_unlock_irqrestore(&chip->otg_queue_lock, flags);
+
+		dev_info(&chip->client->dev,
+			 "%s:%d state=%d\n", __FILE__, __LINE__,
+			 evt->is_enable);
+		mutex_lock(&chip->lock);
+		if (evt->is_enable)
+			bq24261_enable_boost_mode(chip, 1);
+		else
+			bq24261_enable_boost_mode(chip, 0);
+
+		mutex_unlock(&chip->lock);
+		spin_lock_irqsave(&chip->otg_queue_lock, flags);
+		kfree(evt);
+
+	}
+	spin_unlock_irqrestore(&chip->otg_queue_lock, flags);
+}
+
+/*
+ * otg_handle_notification - USB OTG notifier callback
+ *
+ * Only USB_EVENT_DRIVE_VBUS is handled: the enable/disable request is
+ * queued (GFP_ATOMIC — notifier context) and processed by
+ * bq24261_boostmode_worker on a workqueue.
+ */
+static int otg_handle_notification(struct notifier_block *nb,
+				   unsigned long event, void *param)
+{
+
+	struct bq24261_charger *chip =
+	    container_of(nb, struct bq24261_charger, otg_nb);
+	struct bq24261_otg_event *evt;
+
+	dev_dbg(&chip->client->dev, "OTG notification: %lu\n", event);
+	if (!param || event != USB_EVENT_DRIVE_VBUS)
+		return NOTIFY_DONE;
+
+	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
+	if (!evt) {
+		dev_err(&chip->client->dev,
+			"failed to allocate memory for OTG event\n");
+		return NOTIFY_DONE;
+	}
+
+	evt->is_enable = *(int *)param;
+	INIT_LIST_HEAD(&evt->node);
+
+	spin_lock(&chip->otg_queue_lock);
+	list_add_tail(&evt->node, &chip->otg_queue);
+	spin_unlock(&chip->otg_queue_lock);
+
+	queue_work(system_nrt_wq, &chip->otg_work);
+	return NOTIFY_OK;
+}
+
+/*
+ * register_otg_notifications - hook into the USB transceiver notifier
+ *
+ * Initializes the OTG event queue/worker and registers
+ * otg_handle_notification with the USB2 PHY (API differs before
+ * kernel 3.10).  Returns 0 on success or -EINVAL on failure.
+ */
+static inline int register_otg_notifications(struct bq24261_charger *chip)
+{
+
+	int retval;
+
+	INIT_LIST_HEAD(&chip->otg_queue);
+	INIT_WORK(&chip->otg_work, bq24261_boostmode_worker);
+	spin_lock_init(&chip->otg_queue_lock);
+
+	chip->otg_nb.notifier_call = otg_handle_notification;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	chip->transceiver = usb_get_transceiver();
+#else
+	chip->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+#endif
+	if (!chip->transceiver || IS_ERR(chip->transceiver)) {
+		dev_err(&chip->client->dev, "failed to get otg transceiver\n");
+		return -EINVAL;
+	}
+	retval = usb_register_notifier(chip->transceiver, &chip->otg_nb);
+	if (retval) {
+		dev_err(&chip->client->dev,
+			"failed to register otg notifier\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * bq24261_get_model - classify the part from its revision register
+ *
+ * Returns BQ24260 or BQ24261 for known revision codes, BQ2426X for any
+ * other member of the family.
+ */
+static enum bq2426x_model_num bq24261_get_model(int bq24261_rev_reg)
+{
+	switch (bq24261_rev_reg & BQ24261_REV_MASK) {
+	case BQ24260_REV:
+		return BQ24260;
+	case BQ24261_REV:
+	case BQ24261_2_3_REV:
+		return BQ24261;
+	default:
+		return BQ2426X;
+	}
+}
+
+/*
+ * bq24261_probe - i2c probe: identify the charger revision, allocate
+ * and initialise the driver context, register the USB power-supply
+ * class device, IRQ handler, workers and OTG notifications.
+ *
+ * Returns 0 on success or a negative errno; all managed allocations
+ * are devm-based, the iomap and power-supply are unwound manually.
+ *
+ * Fixes: corrected the garbled "doesn'tsupport" log string, removed a
+ * duplicate chip->pdata assignment, and guaranteed NUL termination of
+ * the copied model/manufacturer strings (strncpy does not terminate
+ * when the source fills the buffer).
+ */
+static int bq24261_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter;
+	struct bq24261_charger *chip;
+	int ret;
+	int bq2426x_rev;
+	enum bq2426x_model_num bq24261_rev_index;
+
+	adapter = to_i2c_adapter(client->dev.parent);
+
+	if (!client->dev.platform_data) {
+		dev_err(&client->dev, "platform data is null");
+		return -EFAULT;
+	}
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+			"I2C adapter %s doesn't support BYTE DATA transfer\n",
+			adapter->name);
+		return -EIO;
+	}
+
+	/* Identify the part before allocating anything */
+	bq2426x_rev = bq24261_read_reg(client, BQ24261_VENDOR_REV_ADDR);
+	if (bq2426x_rev < 0) {
+		dev_err(&client->dev,
+			"Error (%d) in reading BQ24261_VENDOR_REV_ADDR\n", bq2426x_rev);
+		return bq2426x_rev;
+	}
+	dev_info(&client->dev, "bq2426x revision: 0x%x found!!\n", bq2426x_rev);
+
+	bq24261_rev_index = bq24261_get_model(bq2426x_rev);
+	if ((bq2426x_rev & BQ24261_VENDOR_MASK) != BQ24261_VENDOR) {
+		dev_err(&client->dev,
+			"Invalid Vendor/Revision number in BQ24261_VENDOR_REV_ADDR: %d",
+			bq2426x_rev);
+		return -ENODEV;
+	}
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip) {
+		dev_err(&client->dev, "mem alloc failed\n");
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&chip->wait_ready);
+	i2c_set_clientdata(client, chip);
+	chip->client = client;
+	chip->pdata = client->dev.platform_data;
+
+	/* Remap IRQ map address to read the IRQ status */
+	if ((chip->pdata->irq_map) && (chip->pdata->irq_mask)) {
+		chip->irq_iomap = ioremap_nocache(chip->pdata->irq_map, 8);
+		if (!chip->irq_iomap) {
+			dev_err(&client->dev, "Failed: ioremap_nocache\n");
+			return -EFAULT;
+		}
+	}
+
+	chip->psy_usb.name = DEV_NAME;
+	chip->psy_usb.type = POWER_SUPPLY_TYPE_USB;
+	chip->psy_usb.properties = bq24261_usb_props;
+	chip->psy_usb.num_properties = ARRAY_SIZE(bq24261_usb_props);
+	chip->psy_usb.get_property = bq24261_usb_get_property;
+	chip->psy_usb.set_property = bq24261_usb_set_property;
+	chip->psy_usb.supplied_to = chip->pdata->supplied_to;
+	chip->psy_usb.num_supplicants = chip->pdata->num_supplicants;
+	chip->psy_usb.throttle_states = chip->pdata->throttle_states;
+	chip->psy_usb.num_throttle_states = chip->pdata->num_throttle_states;
+	chip->psy_usb.supported_cables = POWER_SUPPLY_CHARGER_TYPE_USB;
+	chip->max_cc = 1500;
+	chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+	chip->chrgr_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	chip->revision = bq2426x_rev;
+
+	/* strncpy does not NUL-terminate a full buffer; terminate it */
+	strncpy(chip->model_name,
+		bq24261_model_name[bq24261_rev_index].model_name,
+		MODEL_NAME_SIZE - 1);
+	chip->model_name[MODEL_NAME_SIZE - 1] = '\0';
+	strncpy(chip->manufacturer, DEV_MANUFACTURER,
+		DEV_MANUFACTURER_NAME_SIZE - 1);
+	chip->manufacturer[DEV_MANUFACTURER_NAME_SIZE - 1] = '\0';
+
+	mutex_init(&chip->lock);
+	ret = power_supply_register(&client->dev, &chip->psy_usb);
+	if (ret) {
+		dev_err(&client->dev, "Failed: power supply register (%d)\n",
+			ret);
+		iounmap(chip->irq_iomap);
+		return ret;
+	}
+
+	INIT_DELAYED_WORK(&chip->sw_term_work, bq24261_sw_charge_term_worker);
+	INIT_DELAYED_WORK(&chip->low_supply_fault_work,
+			bq24261_low_supply_fault_work);
+	INIT_DELAYED_WORK(&chip->exception_mon_work,
+			bq24261_exception_mon_work);
+	/* Only the BQ24261 revision needs periodic watchdog kicking */
+	if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+		INIT_DELAYED_WORK(&chip->wdt_work,
+				bq24261_wdt_reset_worker);
+	}
+
+	INIT_WORK(&chip->irq_work, bq24261_irq_worker);
+	if (chip->client->irq) {
+		ret = request_threaded_irq(chip->client->irq,
+				bq24261_irq_handler,
+				bq24261_thread_handler,
+				IRQF_SHARED|IRQF_NO_SUSPEND,
+				DEV_NAME, chip);
+		if (ret) {
+			dev_err(&client->dev, "Failed: request_irq (%d)\n",
+				ret);
+			iounmap(chip->irq_iomap);
+			power_supply_unregister(&chip->psy_usb);
+			return ret;
+		}
+	}
+
+	if (IS_BATTERY_OVER_VOLTAGE(chip))
+		handle_battery_over_voltage(chip);
+	else
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+
+	/* Best-effort: charging still works without OTG notifications */
+	if (register_otg_notifications(chip))
+		dev_err(&client->dev, "Error in registering OTG notifications. Unable to supply power to Host\n");
+
+	bq24261_client = client;
+	power_supply_changed(&chip->psy_usb);
+	bq24261_debugfs_init();
+
+	return 0;
+}
+
+/*
+ * bq24261_remove - i2c removal callback: release the IRQ, flush any
+ * pending work, unmap the IRQ status registers, detach the OTG
+ * notifier and unregister the power-supply device created in probe().
+ */
+static int bq24261_remove(struct i2c_client *client)
+{
+	struct bq24261_charger *chip = i2c_get_clientdata(client);
+
+	if (client->irq)
+		free_irq(client->irq, chip);
+
+	flush_scheduled_work();
+	if (chip->irq_iomap)
+		iounmap(chip->irq_iomap);
+	if (chip->transceiver)
+		usb_unregister_notifier(chip->transceiver, &chip->otg_nb);
+
+	power_supply_unregister(&chip->psy_usb);
+	bq24261_debugfs_exit();
+	return 0;
+}
+
+/*
+ * bq24261_suspend - system-sleep hook.  On BQ24261 REV parts the
+ * watchdog-kick worker (armed only in boost/OTG mode) is stopped so it
+ * cannot fire while the system is suspended.
+ */
+static int bq24261_suspend(struct device *dev)
+{
+	struct bq24261_charger *chip = dev_get_drvdata(dev);
+
+	/* wdt_work exists only for this revision (see probe()) */
+	if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+		if (chip->boost_mode)
+			cancel_delayed_work_sync(&chip->wdt_work);
+	}
+	dev_dbg(&chip->client->dev, "bq24261 suspend\n");
+	return 0;
+}
+
+/*
+ * bq24261_resume - system-resume hook: if boost (OTG) mode was active
+ * before suspend, re-enable it (which also re-arms the watchdog worker
+ * on BQ24261 REV parts).
+ */
+static int bq24261_resume(struct device *dev)
+{
+	struct bq24261_charger *chip = dev_get_drvdata(dev);
+
+	if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+		if (chip->boost_mode)
+			bq24261_enable_boost_mode(chip, 1);
+	}
+
+	dev_dbg(&chip->client->dev, "bq24261 resume\n");
+	return 0;
+}
+
+/* Runtime-PM callbacks: no device state to save or restore, the stubs
+ * only trace the transition.
+ */
+static int bq24261_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int bq24261_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int bq24261_runtime_idle(struct device *dev)
+{
+
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+/* System and runtime PM hooks (the runtime ops are trace-only stubs) */
+static const struct dev_pm_ops bq24261_pm_ops = {
+	.suspend = bq24261_suspend,
+	.resume = bq24261_resume,
+	.runtime_suspend = bq24261_runtime_suspend,
+	.runtime_resume = bq24261_runtime_resume,
+	.runtime_idle = bq24261_runtime_idle,
+};
+
+/* i2c id table: DEV_NAME must match the board's i2c_board_info entry */
+static const struct i2c_device_id bq24261_id[] = {
+	{DEV_NAME, 0},
+	{},
+};
+
+MODULE_DEVICE_TABLE(i2c, bq24261_id);
+
+static struct i2c_driver bq24261_driver = {
+	.driver = {
+		.name = DEV_NAME,
+		.pm = &bq24261_pm_ops,
+	},
+	.probe = bq24261_probe,
+	.remove = bq24261_remove,
+	.id_table = bq24261_id,
+};
+
+/* Module entry point: register the i2c driver with the core */
+static int __init bq24261_init(void)
+{
+	return i2c_add_driver(&bq24261_driver);
+}
+
+module_init(bq24261_init);
+
+/* Module exit: unregister; remove() runs for any bound device */
+static void __exit bq24261_exit(void)
+{
+	i2c_del_driver(&bq24261_driver);
+}
+
+module_exit(bq24261_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("BQ24261 Charger Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+#include <linux/power/battery_id.h>
+#include "power_supply.h"
+#include "power_supply_charger.h"
+
+/* 98% of CV is considered as voltage to detect Full */
+#define FULL_CV_MIN 98
+
+/* Offset to exit from maintenance charging. In maintenance charging
+* if the voltage is less than the (maintenance_lower_threshold -
+* MAINT_EXIT_OFFSET) then system can switch to normal charging
+*/
+#define MAINT_EXIT_OFFSET 50 /* mV */
+
+/*
+ * get_tempzone - map a temperature to its battery-profile zone index.
+ * @pse_mod_bprof: PSE battery profile holding the zone table; the range
+ *                 check implies temp_mon_range[0] carries the highest
+ *                 temp_up_lim (zones ordered by descending limit).
+ * @temp: temperature to classify.
+ *
+ * Returns the zone index, or -EINVAL when @temp lies outside
+ * [temp_low_lim, temp_mon_range[0].temp_up_lim].
+ */
+static int get_tempzone(struct ps_pse_mod_prof *pse_mod_bprof,
+		int temp)
+{
+
+	int i = 0;
+	/* Clamp the advertised table length to the array capacity */
+	int temp_range_cnt = min_t(u16, pse_mod_bprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+
+	if ((temp < pse_mod_bprof->temp_low_lim) ||
+		(temp > pse_mod_bprof->temp_mon_range[0].temp_up_lim))
+		return -EINVAL;
+
+	/* First entry whose upper limit is exceeded ends the search */
+	for (i = 0; i < temp_range_cnt; ++i)
+		if (temp > pse_mod_bprof->temp_mon_range[i].temp_up_lim)
+			break;
+	return i-1;
+}
+
+/*
+ * __is_battery_full - single-sample charge-termination test.
+ * @volt:  voltage sample
+ * @cur:   charge current sample; positive while charging
+ * @iterm: termination current threshold
+ * @cv:    constant-voltage setpoint
+ *
+ * Full when the current has tapered into (0, iterm] while the voltage
+ * is at least FULL_CV_MIN percent of the CV setpoint.
+ *
+ * Fix: the pr_devel() format used %d for long/unsigned long arguments
+ * (undefined behaviour per printk format rules); use %ld/%lu.
+ */
+static inline bool __is_battery_full
+	(long volt, long cur, long iterm, unsigned long cv)
+{
+	pr_devel("%s:current=%ld pse_mod_bprof->chrg_term_mA =%ld voltage_now=%ld full_cond=%lu",
+		__func__, cur, iterm, volt * 100, (FULL_CV_MIN * cv));
+
+	return ((cur > 0) && (cur <= iterm) &&
+		((volt * 100) >= (FULL_CV_MIN * cv)));
+
+}
+
+/*
+ * is_battery_full - software full detection over the cached sample
+ * window: every cached voltage/current pair must satisfy
+ * __is_battery_full() so a transient current dip cannot trigger a
+ * false "full".
+ *
+ * Fix: FULL_CV_MIN * cv is unsigned long, so its pr_devel() specifier
+ * must be %lu, not %d.
+ */
+static inline bool is_battery_full(struct batt_props bat_prop,
+	struct ps_pse_mod_prof *pse_mod_bprof, unsigned long cv)
+{
+
+	int i;
+	/* Software full detection. Check the battery charge current to detect
+	 * battery Full. The voltage is also verified to avoid false charge
+	 * full detection.
+	 */
+	pr_devel("%s:current=%d pse_mod_bprof->chrg_term_mA =%d bat_prop.voltage_now=%d full_cond=%lu",
+		__func__, bat_prop.current_now, (pse_mod_bprof->chrg_term_mA),
+		bat_prop.voltage_now * 100, (FULL_CV_MIN * cv));
+
+	for (i = (MAX_CUR_VOLT_SAMPLES - 1); i >= 0; --i) {
+
+		if (!(__is_battery_full(bat_prop.voltage_now_cache[i],
+			bat_prop.current_now_cache[i],
+			pse_mod_bprof->chrg_term_mA, cv)))
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * pse_get_bat_thresholds - export the termination current and operating
+ * temperature window from a PSE charge profile.
+ * @bprof: generic charge-profile container.
+ * @bat_thresh: output; iterm in mA (per the chrg_term_mA field), temp
+ *              limits in the profile's units.
+ *
+ * Returns 0 on success, -EINVAL when @bprof is not a PSE profile or
+ * carries no payload.
+ */
+static int pse_get_bat_thresholds(struct ps_batt_chg_prof bprof,
+	struct psy_batt_thresholds *bat_thresh)
+{
+	struct ps_pse_mod_prof *pse_mod_bprof =
+		(struct ps_pse_mod_prof *) bprof.batt_prof;
+
+	if ((bprof.chrg_prof_type != PSE_MOD_CHRG_PROF) || (!pse_mod_bprof))
+		return -EINVAL;
+
+	bat_thresh->iterm = pse_mod_bprof->chrg_term_mA;
+	bat_thresh->temp_min = pse_mod_bprof->temp_low_lim;
+	/* temp_mon_range[0] holds the highest upper limit in the table */
+	bat_thresh->temp_max = pse_mod_bprof->temp_mon_range[0].temp_up_lim;
+
+	return 0;
+}
+
+/*
+ * pse_get_next_cc_cv - PSE charging state machine: select the next
+ * charge current (CC) and voltage (CV) for the battery's temperature
+ * zone and decide the next algorithm state.
+ * @bat_prop: current battery measurements and previous algo state.
+ * @bprof: charge profile (must be PSE_MOD_CHRG_PROF).
+ * @cc/@cv: outputs; both 0 means "do not charge".
+ *
+ * Returns the new algorithm state (CHARGE, MAINT, FULL or NOT_CHARGE).
+ */
+static enum psy_algo_stat pse_get_next_cc_cv(struct batt_props bat_prop,
+	struct ps_batt_chg_prof bprof, unsigned long *cc, unsigned long *cv)
+{
+	int tzone;
+	struct ps_pse_mod_prof *pse_mod_bprof =
+		(struct ps_pse_mod_prof *) bprof.batt_prof;
+	enum psy_algo_stat algo_stat = bat_prop.algo_stat;
+	int maint_exit_volt;
+
+	*cc = *cv = 0;
+
+	/* If STATUS is discharging, assume that charger is not connected.
+	 * If charger is not connected, no need to take any action.
+	 * If charge profile type is not PSE_MOD_CHRG_PROF or the charge profile
+	 * is not present, no need to take any action.
+	 */
+
+	pr_devel("%s:battery status = %d algo_status=%d\n",
+		__func__, bat_prop.status, algo_stat);
+
+	if ((bprof.chrg_prof_type != PSE_MOD_CHRG_PROF) || (!pse_mod_bprof))
+		return PSY_ALGO_STAT_NOT_CHARGE;
+
+	/* Temperature outside every zone also blocks charging */
+	tzone = get_tempzone(pse_mod_bprof, bat_prop.temperature);
+
+	if (tzone < 0)
+		return PSY_ALGO_STAT_NOT_CHARGE;
+
+	/* Change the algo status to not charging, if battery is
+	 * not really charging or less than maintenance exit threshold.
+	 * This way algorithm can switch to normal
+	 * charging if current status is full/maintenance
+	 */
+	maint_exit_volt = pse_mod_bprof->
+		temp_mon_range[tzone].maint_chrg_vol_ll -
+			MAINT_EXIT_OFFSET;
+
+	if ((bat_prop.status == POWER_SUPPLY_STATUS_DISCHARGING) ||
+		(bat_prop.status == POWER_SUPPLY_STATUS_NOT_CHARGING) ||
+		bat_prop.voltage_now < maint_exit_volt) {
+
+		algo_stat = PSY_ALGO_STAT_NOT_CHARGE;
+
+	}
+
+	/* read cc and cv based on temperature and algorithm status*/
+	if (algo_stat == PSY_ALGO_STAT_FULL ||
+			algo_stat == PSY_ALGO_STAT_MAINT) {
+
+		/* if status is full and voltage is lower than maintenance lower
+		 * threshold change status to maintenance
+		 */
+
+		if (algo_stat == PSY_ALGO_STAT_FULL && (bat_prop.voltage_now <=
+			pse_mod_bprof->temp_mon_range[tzone].maint_chrg_vol_ll))
+			algo_stat = PSY_ALGO_STAT_MAINT;
+
+		/* Read maintenance CC and CV */
+		if (algo_stat == PSY_ALGO_STAT_MAINT) {
+			*cv = pse_mod_bprof->temp_mon_range
+					[tzone].maint_chrg_vol_ul;
+			*cc = pse_mod_bprof->temp_mon_range
+					[tzone].maint_chrg_cur;
+		}
+	} else {
+		*cv = pse_mod_bprof->temp_mon_range[tzone].full_chrg_vol;
+		*cc = pse_mod_bprof->temp_mon_range[tzone].full_chrg_cur;
+		algo_stat = PSY_ALGO_STAT_CHARGE;
+	}
+
+	/* Full detection overrides everything: stop charging */
+	if (is_battery_full(bat_prop, pse_mod_bprof, *cv)) {
+		*cc = *cv = 0;
+		algo_stat = PSY_ALGO_STAT_FULL;
+	}
+
+	return algo_stat;
+}
+
+/*
+ * pse_algo_init - register the PSE charging algorithm at boot.
+ *
+ * Fix: the charging_algo descriptor was an automatic (stack) variable
+ * handed to power_supply_register_charging_algo(); if the registry
+ * keeps the pointer rather than copying, it dangles once init returns.
+ * Give the descriptor static storage duration instead.
+ */
+static int __init pse_algo_init(void)
+{
+	static struct charging_algo pse_algo = {
+		.chrg_prof_type = PSE_MOD_CHRG_PROF,
+		.name = "pse_algo",
+		.get_next_cc_cv = pse_get_next_cc_cv,
+		.get_batt_thresholds = pse_get_bat_thresholds,
+	};
+
+	power_supply_register_charging_algo(&pse_algo);
+	return 0;
+}
+
+module_init(pse_algo_init);
+
+module_init(pse_algo_init);
--- /dev/null
+/*
+ * pmic_ccsm.c - Intel MID PMIC Charger Driver
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+/* Includes */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/param.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/usb/otg.h>
+#include <linux/power_supply.h>
+#include <linux/rpmsg.h>
+#include <linux/version.h>
+#include <asm/intel_basincove_gpadc.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#include <linux/iio/consumer.h>
+#else
+#include "../../../kernel/drivers/staging/iio/consumer.h"
+#endif
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/sfi.h>
+#include <linux/async.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/power/battery_id.h>
+#include "pmic_ccsm.h"
+
+/* Macros */
+#define DRIVER_NAME "pmic_ccsm"
+#define PMIC_SRAM_INTR_ADDR 0xFFFFF616
+/* Direction selectors for adc_temp_conv() */
+#define ADC_TO_TEMP 1
+#define TEMP_TO_ADC 0
+/* Bounds checks against the platform ADC table; per these checks the
+ * table is ordered by descending temperature / ascending ADC code.
+ */
+#define is_valid_temp(tmp)\
+	(!(tmp > chc.pdata->adc_tbl[0].temp ||\
+		tmp < chc.pdata->adc_tbl[chc.pdata->max_tbl_row_cnt - 1].temp))
+#define is_valid_adc_code(val)\
+	(!(val < chc.pdata->adc_tbl[0].adc_val ||\
+		val > chc.pdata->adc_tbl[chc.pdata->max_tbl_row_cnt - 1].adc_val))
+#define CONVERT_ADC_TO_TEMP(adc_val, temp)\
+	adc_temp_conv(adc_val, temp, ADC_TO_TEMP)
+#define CONVERT_TEMP_TO_ADC(temp, adc_val)\
+	adc_temp_conv(temp, adc_val, TEMP_TO_ADC)
+/* Profiles with fewer monitored ranges than MIN_BATT_PROF need their
+ * zones split before programming the PMIC.
+ */
+#define NEED_ZONE_SPLIT(bprof)\
+	((bprof->temp_mon_ranges < MIN_BATT_PROF))
+
+#define USB_WAKE_LOCK_TIMEOUT (5 * HZ)
+
+/* 100mA value definition for setting the inlimit in bq24261 */
+#define USBINPUTICC100VAL 100
+
+/* Type definitions */
+static void pmic_bat_zone_changed(void);
+static void pmic_battery_overheat_handler(bool);
+
+/* Extern definitions */
+
+/* Global declarations */
+/* Serializes the CHRTT indirect (address + data) register accesses */
+static DEFINE_MUTEX(pmic_lock);
+/* Single driver-wide context: this driver supports one PMIC instance */
+static struct pmic_chrgr_drv_context chc;
+/* CHGRIRQ0 dispatch table: irq mask, status mask, log strings and
+ * callbacks per interrupt source (layout per struct interrupt_info).
+ */
+static struct interrupt_info chgrirq0_info[] = {
+	{
+		CHGIRQ0_BZIRQ_MASK,
+		0,
+		"Battery temperature zone changed",
+		NULL,
+		NULL,
+		pmic_bat_zone_changed,
+		NULL,
+	},
+	{
+		CHGIRQ0_BAT_CRIT_MASK,
+		SCHGIRQ0_SBAT_CRIT_MASK,
+		NULL,
+		"Battery Over heat exception",
+		"Battery Over heat exception Recovered",
+		NULL,
+		pmic_battery_overheat_handler
+	},
+	{
+		CHGIRQ0_BAT0_ALRT_MASK,
+		SCHGIRQ0_SBAT0_ALRT_MASK,
+		NULL,
+		"Battery0 temperature inside boundary",
+		"Battery0 temperature outside boundary",
+		NULL,
+		pmic_battery_overheat_handler
+	},
+	{
+		CHGIRQ0_BAT1_ALRT_MASK,
+		SCHGIRQ0_SBAT1_ALRT_MASK,
+		NULL,
+		"Battery1 temperature inside boundary",
+		"Battery1 temperature outside boundary",
+		NULL,
+		NULL
+	},
+};
+
+/* { mA limit, CHGRCTRL1 input-limit register code } pairs, ascending */
+u16 pmic_inlmt[][2] = {
+	{ 100, CHGRCTRL1_FUSB_INLMT_100},
+	{ 150, CHGRCTRL1_FUSB_INLMT_150},
+	{ 500, CHGRCTRL1_FUSB_INLMT_500},
+	{ 900, CHGRCTRL1_FUSB_INLMT_900},
+	{ 1500, CHGRCTRL1_FUSB_INLMT_1500},
+};
+
+/*
+ * get_psy_battery - find the first registered power supply of type
+ * BATTERY by walking the power_supply class devices.
+ *
+ * Returns the power_supply or NULL if no battery is registered.  The
+ * class iterator is always released before returning.
+ */
+static inline struct power_supply *get_psy_battery(void)
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (pst->type == POWER_SUPPLY_TYPE_BATTERY) {
+			class_dev_iter_exit(&iter);
+			return pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return NULL;
+}
+
+
+/* Function definitions */
+static void lookup_regval(u16 tbl[][2], size_t size, u16 in_val, u8 *out_val)
+{
+ int i;
+ for (i = 1; i < size; ++i)
+ if (in_val < tbl[i][0])
+ break;
+
+ *out_val = (u8)tbl[i-1][1];
+}
+
+/* Linear interpolation along a segment with slope dy1y0/dx1x0: returns
+ * y for a point dxx0 past (x0, y0), rounded to nearest.
+ */
+static int interpolate_y(int dx1x0, int dy1y0, int dxx0, int y0)
+{
+	return y0 + DIV_ROUND_CLOSEST((dxx0 * dy1y0), dx1x0);
+}
+
+/* Inverse of interpolate_y(): solve x for a point dyy0 past (x0, y0) */
+static int interpolate_x(int dy1y0, int dx1x0, int dyy0, int x0)
+{
+	return x0 + DIV_ROUND_CLOSEST((dyy0 * dx1x0), dy1y0);
+}
+
+/*
+ * adc_temp_conv - convert between ADC codes and temperature using the
+ * platform lookup table with linear interpolation between rows.
+ * @in_val: ADC code (conv == ADC_TO_TEMP) or temperature
+ *          (conv == TEMP_TO_ADC).
+ * @out_val: conversion result.
+ * @conv: direction, ADC_TO_TEMP or TEMP_TO_ADC.
+ *
+ * Returns 0 on success, -ERANGE when the table is absent or @in_val is
+ * outside it.  The table is ordered by ascending adc_val / descending
+ * temp (see is_valid_adc_code()/is_valid_temp()), which also
+ * guarantees the interpolation never indexes row -1.
+ */
+static int adc_temp_conv(int in_val, int *out_val, int conv)
+{
+	int tbl_row_cnt, i;
+	struct temp_lookup *adc_temp_tbl;
+
+	if (!chc.pdata) {
+		dev_err(chc.dev, "ADC-lookup table not yet available\n");
+		return -ERANGE;
+	}
+
+	tbl_row_cnt = chc.pdata->max_tbl_row_cnt;
+	adc_temp_tbl = chc.pdata->adc_tbl;
+
+	if (conv == ADC_TO_TEMP) {
+		if (!is_valid_adc_code(in_val))
+			return -ERANGE;
+
+		if (in_val == adc_temp_tbl[tbl_row_cnt-1].adc_val)
+			i = tbl_row_cnt - 1;
+		else {
+			/* first row whose adc_val exceeds in_val */
+			for (i = 0; i < tbl_row_cnt; ++i)
+				if (in_val < adc_temp_tbl[i].adc_val)
+					break;
+		}
+
+		*out_val =
+			interpolate_y((adc_temp_tbl[i].adc_val
+					- adc_temp_tbl[i - 1].adc_val),
+				(adc_temp_tbl[i].temp
+					- adc_temp_tbl[i - 1].temp),
+				(in_val - adc_temp_tbl[i - 1].adc_val),
+				adc_temp_tbl[i - 1].temp);
+	} else {
+		if (!is_valid_temp(in_val))
+			return -ERANGE;
+
+		if (in_val == adc_temp_tbl[tbl_row_cnt-1].temp)
+			i = tbl_row_cnt - 1;
+		else {
+			/* first row whose temp is below in_val */
+			for (i = 0; i < tbl_row_cnt; ++i)
+				if (in_val > adc_temp_tbl[i].temp)
+					break;
+		}
+
+		/* NOTE(review): stores only 16 bits of the result through
+		 * a short* view of out_val — callers appear to expect a
+		 * 16-bit ADC code here; confirm on big-endian targets.
+		 */
+		*((short int *)out_val) =
+			interpolate_x((adc_temp_tbl[i].temp
+					- adc_temp_tbl[i - 1].temp),
+				(adc_temp_tbl[i].adc_val
+					- adc_temp_tbl[i - 1].adc_val),
+				(in_val - adc_temp_tbl[i - 1].temp),
+				adc_temp_tbl[i - 1].adc_val);
+	}
+	return 0;
+}
+
+/* Read one PMIC register over SCU IPC; returns 0 on success, -EIO on
+ * any IPC failure (the raw IPC error code is logged, not propagated).
+ */
+static int pmic_read_reg(u16 addr, u8 *val)
+{
+	if (intel_scu_ipc_ioread8(addr, val) == 0)
+		return 0;
+
+	dev_err(chc.dev,
+		"Error in intel_scu_ipc_ioread8 0x%.4x\n", addr);
+	return -EIO;
+}
+
+
+/*
+ * __pmic_write_tt - raw TT (trim/test) write: latch the indirect
+ * address into CHRTTADDR, then the payload into CHRTTDATA.
+ * Caller must hold pmic_lock (the addr/data pair is not atomic).
+ */
+static int __pmic_write_tt(u8 addr, u8 data)
+{
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(CHRTTADDR_ADDR, addr);
+	if (unlikely(ret))
+		return ret;
+
+	return intel_scu_ipc_iowrite8(CHRTTDATA_ADDR, data);
+}
+
+/*
+ * pmic_write_tt - locked TT register write.
+ *
+ * -EACCES (access blocked by the SCU) is deliberately mapped to 0 so
+ * callers need no special handling on locked-down devices.
+ */
+static inline int pmic_write_tt(u8 addr, u8 data)
+{
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_write_tt(addr, data);
+	mutex_unlock(&pmic_lock);
+
+	/* If access is blocked return success to avoid additional
+	 * error handling at client side
+	 */
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC write blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+	return ret;
+}
+
+/*
+ * __pmic_read_tt - raw TT read: latch the indirect address, give the
+ * PMIC time to fetch the value, then read CHRTTDATA.
+ * Caller must hold pmic_lock.
+ */
+static int __pmic_read_tt(u8 addr, u8 *data)
+{
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(CHRTTADDR_ADDR, addr);
+	if (ret)
+		return ret;
+
+	/* PMIC needs time to populate CHRTTDATA after the address write */
+	usleep_range(2000, 3000);
+
+	return intel_scu_ipc_ioread8(CHRTTDATA_ADDR, data);
+}
+
+/* pmic_read_tt - locked TT register read (see __pmic_read_tt()). */
+static inline int pmic_read_tt(u8 addr, u8 *data)
+{
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_read_tt(addr, data);
+	mutex_unlock(&pmic_lock);
+
+	return ret;
+}
+
+/*
+ * pmic_update_tt - read-modify-write of a TT register under pmic_lock:
+ * only the bits selected by @mask are replaced with @data.
+ */
+static int pmic_update_tt(u8 addr, u8 mask, u8 data)
+{
+	u8 tdata;
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_read_tt(addr, &tdata);
+	if (unlikely(ret))
+		goto exit;
+
+	tdata = (tdata & ~mask) | (data & mask);
+	ret = __pmic_write_tt(addr, tdata);
+exit:
+	mutex_unlock(&pmic_lock);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * pmic_chrgr_reg_show - debugfs read: print one PMIC register value.
+ * The register address arrives via seq->private (set up by
+ * pmic_debugfs_init()); the interrupt registers are sampled from the
+ * SRAM mirror (pmic_intr_iomap) rather than over IPC.
+ *
+ * NOTE(review): seq->private points at a pmic_regs_def addr field but
+ * is dereferenced as u8 here while addr is u16 — verify against the
+ * addr field's type in pmic_ccsm.h; a u16 field would be truncated.
+ */
+static int pmic_chrgr_reg_show(struct seq_file *seq, void *unused)
+{
+	int ret;
+	u16 addr;
+	u16 val1;
+	u8 val;
+
+	addr = *((u8 *)seq->private);
+
+	if (addr == CHRGRIRQ1_ADDR) {
+		/* high byte of the 16-bit SRAM interrupt mirror */
+		val1 = ioread16(chc.pmic_intr_iomap);
+		val = (u8)(val1 >> 8);
+	} else if (addr == CHGRIRQ0_ADDR) {
+		val1 = ioread16(chc.pmic_intr_iomap);
+		val = (u8)val1;
+	} else {
+		ret = pmic_read_reg(addr, &val);
+		if (ret != 0) {
+			dev_err(chc.dev,
+				"Error reading tt register 0x%2x\n",
+				addr);
+			return -EIO;
+		}
+	}
+
+	seq_printf(seq, "0x%x\n", val);
+	return 0;
+}
+
+/* debugfs read: print one TT register (address via seq->private). */
+static int pmic_chrgr_tt_reg_show(struct seq_file *seq, void *unused)
+{
+	int ret;
+	u8 addr;
+	u8 val;
+
+	addr = *((u8 *)seq->private);
+
+	ret = pmic_read_tt(addr, &val);
+	if (ret != 0) {
+		dev_err(chc.dev,
+			"Error reading tt register 0x%2x\n",
+			addr);
+		return -EIO;
+	}
+
+	seq_printf(seq, "0x%x\n", val);
+	return 0;
+}
+
+/* single_open() adapters binding the per-register inode private data */
+static int pmic_chrgr_tt_reg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmic_chrgr_tt_reg_show, inode->i_private);
+}
+
+static int pmic_chrgr_reg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmic_chrgr_reg_show, inode->i_private);
+}
+
+/* Root of the driver's debugfs tree (NULL if creation failed) */
+static struct dentry *charger_debug_dir;
+/* Register set exposed via debugfs on Basin Cove PMICs */
+static struct pmic_regs_def pmic_regs_bc[] = {
+	PMIC_REG_DEF(PMIC_ID_ADDR),
+	PMIC_REG_DEF(IRQLVL1_ADDR),
+	PMIC_REG_DEF(IRQLVL1_MASK_ADDR),
+	PMIC_REG_DEF(CHGRIRQ0_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET1_ADDR),
+	PMIC_REG_DEF(BATTDETCTRL_ADDR),
+	PMIC_REG_DEF(VBUSDETCTRL_ADDR),
+	PMIC_REG_DEF(VDCINDETCTRL_ADDR),
+	PMIC_REG_DEF(CHRGRIRQ1_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(CHGRCTRL0_ADDR),
+	PMIC_REG_DEF(CHGRCTRL1_ADDR),
+	PMIC_REG_DEF(CHGRSTATUS_ADDR),
+	PMIC_REG_DEF(USBIDCTRL_ADDR),
+	PMIC_REG_DEF(USBIDSTAT_ADDR),
+	PMIC_REG_DEF(WAKESRC_ADDR),
+	PMIC_REG_DEF(THRMBATZONE_ADDR_BC),
+	PMIC_REG_DEF(THRMZN0L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN0H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN1L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN1H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN2L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN2H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN3L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN3H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN4L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN4H_ADDR_BC),
+};
+
+/* Register set exposed via debugfs on Shady Cove PMICs (adds USB
+ * path/source-detect registers and uses _SC thermal-zone addresses).
+ */
+static struct pmic_regs_def pmic_regs_sc[] = {
+	PMIC_REG_DEF(PMIC_ID_ADDR),
+	PMIC_REG_DEF(IRQLVL1_ADDR),
+	PMIC_REG_DEF(IRQLVL1_MASK_ADDR),
+	PMIC_REG_DEF(CHGRIRQ0_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET1_ADDR),
+	PMIC_REG_DEF(BATTDETCTRL_ADDR),
+	PMIC_REG_DEF(VBUSDETCTRL_ADDR),
+	PMIC_REG_DEF(VDCINDETCTRL_ADDR),
+	PMIC_REG_DEF(CHRGRIRQ1_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(CHGRCTRL0_ADDR),
+	PMIC_REG_DEF(CHGRCTRL1_ADDR),
+	PMIC_REG_DEF(CHGRSTATUS_ADDR),
+	PMIC_REG_DEF(USBIDCTRL_ADDR),
+	PMIC_REG_DEF(USBIDSTAT_ADDR),
+	PMIC_REG_DEF(WAKESRC_ADDR),
+	PMIC_REG_DEF(USBPATH_ADDR),
+	PMIC_REG_DEF(USBSRCDETSTATUS_ADDR),
+	PMIC_REG_DEF(THRMBATZONE_ADDR_SC),
+	PMIC_REG_DEF(THRMZN0L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN0H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN1L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN1H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN2L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN2H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN3L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN3H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN4L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN4H_ADDR_SC),
+};
+
+/* TT (trim/test) registers exposed via debugfs; read through the
+ * indirect CHRTTADDR/CHRTTDATA mechanism (see pmic_read_tt()).
+ */
+static struct pmic_regs_def pmic_tt_regs[] = {
+	PMIC_REG_DEF(TT_I2CDADDR_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT0OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT1OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT2OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT3OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT4OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT5OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT6OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT7OS_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICCOS_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICCMASK_ADDR),
+	PMIC_REG_DEF(TT_CHRCVOS_ADDR),
+	PMIC_REG_DEF(TT_CHRCVMASK_ADDR),
+	PMIC_REG_DEF(TT_CHRCCOS_ADDR),
+	PMIC_REG_DEF(TT_CHRCCMASK_ADDR),
+	PMIC_REG_DEF(TT_LOWCHROS_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRMASK_ADDR),
+	PMIC_REG_DEF(TT_WDOGRSTOS_ADDR),
+	PMIC_REG_DEF(TT_WDOGRSTMASK_ADDR),
+	PMIC_REG_DEF(TT_CHGRENOS_ADDR),
+	PMIC_REG_DEF(TT_CHGRENMASK_ADDR),
+	PMIC_REG_DEF(TT_CUSTOMFIELDEN_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT0VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT1VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT2VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT3VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT4VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT5VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT6VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT7VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC100VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC150VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC500VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC900VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC1500VAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVEMRGLOWVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVCOLDVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVCOOLVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVWARMVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVHOTVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVEMRGHIVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCEMRGLOWVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCCOLDVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCCOOLVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCWARMVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCHOTVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCEMRGHIVAL_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRENVAL_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRDISVAL_ADDR),
+};
+
+/*
+ * dump_pmic_regs - log the vendor-specific PMIC register set to the
+ * kernel log (diagnostic aid).  Unknown vendor ids leave the count at
+ * zero, so the loop is a no-op in that case.
+ */
+void dump_pmic_regs(void)
+{
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	u32 pmic_reg_cnt = 0;
+	u32 reg_index;
+	u8 data;
+	int retval;
+	struct pmic_regs_def *pmic_regs = NULL;
+
+	if (vendor_id == BASINCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_bc);
+		pmic_regs = pmic_regs_bc;
+	} else if (vendor_id == SHADYCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_sc);
+		pmic_regs = pmic_regs_sc;
+	}
+
+	dev_info(chc.dev, "PMIC Register dump\n");
+	dev_info(chc.dev, "====================\n");
+
+	for (reg_index = 0; reg_index < pmic_reg_cnt; reg_index++) {
+
+		retval = intel_scu_ipc_ioread8(pmic_regs[reg_index].addr,
+				&data);
+		if (retval)
+			dev_err(chc.dev, "Error in reading %x\n",
+				pmic_regs[reg_index].addr);
+		else
+			dev_info(chc.dev, "0x%x=0x%x\n",
+				pmic_regs[reg_index].addr, data);
+	}
+	dev_info(chc.dev, "====================\n");
+}
+
+/*
+ * dump_pmic_tt_regs - log every TT register to the kernel log via the
+ * indirect read path (diagnostic aid).
+ */
+void dump_pmic_tt_regs(void)
+{
+	u32 pmic_tt_reg_cnt = ARRAY_SIZE(pmic_tt_regs);
+	u32 reg_index;
+	u8 data;
+	int retval;
+
+	dev_info(chc.dev, "PMIC CHRGR TT dump\n");
+	dev_info(chc.dev, "====================\n");
+
+	for (reg_index = 0; reg_index < pmic_tt_reg_cnt; reg_index++) {
+
+		retval = pmic_read_tt(pmic_tt_regs[reg_index].addr, &data);
+		if (retval)
+			dev_err(chc.dev, "Error in reading %x\n",
+				pmic_tt_regs[reg_index].addr);
+		else
+			dev_info(chc.dev, "0x%x=0x%x\n",
+				pmic_tt_regs[reg_index].addr, data);
+	}
+
+	dev_info(chc.dev, "====================\n");
+}
+/* Read-only seq_file fops for the two debugfs register views */
+static const struct file_operations pmic_chrgr_reg_fops = {
+	.open = pmic_chrgr_reg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations pmic_chrgr_tt_reg_fops = {
+	.open = pmic_chrgr_tt_reg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/*
+ * pmic_debugfs_init - expose the PMIC charger and TT registers under
+ * /sys/kernel/debug/pmic_ccsm/{pmic_ccsm_regs,pmic_ccsm_tt_regs}, one
+ * read-only file per register.  On any failure the whole tree is torn
+ * down again and an error is logged.
+ *
+ * Fix: the name buffer was filled with sprintf(); use snprintf() so an
+ * over-long register name cannot overflow name[PMIC_REG_NAME_LEN].
+ */
+static void pmic_debugfs_init(void)
+{
+	struct dentry *fentry;
+	struct dentry *pmic_regs_dir;
+	struct dentry *pmic_tt_regs_dir;
+
+	u32 reg_index;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	u32 pmic_reg_cnt = 0;
+	u32 pmic_tt_reg_cnt = ARRAY_SIZE(pmic_tt_regs);
+	char name[PMIC_REG_NAME_LEN] = {0};
+	struct pmic_regs_def *pmic_regs = NULL;
+
+	if (vendor_id == BASINCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_bc);
+		pmic_regs = pmic_regs_bc;
+	} else if (vendor_id == SHADYCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_sc);
+		pmic_regs = pmic_regs_sc;
+	}
+
+	/* Creating a directory under debug fs for charger */
+	charger_debug_dir = debugfs_create_dir(DRIVER_NAME, NULL);
+	if (charger_debug_dir == NULL)
+		goto debugfs_root_exit;
+
+	/* Create a directory for pmic charger registers */
+	pmic_regs_dir = debugfs_create_dir("pmic_ccsm_regs",
+			charger_debug_dir);
+
+	if (pmic_regs_dir == NULL)
+		goto debugfs_err_exit;
+
+	for (reg_index = 0; reg_index < pmic_reg_cnt; reg_index++) {
+
+		snprintf(name, sizeof(name), "%s",
+			pmic_regs[reg_index].reg_name);
+
+		fentry = debugfs_create_file(name,
+				S_IRUGO,
+				pmic_regs_dir,
+				&pmic_regs[reg_index].addr,
+				&pmic_chrgr_reg_fops);
+
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+
+	/* Create a directory for pmic tt charger registers */
+	pmic_tt_regs_dir = debugfs_create_dir("pmic_ccsm_tt_regs",
+			charger_debug_dir);
+
+	if (pmic_tt_regs_dir == NULL)
+		goto debugfs_err_exit;
+
+	for (reg_index = 0; reg_index < pmic_tt_reg_cnt; reg_index++) {
+
+		snprintf(name, sizeof(name), "%s",
+			pmic_tt_regs[reg_index].reg_name);
+
+		fentry = debugfs_create_file(name,
+				S_IRUGO,
+				pmic_tt_regs_dir,
+				&pmic_tt_regs[reg_index].addr,
+				&pmic_chrgr_tt_reg_fops);
+
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+
+	dev_dbg(chc.dev, "Debugfs created successfully!!");
+	return;
+
+debugfs_err_exit:
+	debugfs_remove_recursive(charger_debug_dir);
+debugfs_root_exit:
+	dev_err(chc.dev, "Error creating debugfs entry!!");
+	return;
+}
+
+/* Tear down everything created by pmic_debugfs_init() */
+static void pmic_debugfs_exit(void)
+{
+	if (charger_debug_dir)
+		debugfs_remove_recursive(charger_debug_dir);
+}
+#endif
+
+/*
+ * pmic_bat_zone_changed - CHGRIRQ0 BZIRQ handler: read the current
+ * thermal zone, update the cached health (OVERHEAT at either extreme
+ * zone, GOOD otherwise) and nudge the battery power supply so clients
+ * re-read properties.
+ *
+ * NOTE(review): an unrecognised vendor id leaves addr at 0 and the
+ * ioread8 then targets register 0 — confirm both vendor ids are the
+ * only possible values here.
+ */
+static void pmic_bat_zone_changed(void)
+{
+	int retval;
+	int cur_zone;
+	u16 addr = 0;
+	u8 data = 0;
+	struct power_supply *psy_bat;
+	int vendor_id;
+
+	vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	if (vendor_id == BASINCOVE_VENDORID)
+		addr = THRMBATZONE_ADDR_BC;
+	else if (vendor_id == SHADYCOVE_VENDORID)
+		addr = THRMBATZONE_ADDR_SC;
+
+	retval = intel_scu_ipc_ioread8(addr, &data);
+	if (retval) {
+		dev_err(chc.dev, "Error in reading battery zone\n");
+		return;
+	}
+
+	cur_zone = data & THRMBATZONE_MASK;
+	dev_info(chc.dev, "Battery Zone changed. Current zone is %d\n",
+			(data & THRMBATZONE_MASK));
+
+	/* if current zone is the top and bottom zones then report OVERHEAT
+	 */
+	if ((cur_zone == PMIC_BZONE_LOW) || (cur_zone == PMIC_BZONE_HIGH))
+		chc.health = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else
+		chc.health = POWER_SUPPLY_HEALTH_GOOD;
+
+	psy_bat = get_psy_battery();
+
+	if (psy_bat && psy_bat->external_power_changed)
+		psy_bat->external_power_changed(psy_bat);
+
+	return;
+}
+
+/* Latch the cached battery health from the over-heat interrupt state:
+ * asserted -> OVERHEAT, recovered -> GOOD.
+ */
+static void pmic_battery_overheat_handler(bool stat)
+{
+	chc.health = stat ? POWER_SUPPLY_HEALTH_OVERHEAT
+			  : POWER_SUPPLY_HEALTH_GOOD;
+}
+
+/* Return the battery health last derived from the PMIC interrupts */
+int pmic_get_health(void)
+{
+	return chc.health;
+}
+
+/*
+ * pmic_enable_vbus - switch VBUS drive (OTG host power) on or off.
+ * @enable: true to drive VBUS, false to stop.
+ *
+ * Enabling suppresses the charger watchdog kick (WDT_NOKICK_ENABLE)
+ * and, on Shady Cove, also sets the OTGMODE bit; disabling reverses
+ * both.  -EACCES from the SCU is mapped to 0 (see comment below).
+ */
+int pmic_enable_vbus(bool enable)
+{
+	int ret;
+	int vendor_id;
+
+	vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (enable) {
+		ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+				WDT_NOKICK_ENABLE, CHGRCTRL0_WDT_NOKICK_MASK);
+		if (ret)
+			return ret;
+
+		if (vendor_id == SHADYCOVE_VENDORID)
+			ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+					CHGRCTRL1_OTGMODE_MASK,
+					CHGRCTRL1_OTGMODE_MASK);
+	} else {
+		ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+				WDT_NOKICK_DISABLE, CHGRCTRL0_WDT_NOKICK_MASK);
+		if (ret)
+			return ret;
+
+		if (vendor_id == SHADYCOVE_VENDORID)
+			ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+					0x0, CHGRCTRL1_OTGMODE_MASK);
+	}
+
+	/* If access is blocked return success to avoid additional
+	 * error handling at client side
+	 */
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * pmic_enable_charging - gate the external charger via CHGRCTRL0.
+ * @enable: true clears EXTCHRDIS (charging allowed) after pulsing the
+ *          FTEMP event bit; false sets EXTCHRDIS.
+ *
+ * -EACCES from the SCU is mapped to 0 (see comment below).
+ */
+int pmic_enable_charging(bool enable)
+{
+	int ret;
+	u8 val;
+
+	if (enable) {
+		ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+			CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+		if (ret)
+			return ret;
+	}
+
+	val = (enable) ? 0 : EXTCHRDIS_ENABLE;
+
+	ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+			val, CHGRCTRL0_EXTCHRDIS_MASK);
+	/* If access is blocked return success to avoid additional
+	 * error handling at client side
+	 */
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* Program the CC register for one thermal zone; zone registers sit at
+ * descending TT addresses below TT_CHRCCHOTVAL_ADDR.
+ */
+static inline int update_zone_cc(int zone, u8 reg_val)
+{
+	u8 addr_cc = TT_CHRCCHOTVAL_ADDR - zone;
+	dev_dbg(chc.dev, "%s:%X=%X\n", __func__, addr_cc, reg_val);
+	return pmic_write_tt(addr_cc, reg_val);
+}
+
+/* Same addressing scheme for the per-zone CV register */
+static inline int update_zone_cv(int zone, u8 reg_val)
+{
+	u8 addr_cv = TT_CHRCVHOTVAL_ADDR - zone;
+	dev_dbg(chc.dev, "%s:%X=%X\n", __func__, addr_cv, reg_val);
+	return pmic_write_tt(addr_cv, reg_val);
+}
+
+/*
+ * update_zone_temp - write one zone's temperature threshold (as a
+ * 16-bit ADC code, high byte first) into the vendor-specific THRMZN
+ * register pair.  Returns -EINVAL for an unknown PMIC vendor.
+ */
+static inline int update_zone_temp(int zone, u16 adc_val)
+{
+	int ret;
+	u16 addr_tzone;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (vendor_id == BASINCOVE_VENDORID)
+		addr_tzone = THRMZN4H_ADDR_BC - (2 * zone);
+	else if (vendor_id == SHADYCOVE_VENDORID) {
+		/* to take care of address-discontinuity of zone-registers */
+		int offset_zone = zone;
+		if (zone >= 3)
+			offset_zone += 1;
+
+		addr_tzone = THRMZN4H_ADDR_SC - (2 * offset_zone);
+	} else {
+		dev_err(chc.dev, "%s: invalid vendor id %X\n", __func__, vendor_id);
+		return -EINVAL;
+	}
+
+	ret = intel_scu_ipc_iowrite8(addr_tzone, (u8)(adc_val >> 8));
+	if (unlikely(ret))
+		return ret;
+	dev_dbg(chc.dev, "%s:%X:%X=%X\n", __func__, addr_tzone,
+			(addr_tzone+1), adc_val);
+
+	return intel_scu_ipc_iowrite8(addr_tzone+1, (u8)(adc_val & 0xFF));
+}
+
+/**
+ * pmic_set_cc - program the per-temperature-zone charge current (CC)
+ * @new_cc: requested charge current; 0 means leave the PMIC untouched
+ *
+ * Clamps @new_cc against each zone's profile limit, writes only zones
+ * whose value changed, then raises a forced-temperature event so CCSM
+ * picks up the new settings.  Returns 0 on success or a negative errno
+ * from the zone register write.
+ */
+int pmic_set_cc(int new_cc)
+{
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+	struct ps_pse_mod_prof *r_bcprof = chc.runtime_bcprof;
+	int temp_mon_ranges;
+	int new_cc1;
+	int ret;
+	int i;
+	u8 reg_val = 0;
+
+	/* No need to write PMIC if CC = 0 */
+	if (!new_cc)
+		return 0;
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+				BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		/* Clamp the request to this zone's full-charge current */
+		new_cc1 = min_t(int, new_cc,
+				bcprof->temp_mon_range[i].full_chrg_cur);
+
+		/* Only touch the PMIC when the zone value actually changes */
+		if (new_cc1 != r_bcprof->temp_mon_range[i].full_chrg_cur) {
+			if (chc.pdata->cc_to_reg) {
+				/* FIX: was mojibake "®_val" (corrupted
+				 * "&reg_val" - the "&reg" was eaten as an
+				 * HTML entity) */
+				chc.pdata->cc_to_reg(new_cc1, &reg_val);
+				ret = update_zone_cc(i, reg_val);
+				if (unlikely(ret))
+					return ret;
+			}
+			r_bcprof->temp_mon_range[i].full_chrg_cur = new_cc1;
+		}
+	}
+
+	/* send the new CC and CV */
+	intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+		CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+
+	return 0;
+}
+
+/**
+ * pmic_set_cv - program the per-temperature-zone charge voltage (CV)
+ * @new_cv: requested charge voltage; 0 means leave the PMIC untouched
+ *
+ * Mirror of pmic_set_cc() for the voltage registers: clamps @new_cv per
+ * zone, writes only changed zones, then raises a forced-temperature
+ * event so CCSM reloads the settings.  Returns 0 on success or a
+ * negative errno from the zone register write.
+ */
+int pmic_set_cv(int new_cv)
+{
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+	struct ps_pse_mod_prof *r_bcprof = chc.runtime_bcprof;
+	int temp_mon_ranges;
+	int new_cv1;
+	int ret;
+	int i;
+	u8 reg_val = 0;
+
+	/* No need to write PMIC if CV = 0 */
+	if (!new_cv)
+		return 0;
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+				BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		/* Clamp the request to this zone's full-charge voltage */
+		new_cv1 = min_t(int, new_cv,
+				bcprof->temp_mon_range[i].full_chrg_vol);
+
+		/* Only touch the PMIC when the zone value actually changes */
+		if (new_cv1 != r_bcprof->temp_mon_range[i].full_chrg_vol) {
+			if (chc.pdata->cv_to_reg) {
+				/* FIX: was mojibake "®_val" (corrupted
+				 * "&reg_val") */
+				chc.pdata->cv_to_reg(new_cv1, &reg_val);
+				ret = update_zone_cv(i, reg_val);
+				if (unlikely(ret))
+					return ret;
+			}
+			r_bcprof->temp_mon_range[i].full_chrg_vol = new_cv1;
+		}
+	}
+
+	/* send the new CC and CV */
+	intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+		CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+
+	return 0;
+}
+
+/**
+ * pmic_set_ilimma - set the USB input current limit
+ * @ilim_ma: requested input current limit in mA
+ *
+ * Maps @ilim_ma to the CHGRCTRL1 register encoding via the lookup table
+ * and writes it.  An -EACCES result is reported as success (with a
+ * warning) to spare callers from extra error handling when IPC access
+ * is blocked.
+ */
+int pmic_set_ilimma(int ilim_ma)
+{
+	u8 reg_val;
+	int ret;
+
+	/* FIX: was mojibake "®_val" (corrupted "&reg_val") */
+	lookup_regval(pmic_inlmt, ARRAY_SIZE(pmic_inlmt),
+			ilim_ma, &reg_val);
+	dev_dbg(chc.dev, "Setting inlmt %d in register %x=%x\n", ilim_ma,
+		CHGRCTRL1_ADDR, reg_val);
+	ret = intel_scu_ipc_iowrite8(CHGRCTRL1_ADDR, reg_val);
+
+	/* If access is blocked return success to avoid additional
+	 * error handling at client side
+	 */
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * pmic_read_adc_val - read ADC value of specified sensors
+ * @channel: channel of the sensor to be sampled
+ * @sensor_val: pointer to the charger property to hold sampled value
+ * @chc : battery info pointer
+ *
+ * Returns 0 if success, a negative errno on IIO lookup/read failure or
+ * on an unsupported @channel.
+ */
+static int pmic_read_adc_val(int channel, int *sensor_val,
+			      struct pmic_chrgr_drv_context *chc)
+{
+	int val;
+	int ret;
+	struct iio_channel *indio_chan;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	indio_chan = iio_st_channel_get("BATTEMP", "BATTEMP0");
+#else
+	indio_chan = iio_channel_get(NULL, "BATTEMP0");
+#endif
+	if (IS_ERR_OR_NULL(indio_chan)) {
+		/* FIX: PTR_ERR(NULL) is 0, which the caller would treat as
+		 * success while *sensor_val is never written; report -ENODEV
+		 * for the NULL case instead.
+		 */
+		ret = indio_chan ? PTR_ERR(indio_chan) : -ENODEV;
+		goto exit;
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	ret = iio_st_read_channel_raw(indio_chan, &val);
+#else
+	ret = iio_read_channel_raw(indio_chan, &val);
+#endif
+	if (ret) {
+		dev_err(chc->dev, "IIO channel read error\n");
+		goto err_exit;
+	}
+
+	switch (channel) {
+	case GPADC_BATTEMP0:
+		ret = CONVERT_ADC_TO_TEMP(val, sensor_val);
+		break;
+	default:
+		dev_err(chc->dev, "invalid sensor%d", channel);
+		ret = -EINVAL;
+	}
+	dev_dbg(chc->dev, "pmic_ccsm pmic_ccsm.0: %s adc val=%x, %d temp=%d\n",
+			__func__, val, val, *sensor_val);
+
+err_exit:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	iio_st_channel_release(indio_chan);
+#else
+	iio_channel_release(indio_chan);
+#endif
+exit:
+	return ret;
+}
+
+/* Fetch the battery pack temperature via the BATTEMP0 ADC channel.
+ * Refuses with -ENODEV when no valid battery profile was detected,
+ * since the reading would be meaningless.
+ */
+int pmic_get_battery_pack_temp(int *temp)
+{
+	if (chc.invalid_batt)
+		return -ENODEV;
+	return pmic_read_adc_val(GPADC_BATTEMP0, temp, &chc);
+}
+
+/* Poll USBSRCDETSTATUS until hardware charger detection completes (or
+ * USBSRCDET_RETRY_CNT attempts elapse) and translate the detected PMIC
+ * charger type into the power_supply charger-type enum.  Returns the
+ * POWER_SUPPLY_CHARGER_TYPE_* value, or 0/..._NONE when detection fails.
+ *
+ * FIX: declared with an old-style empty parameter list "()"; use
+ * "(void)" for a proper prototype.
+ */
+static int get_charger_type(void)
+{
+	int ret, i = 0;
+	u8 val;
+	int chgr_type;
+
+	do {
+		ret = pmic_read_reg(USBSRCDETSTATUS_ADDR, &val);
+		if (ret != 0) {
+			dev_err(chc.dev,
+				"Error reading USBSRCDETSTAT-register 0x%2x\n",
+				USBSRCDETSTATUS_ADDR);
+			return 0;
+		}
+
+		i++;
+		dev_info(chc.dev, "Read USBSRCDETSTATUS val: %x\n", val);
+
+		if (val & USBSRCDET_SUSBHWDET_DETSUCC)
+			break;
+		else
+			msleep(USBSRCDET_SLEEP_TIME);
+	} while (i < USBSRCDET_RETRY_CNT);
+
+	if (!(val & USBSRCDET_SUSBHWDET_DETSUCC)) {
+		dev_err(chc.dev, "Charger detection unsuccessful after %dms\n",
+			i * USBSRCDET_SLEEP_TIME);
+		return 0;
+	}
+
+	chgr_type = (val & USBSRCDET_USBSRCRSLT_MASK) >> 2;
+	dev_info(chc.dev, "Charger type after detection complete: %d\n",
+		(val & USBSRCDET_USBSRCRSLT_MASK) >> 2);
+
+	switch (chgr_type) {
+	case PMIC_CHARGER_TYPE_SDP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+	case PMIC_CHARGER_TYPE_DCP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+	case PMIC_CHARGER_TYPE_CDP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	case PMIC_CHARGER_TYPE_ACA:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_ACA;
+	case PMIC_CHARGER_TYPE_SE1:
+		return POWER_SUPPLY_CHARGER_TYPE_SE1;
+	case PMIC_CHARGER_TYPE_MHL:
+		return POWER_SUPPLY_CHARGER_TYPE_MHL;
+	default:
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+}
+
+/* Build a power_supply_cable_props for a VBUS connect (@mask != 0) or
+ * disconnect (@mask == 0) on the SoC-internal USB PHY and notify the
+ * OTG chain with USB_EVENT_CHARGER.
+ */
+static void handle_internal_usbphy_notifications(int mask)
+{
+	/* FIX: zero-init; cap.ma used to be left uninitialized for charger
+	 * types not matched below (e.g. ACA/MHL/none).
+	 */
+	struct power_supply_cable_props cap = {0};
+
+	if (mask) {
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		cap.chrg_type = get_charger_type();
+		chc.charger_type = cap.chrg_type;
+	} else {
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		cap.chrg_type = chc.charger_type;
+	}
+
+	if (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		cap.ma = 0;
+	/* FIX: the CDP comparison used POWER_SUPPLY_TYPE_USB_CDP, from the
+	 * wrong enum family; all sibling checks use the CHARGER_TYPE
+	 * constants (matching what get_charger_type() returns).
+	 */
+	else if ((cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+		|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_CDP)
+		|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_SE1))
+		cap.ma = 1500;
+
+	dev_info(chc.dev, "Notifying OTG ev:%d, evt:%d, chrg_type:%d, mA:%d\n",
+			USB_EVENT_CHARGER, cap.chrg_evt, cap.chrg_type,
+			cap.ma);
+	atomic_notifier_call_chain(&chc.otg->notifier,
+			USB_EVENT_CHARGER, &cap);
+}
+
+/* ShadyCove-WA for VBUS removal detect issue */
+/*
+ * Called when the external charger reports a low-supply condition.
+ * On ShadyCove, VBUS removal may not raise its own interrupt, so this
+ * re-samples SCHGRIRQ1 (after a 50ms settle delay) and, if VBUS is in
+ * fact gone, synthesizes the disconnect notification to the OTG chain.
+ * Ignored on BasinCove or when no VBUS connection was being tracked.
+ * Returns 0 on success or the SCHGRIRQ1 read error.
+ */
+int pmic_handle_low_supply(void)
+{
+	int ret;
+	u8 val;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	dev_info(chc.dev, "Low-supply event received from external-charger\n");
+	if (vendor_id == BASINCOVE_VENDORID || !chc.vbus_connect_status) {
+		dev_err(chc.dev, "Ignore Low-supply event received\n");
+		return 0;
+	}
+
+	/* Allow VBUS to settle before re-reading the status register */
+	msleep(50);
+	ret = pmic_read_reg(SCHGRIRQ1_ADDR, &val);
+	if (ret) {
+		dev_err(chc.dev,
+			"Error reading SCHGRIRQ1-register 0x%2x\n",
+			SCHGRIRQ1_ADDR);
+		return ret;
+	}
+
+	if (!(val & SCHRGRIRQ1_SVBUSDET_MASK)) {
+		int mask = 0;
+
+		dev_info(chc.dev, "USB VBUS Removed. Notifying OTG driver\n");
+		chc.vbus_connect_status = false;
+
+		if (chc.is_internal_usb_phy)
+			handle_internal_usbphy_notifications(mask);
+		else
+			atomic_notifier_call_chain(&chc.otg->notifier,
+				USB_EVENT_VBUS, &mask);
+	}
+
+	return ret;
+}
+
+/*
+ * Dispatch level-0 (CHGRIRQ0) interrupts through an interrupt_info
+ * descriptor table.  For each table entry whose bit is set in @int_reg,
+ * log the event, run its int_handle callback, and - when the entry also
+ * watches a status bit - log and dispatch the current status via
+ * stat_handle.
+ */
+static void handle_level0_interrupt(u8 int_reg, u8 stat_reg,
+				struct interrupt_info int_info[],
+				int int_info_size)
+{
+	int i;
+	bool int_stat;
+	char *log_msg;
+
+	for (i = 0; i < int_info_size; ++i) {
+
+		/*continue if interrupt register bit is not set */
+		if (!(int_reg & int_info[i].int_reg_mask))
+			continue;
+
+		/*log message if interrupt bit is set */
+		if (int_info[i].log_msg_int_reg_true)
+			dev_err(chc.dev, "%s",
+				int_info[i].log_msg_int_reg_true);
+
+		/* interrupt bit is set.call int handler. */
+		if (int_info[i].int_handle)
+			int_info[i].int_handle();
+
+		/* continue if stat_reg_mask is zero which
+		 * means ignore status register
+		 */
+		if (!(int_info[i].stat_reg_mask))
+			continue;
+
+		dev_dbg(chc.dev,
+			"stat_reg=%X int_info[i].stat_reg_mask=%X",
+				stat_reg, int_info[i].stat_reg_mask);
+
+		/* check if the interrupt status is true */
+		int_stat = (stat_reg & int_info[i].stat_reg_mask);
+
+		/* log message */
+		log_msg = int_stat ? int_info[i].log_msg_stat_true :
+				int_info[i].log_msg_stat_false;
+
+		if (log_msg)
+			dev_err(chc.dev, "%s", log_msg);
+
+		/* call status handler function */
+		if (int_info[i].stat_handle)
+			int_info[i].stat_handle(int_stat);
+
+	}
+
+	return ;
+}
+
+/*
+ * Handle level-1 (CHRGRIRQ1) interrupts: USB ID detect/removal and VBUS
+ * detect/removal, forwarding each as an OTG notifier event.
+ *
+ * NOTE(review): "mask" is computed once from the whole registers, not
+ * per-bit; when both ID and VBUS bits fire in the same interrupt they
+ * share one connect/disconnect polarity - preserved as-is.
+ *
+ * FIX: removed the unused locals "usb_id_sts" and "ret".
+ */
+static void handle_level1_interrupt(u8 int_reg, u8 stat_reg)
+{
+	int mask;
+
+	if (!int_reg)
+		return;
+
+	mask = !!(int_reg & stat_reg);
+	if (int_reg & CHRGRIRQ1_SUSBIDDET_MASK) {
+		if (mask)
+			dev_info(chc.dev,
+				"USB ID Detected. Notifying OTG driver\n");
+		else
+			dev_info(chc.dev,
+				"USB ID Removed. Notifying OTG driver\n");
+		atomic_notifier_call_chain(&chc.otg->notifier,
+				USB_EVENT_ID, &mask);
+	}
+
+	if (int_reg & CHRGRIRQ1_SVBUSDET_MASK) {
+		if (mask) {
+			dev_info(chc.dev,
+				"USB VBUS Detected. Notifying OTG driver\n");
+			chc.vbus_connect_status = true;
+		} else {
+			dev_info(chc.dev, "USB VBUS Removed. Notifying OTG driver\n");
+			chc.vbus_connect_status = false;
+		}
+
+		if (chc.is_internal_usb_phy)
+			handle_internal_usbphy_notifications(mask);
+		else
+			atomic_notifier_call_chain(&chc.otg->notifier,
+				USB_EVENT_VBUS, &mask);
+	}
+
+	return;
+}
+/*
+ * Workqueue handler: drain the event queue filled by the threaded IRQ
+ * handler, dispatching level-0 and level-1 interrupt payloads.  Each
+ * event was kzalloc'd by the producer and is freed here after dispatch.
+ * The queue lock is held across the whole drain.
+ */
+static void pmic_event_worker(struct work_struct *work)
+{
+	struct pmic_event *evt, *tmp;
+
+	dev_dbg(chc.dev, "%s\n", __func__);
+
+	mutex_lock(&chc.evt_queue_lock);
+	list_for_each_entry_safe(evt, tmp, &chc.evt_queue, node) {
+		list_del(&evt->node);
+
+		dev_dbg(chc.dev, "CHGRIRQ0=%X SCHGRIRQ0=%X CHGRIRQ1=%x SCHGRIRQ1=%X\n",
+				evt->chgrirq0_int, evt->chgrirq0_stat,
+				evt->chgrirq1_int, evt->chgrirq1_stat);
+		if (evt->chgrirq0_int)
+			handle_level0_interrupt(evt->chgrirq0_int,
+				evt->chgrirq0_stat, chgrirq0_info,
+				ARRAY_SIZE(chgrirq0_info));
+
+		if (evt->chgrirq1_stat)
+			handle_level1_interrupt(evt->chgrirq1_int,
+							evt->chgrirq1_stat);
+		kfree(evt);
+	}
+
+	mutex_unlock(&chc.evt_queue_lock);
+}
+
+/*
+ * Hard IRQ handler: peek at the mirrored interrupt bits in PMIC SRAM
+ * and decide whether this (shared) interrupt is ours.  Real processing
+ * happens in pmic_thread_handler().
+ *
+ * FIX: removed the unused local "mask".
+ */
+static irqreturn_t pmic_isr(int irq, void *data)
+{
+	u16 pmic_intr;
+	u8 chgrirq0_int;
+	u8 chgrirq1_int;
+
+	pmic_intr = ioread16(chc.pmic_intr_iomap);
+	chgrirq0_int = (u8)pmic_intr;
+	chgrirq1_int = (u8)(pmic_intr >> 8);
+
+	/* Not our interrupt: no level-1 bits and no charger level-0 bits */
+	if (!chgrirq1_int && !(chgrirq0_int & PMIC_CHRGR_INT0_MASK))
+		return IRQ_NONE;
+
+	dev_dbg(chc.dev, "%s", __func__);
+
+	return IRQ_WAKE_THREAD;
+}
+/*
+ * Threaded IRQ handler: snapshot the interrupt bits from PMIC SRAM plus
+ * the second-level status registers into a freshly allocated pmic_event,
+ * queue it for pmic_event_worker(), and re-unmask IRQLVL1.  Interrupts
+ * belonging to an external charger (when the battery is invalid) are
+ * acknowledged and left for the external charger driver.
+ *
+ * FIX: corrected "Faile to read" -> "Failed to read" in both error
+ * messages; logic otherwise unchanged.
+ */
+static irqreturn_t pmic_thread_handler(int id, void *data)
+{
+	u16 pmic_intr;
+	struct pmic_event *evt;
+	int ret;
+
+	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
+	if (evt == NULL) {
+		dev_dbg(chc.dev, "Error allocating evt structure in fn:%s\n",
+			__func__);
+		return IRQ_NONE;
+	}
+
+	pmic_intr = ioread16(chc.pmic_intr_iomap);
+	evt->chgrirq0_int = (u8)pmic_intr;
+	evt->chgrirq1_int = (u8)(pmic_intr >> 8);
+	dev_dbg(chc.dev, "irq0=%x irq1=%x\n",
+		evt->chgrirq0_int, evt->chgrirq1_int);
+
+	/*
+	   In case this is an external charger interrupt, we are
+	   clearing the level 1 irq register and let external charger
+	   driver handle the interrupt.
+	 */
+
+	if (!(evt->chgrirq1_int) &&
+		!(evt->chgrirq0_int & PMIC_CHRGR_CCSM_INT0_MASK)) {
+		intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+				IRQLVL1_CHRGR_MASK);
+		if ((chc.invalid_batt) &&
+			(evt->chgrirq0_int & PMIC_CHRGR_EXT_CHRGR_INT_MASK)) {
+			dev_dbg(chc.dev, "Handling external charger interrupt!!\n");
+			kfree(evt);
+			return IRQ_HANDLED;
+		}
+		kfree(evt);
+		dev_dbg(chc.dev, "Unhandled interrupt!!\n");
+		return IRQ_NONE;
+	}
+
+	if (evt->chgrirq0_int & PMIC_CHRGR_CCSM_INT0_MASK) {
+		ret = intel_scu_ipc_ioread8(SCHGRIRQ0_ADDR,
+				&evt->chgrirq0_stat);
+		if (ret) {
+			dev_err(chc.dev,
+				"%s: Error(%d) in intel_scu_ipc_ioread8. Failed to read SCHGRIRQ0_ADDR\n",
+					__func__, ret);
+			kfree(evt);
+			goto end;
+		}
+	}
+	if (evt->chgrirq1_int) {
+		ret = intel_scu_ipc_ioread8(SCHGRIRQ1_ADDR,
+				&evt->chgrirq1_stat);
+		if (ret) {
+			dev_err(chc.dev,
+				"%s: Error(%d) in intel_scu_ipc_ioread8. Failed to read SCHGRIRQ1_ADDR\n",
+					__func__, ret);
+			kfree(evt);
+			goto end;
+		}
+	}
+
+	INIT_LIST_HEAD(&evt->node);
+
+	mutex_lock(&chc.evt_queue_lock);
+	list_add_tail(&evt->node, &chc.evt_queue);
+	mutex_unlock(&chc.evt_queue_lock);
+
+	queue_work(system_nrt_wq, &chc.evt_work);
+
+end:
+	/*clear first level IRQ */
+	dev_dbg(chc.dev, "Clearing IRQLVL1_MASK_ADDR\n");
+	intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * One-time PMIC CCSM programming from the battery charge profile: for
+ * each temperature-monitoring zone write the zone temperature bounds,
+ * charge current (CC) and charge voltage (CV), then the lowest temp
+ * limit, disable the hot/cold low-current TT override, and program the
+ * 100mA USB input-current value.  Returns 0 or the first errno hit.
+ *
+ * FIXES: three occurrences of mojibake "®_val" (corrupted "&reg_val")
+ * restored; reg_val now zero-initialized since it is written to the
+ * PMIC even when the cc_to_reg/cv_to_reg callbacks are NULL.
+ */
+static int pmic_init(void)
+{
+	int ret = 0, i, temp_mon_ranges;
+	u16 adc_val;
+	u8 reg_val = 0;
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+				BATT_TEMP_NR_RNG);
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		ret =
+		    CONVERT_TEMP_TO_ADC(bcprof->temp_mon_range[i].temp_up_lim,
+					(int *)&adc_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error converting temperature for zone %d!!\n",
+				i);
+			return ret;
+		}
+		ret = update_zone_temp(i, adc_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone temp for zone %d\n",
+				i);
+			return ret;
+		}
+
+		if (chc.pdata->cc_to_reg)
+			chc.pdata->cc_to_reg(bcprof->temp_mon_range[i].
+					full_chrg_cur, &reg_val);
+
+		ret = update_zone_cc(i, reg_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone cc for zone %d\n",
+				i);
+			return ret;
+		}
+
+		if (chc.pdata->cv_to_reg)
+			chc.pdata->cv_to_reg(bcprof->temp_mon_range[i].
+					full_chrg_vol, &reg_val);
+
+		ret = update_zone_cv(i, reg_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone cv for zone %d\n",
+				i);
+			return ret;
+		}
+
+		/* Write lowest temp limit */
+		if (i == (bcprof->temp_mon_ranges - 1)) {
+			ret = CONVERT_TEMP_TO_ADC(bcprof->temp_low_lim,
+							(int *)&adc_val);
+			if (unlikely(ret)) {
+				dev_err(chc.dev,
+					"Error converting low lim temp!!\n");
+				return ret;
+			}
+
+			ret = update_zone_temp(i+1, adc_val);
+
+			if (unlikely(ret)) {
+				dev_err(chc.dev,
+					"Error updating last temp for zone %d\n",
+					i+1);
+				return ret;
+			}
+		}
+	}
+	ret = pmic_update_tt(TT_CUSTOMFIELDEN_ADDR,
+				TT_HOT_COLD_LC_MASK,
+				TT_HOT_COLD_LC_DIS);
+
+	if (unlikely(ret)) {
+		dev_err(chc.dev, "Error updating TT_CUSTOMFIELD_EN reg\n");
+		return ret;
+	}
+
+	if (chc.pdata->inlmt_to_reg)
+		chc.pdata->inlmt_to_reg(USBINPUTICC100VAL, &reg_val);
+
+	ret = pmic_write_tt(TT_USBINPUTICC100VAL_ADDR, reg_val);
+	return ret;
+}
+
+/* Dump a battery charge profile (header fields plus every temperature
+ * monitoring zone, capped at BATT_TEMP_NR_RNG) to the kernel log at
+ * info level.  Diagnostic only; no side effects.
+ */
+static inline void print_ps_pse_mod_prof(struct ps_pse_mod_prof *bcprof)
+{
+	int i, temp_mon_ranges;
+
+	dev_info(chc.dev, "ChrgProf: batt_id:%s\n", bcprof->batt_id);
+	dev_info(chc.dev, "ChrgProf: battery_type:%u\n", bcprof->battery_type);
+	dev_info(chc.dev, "ChrgProf: capacity:%u\n", bcprof->capacity);
+	dev_info(chc.dev, "ChrgProf: voltage_max:%u\n", bcprof->voltage_max);
+	dev_info(chc.dev, "ChrgProf: chrg_term_ma:%u\n", bcprof->chrg_term_ma);
+	dev_info(chc.dev, "ChrgProf: low_batt_mV:%u\n", bcprof->low_batt_mV);
+	dev_info(chc.dev, "ChrgProf: disch_tmp_ul:%d\n", bcprof->disch_tmp_ul);
+	dev_info(chc.dev, "ChrgProf: disch_tmp_ll:%d\n", bcprof->disch_tmp_ll);
+	dev_info(chc.dev, "ChrgProf: temp_mon_ranges:%u\n",
+			bcprof->temp_mon_ranges);
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+				BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		dev_info(chc.dev, "ChrgProf: temp_up_lim[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].temp_up_lim);
+		dev_info(chc.dev, "ChrgProf: full_chrg_vol[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].full_chrg_vol);
+		dev_info(chc.dev, "ChrgProf: full_chrg_cur[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].full_chrg_cur);
+		dev_info(chc.dev, "ChrgProf: maint_chrgr_vol_ll[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_vol_ll);
+		dev_info(chc.dev, "ChrgProf: maint_chrgr_vol_ul[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_vol_ul);
+		dev_info(chc.dev, "ChrgProf: maint_chrg_cur[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_cur);
+	}
+	dev_info(chc.dev, "ChrgProf: temp_low_lim:%d\n", bcprof->temp_low_lim);
+}
+
+/*
+ * Locate the profile zone whose upper limit equals BATT_TEMP_WARM and
+ * compute how to split it so the profile reaches MIN_BATT_PROF zones.
+ *
+ * Outputs: @num_zones = extra zones needed (<=0 means no split needed,
+ * returns 0 in that case); @interval = temperature step between the new
+ * sub-zones; @temp_up_lim = upper limit of the zone being split.
+ * Returns the index of the zone to split.
+ *
+ * The span to split runs from the warm zone's upper limit to either the
+ * next zone's upper limit or, for the last zone, to temp_low_lim.
+ */
+static int find_tempzone_index(short int *interval,
+				int *num_zones,
+				short int *temp_up_lim)
+{
+	struct ps_pse_mod_prof *bprof = chc.sfi_bcprof->batt_prof;
+	int up_lim_index = 0, low_lim_index = -1;
+	int diff = 0;
+	int i;
+
+	*num_zones = MIN_BATT_PROF - bprof->temp_mon_ranges + 1;
+	if ((*num_zones) <= 0)
+		return 0;
+
+	for (i = 0 ; i < bprof->temp_mon_ranges ; i++) {
+		if (bprof->temp_mon_range[i].temp_up_lim == BATT_TEMP_WARM)
+			up_lim_index = i;
+	}
+
+	low_lim_index = up_lim_index + 1;
+
+	if (low_lim_index == bprof->temp_mon_ranges)
+		diff = bprof->temp_low_lim -
+			bprof->temp_mon_range[up_lim_index].temp_up_lim;
+	else
+		diff = bprof->temp_mon_range[low_lim_index].temp_up_lim -
+			bprof->temp_mon_range[up_lim_index].temp_up_lim;
+
+	*interval = diff / (*num_zones);
+	*temp_up_lim = bprof->temp_mon_range[up_lim_index].temp_up_lim;
+
+	return up_lim_index;
+}
+
+
+/*
+ * Copy battery profile @bprof into @new_prof, splitting the warm zone
+ * into multiple evenly spaced sub-zones when the source profile has
+ * fewer than the minimum number of zones (NEED_ZONE_SPLIT).  When no
+ * split is needed the profile is copied verbatim.
+ */
+static void set_pmic_batt_prof(struct ps_pse_mod_prof *new_prof,
+				struct ps_pse_mod_prof *bprof)
+{
+	int num_zones;
+	int split_index;
+	int i, j = 0;
+	short int temp_up_lim;
+	short int interval;
+
+	if ((new_prof == NULL) || (bprof == NULL))
+		return;
+
+	if (!NEED_ZONE_SPLIT(bprof)) {
+		dev_info(chc.dev, "No need to split the zones!!\n");
+		memcpy(new_prof, bprof, sizeof(struct ps_pse_mod_prof));
+		return;
+	}
+
+	strcpy(&(new_prof->batt_id[0]), &(bprof->batt_id[0]));
+	new_prof->battery_type = bprof->battery_type;
+	new_prof->capacity = bprof->capacity;
+	new_prof->voltage_max = bprof->voltage_max;
+	new_prof->chrg_term_ma = bprof->chrg_term_ma;
+	new_prof->low_batt_mV = bprof->low_batt_mV;
+	new_prof->disch_tmp_ul = bprof->disch_tmp_ul;
+	new_prof->disch_tmp_ll = bprof->disch_tmp_ll;
+
+	split_index = find_tempzone_index(&interval, &num_zones, &temp_up_lim);
+
+	/* j tracks how many extra zones the split inserted, so source zone
+	 * i lands at destination index i+j.
+	 */
+	for (i = 0 ; i < bprof->temp_mon_ranges; i++) {
+		if ((i == split_index) && (num_zones > 0)) {
+			/* Replicate the split zone num_zones times, stepping
+			 * the upper limit by interval each time.
+			 */
+			for (j = 0; j < num_zones; j++,
+					temp_up_lim += interval) {
+				memcpy(&new_prof->temp_mon_range[i+j],
+					&bprof->temp_mon_range[i],
+					sizeof(bprof->temp_mon_range[i]));
+				new_prof->temp_mon_range[i+j].temp_up_lim =
+					temp_up_lim;
+			}
+			j--;
+		} else {
+			memcpy(&new_prof->temp_mon_range[i+j],
+				&bprof->temp_mon_range[i],
+				sizeof(bprof->temp_mon_range[i]));
+		}
+	}
+
+	new_prof->temp_mon_ranges = i+j;
+	new_prof->temp_low_lim = bprof->temp_low_lim;
+
+	return;
+}
+
+
+/*
+ * At probe time, synthesize an event from the current SCHGRIRQ0/1
+ * status registers so conditions already present at boot (e.g. a cable
+ * plugged in) are processed, then refresh the battery temperature zone.
+ *
+ * FIXES: the first ioread8's error code was silently overwritten by the
+ * second read; the event struct leaked when no initial status bits were
+ * set; the local "mask" was unused.
+ */
+static int pmic_check_initial_events(void)
+{
+	struct pmic_event *evt;
+	int ret;
+
+	evt = kzalloc(sizeof(struct pmic_event), GFP_KERNEL);
+	if (evt == NULL) {
+		dev_dbg(chc.dev, "Error allocating evt structure in fn:%s\n",
+			__func__);
+		return -1;
+	}
+
+	ret = intel_scu_ipc_ioread8(SCHGRIRQ0_ADDR, &evt->chgrirq0_stat);
+	if (ret) {
+		kfree(evt);
+		return ret;
+	}
+	evt->chgrirq0_int = evt->chgrirq0_stat;
+
+	ret = intel_scu_ipc_ioread8(SCHGRIRQ1_ADDR, &evt->chgrirq1_stat);
+	if (ret) {
+		kfree(evt);
+		return ret;
+	}
+	evt->chgrirq1_int = evt->chgrirq1_stat;
+
+	if (evt->chgrirq1_stat || evt->chgrirq0_int) {
+		INIT_LIST_HEAD(&evt->node);
+		mutex_lock(&chc.evt_queue_lock);
+		list_add_tail(&evt->node, &chc.evt_queue);
+		mutex_unlock(&chc.evt_queue_lock);
+		schedule_work(&chc.evt_work);
+	} else {
+		/* Nothing pending: the worker will never free it, do so here */
+		kfree(evt);
+	}
+
+	pmic_bat_zone_changed();
+
+	return ret;
+}
+
+/**
+ * pmic_chrgr_probe - PMIC charger probe function
+ * @pdev: pmic platform device structure
+ * Context: can sleep
+ *
+ * pmic charger driver initializes its internal data
+ * structure and other infrastructure components for it
+ * to work as expected.
+ *
+ * Sequence: read the PMIC ID, detect the USB PHY variant (ShadyCove),
+ * load the battery profile, switch the charger to SW control, program
+ * the PMIC from the profile, map the interrupt SRAM, get the OTG
+ * transceiver, register the threaded IRQ, replay initial events, and
+ * finally unmask the charger interrupt hierarchy.
+ */
+static int pmic_chrgr_probe(struct platform_device *pdev)
+{
+	int retval = 0;
+	u8 val;
+
+	if (!pdev)
+		return -ENODEV;
+
+	chc.health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	chc.dev = &pdev->dev;
+	chc.irq = platform_get_irq(pdev, 0);
+	chc.pdata = pdev->dev.platform_data;
+	platform_set_drvdata(pdev, &chc);
+
+	if (chc.pdata == NULL) {
+		dev_err(chc.dev, "Platform data not initialized\n");
+		return -EFAULT;
+	}
+
+	retval = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &chc.pmic_id);
+	if (retval) {
+		dev_err(chc.dev,
+			"Error reading PMIC ID register\n");
+		return retval;
+	}
+
+	dev_info(chc.dev, "PMIC-ID: %x\n", chc.pmic_id);
+	/* Only ShadyCove may route USB through the SoC-internal PHY */
+	if ((chc.pmic_id & PMIC_VENDOR_ID_MASK) == SHADYCOVE_VENDORID) {
+		/* NOTE(review): reads USBPATH_ADDR but the error message
+		 * names CHGRSTATUS - confirm which register is intended.
+		 */
+		retval = pmic_read_reg(USBPATH_ADDR, &val);
+		if (retval) {
+			dev_err(chc.dev,
+				"Error reading CHGRSTATUS-register 0x%2x\n",
+				CHGRSTATUS_ADDR);
+			return retval;
+		}
+
+		if (val & USBPATH_USBSEL_MASK) {
+			dev_info(chc.dev, "SOC-Internal-USBPHY used\n");
+			chc.is_internal_usb_phy = true;
+		} else
+			dev_info(chc.dev, "External-USBPHY used\n");
+	}
+
+	chc.sfi_bcprof = kzalloc(sizeof(struct ps_batt_chg_prof),
+				GFP_KERNEL);
+	if (chc.sfi_bcprof == NULL) {
+		dev_err(chc.dev,
+			"Error allocating memeory SFI battery profile\n");
+		return -ENOMEM;
+	}
+
+	/* A missing battery profile is not fatal: mark the battery invalid
+	 * and continue so external-charger handling still works.
+	 */
+	retval = get_batt_prop(chc.sfi_bcprof);
+	if (retval) {
+		dev_err(chc.dev,
+			"Error reading battery profile from battid frmwrk\n");
+		kfree(chc.sfi_bcprof);
+		chc.invalid_batt = true;
+		chc.sfi_bcprof = NULL;
+	}
+
+	retval = intel_scu_ipc_update_register(CHGRCTRL0_ADDR, SWCONTROL_ENABLE,
+			CHGRCTRL0_SWCONTROL_MASK);
+	if (retval)
+		dev_err(chc.dev, "Error enabling sw control. Charging may continue in h/w control mode\n");
+
+	if (!chc.invalid_batt) {
+		chc.actual_bcprof = kzalloc(sizeof(struct ps_pse_mod_prof),
+					GFP_KERNEL);
+		if (chc.actual_bcprof == NULL) {
+			dev_err(chc.dev,
+				"Error allocating mem for local battery profile\n");
+			kfree(chc.sfi_bcprof);
+			return -ENOMEM;
+		}
+
+		chc.runtime_bcprof = kzalloc(sizeof(struct ps_pse_mod_prof),
+					GFP_KERNEL);
+		if (chc.runtime_bcprof == NULL) {
+			dev_err(chc.dev,
+				"Error allocating mem for runtime batt profile\n");
+			kfree(chc.sfi_bcprof);
+			kfree(chc.actual_bcprof);
+			return -ENOMEM;
+		}
+
+		/* Expand the SFI profile into the zone-split working copy */
+		set_pmic_batt_prof(chc.actual_bcprof,
+				chc.sfi_bcprof->batt_prof);
+		print_ps_pse_mod_prof(chc.actual_bcprof);
+		retval = pmic_init();
+		if (retval)
+			dev_err(chc.dev, "Error in Initializing PMIC. Continue in h/w charging mode\n");
+
+		memcpy(chc.runtime_bcprof, chc.actual_bcprof,
+			sizeof(struct ps_pse_mod_prof));
+	}
+
+	chc.pmic_intr_iomap = ioremap_nocache(PMIC_SRAM_INTR_ADDR, 8);
+	if (!chc.pmic_intr_iomap) {
+		dev_err(&pdev->dev, "ioremap Failed\n");
+		retval = -ENOMEM;
+		goto ioremap_failed;
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	chc.otg = usb_get_transceiver();
+#else
+	chc.otg = usb_get_phy(USB_PHY_TYPE_USB2);
+#endif
+	if (!chc.otg || IS_ERR(chc.otg)) {
+		dev_err(&pdev->dev, "Failed to get otg transceiver!!\n");
+		retval = -ENOMEM;
+		goto otg_req_failed;
+	}
+
+	INIT_WORK(&chc.evt_work, pmic_event_worker);
+	INIT_LIST_HEAD(&chc.evt_queue);
+	mutex_init(&chc.evt_queue_lock);
+
+	/* register interrupt */
+	retval = request_threaded_irq(chc.irq, pmic_isr,
+			pmic_thread_handler,
+			IRQF_SHARED|IRQF_NO_SUSPEND ,
+			DRIVER_NAME, &chc);
+	if (retval) {
+		dev_err(&pdev->dev,
+			"Error in request_threaded_irq(irq(%d)!!\n",
+			chc.irq);
+		goto otg_req_failed;
+	}
+
+	retval = pmic_check_initial_events();
+	if (unlikely(retval)) {
+		dev_err(&pdev->dev,
+			"Error posting initial events\n");
+		goto req_irq_failed;
+	}
+
+	/* unmask charger interrupts in second level IRQ register*/
+	/* NOTE(review): this retval is overwritten by the next call before
+	 * being checked - an MCHGRIRQ0 unmask failure goes unnoticed.
+	 */
+	retval = intel_scu_ipc_update_register(MCHGRIRQ0_ADDR, 0x00,
+			PMIC_CHRGR_INT0_MASK);
+	/* unmask charger interrupts in second level IRQ register*/
+	retval = intel_scu_ipc_iowrite8(MCHGRIRQ1_ADDR, 0x00);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+
+	/* unmask IRQLVL1 register */
+	retval = intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+	retval = intel_scu_ipc_update_register(USBIDCTRL_ADDR,
+			ACADETEN_MASK | USBIDEN_MASK,
+			ACADETEN_MASK | USBIDEN_MASK);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+	chc.health = POWER_SUPPLY_HEALTH_GOOD;
+#ifdef CONFIG_DEBUG_FS
+	pmic_debugfs_init();
+#endif
+	return 0;
+
+unmask_irq_failed:
+req_irq_failed:
+	free_irq(chc.irq, &chc);
+otg_req_failed:
+	iounmap(chc.pmic_intr_iomap);
+ioremap_failed:
+	kfree(chc.sfi_bcprof);
+	kfree(chc.actual_bcprof);
+	kfree(chc.runtime_bcprof);
+	return retval;
+}
+
+/* Shared teardown used by remove: currently only tears down debugfs. */
+static void pmic_chrgr_do_exit_ops(struct pmic_chrgr_drv_context *chc)
+{
+	/*TODO:
+	 * If charger is connected send IPC message to SCU to continue charging
+	 */
+#ifdef CONFIG_DEBUG_FS
+	pmic_debugfs_exit();
+#endif
+}
+
+/**
+ * pmic_chrgr_remove - PMIC Charger driver remove
+ * @pdev: PMIC charger platform device structure
+ * Context: can sleep
+ *
+ * Releases everything pmic_chrgr_probe() acquired: debugfs, the IRQ,
+ * the interrupt SRAM mapping and the battery profile allocations.
+ */
+static int pmic_chrgr_remove(struct platform_device *pdev)
+{
+	struct pmic_chrgr_drv_context *ctx = platform_get_drvdata(pdev);
+
+	if (!ctx)
+		return 0;
+
+	pmic_chrgr_do_exit_ops(ctx);
+	free_irq(ctx->irq, ctx);
+	iounmap(ctx->pmic_intr_iomap);
+	kfree(ctx->sfi_bcprof);
+	kfree(ctx->actual_bcprof);
+	kfree(ctx->runtime_bcprof);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* System sleep hooks: nothing to save/restore yet, trace only. */
+static int pmic_chrgr_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM hooks: no device state to manage yet, trace only. */
+static int pmic_chrgr_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+#endif
+
+/*********************************************************************
+ * Driver initialisation and finalization
+ *********************************************************************/
+
+/* Power-management callbacks wired to the (currently stub) handlers above */
+static const struct dev_pm_ops pmic_chrgr_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pmic_chrgr_suspend,
+				pmic_chrgr_resume)
+	SET_RUNTIME_PM_OPS(pmic_chrgr_runtime_suspend,
+				pmic_chrgr_runtime_resume,
+				pmic_chrgr_runtime_idle)
+};
+
+/* Platform driver glue; registered lazily from the rpmsg probe below */
+static struct platform_driver pmic_chrgr_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .owner = THIS_MODULE,
+		   .pm = &pmic_chrgr_pm_ops,
+		   },
+	.probe = pmic_chrgr_probe,
+	.remove = pmic_chrgr_remove,
+};
+
+/* Register the platform driver; called from the rpmsg channel probe. */
+static int pmic_chrgr_init(void)
+{
+	return platform_driver_register(&pmic_chrgr_driver);
+}
+
+/* Unregister the platform driver; called from the rpmsg channel remove. */
+static void pmic_chrgr_exit(void)
+{
+	platform_driver_unregister(&pmic_chrgr_driver);
+}
+
+/* rpmsg channel probe: once the SCU-side channel exists, bring up the
+ * actual platform driver.
+ */
+static int pmic_ccsm_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		return -ENODEV;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic_ccsm rpmsg device\n");
+
+	return pmic_chrgr_init();
+}
+
+/* rpmsg channel teardown: unregister the platform driver. */
+static void pmic_ccsm_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	pmic_chrgr_exit();
+	dev_info(&rpdev->dev, "Removed pmic_ccsm rpmsg device\n");
+}
+
+/* rpmsg receive callback: this driver never expects inbound messages,
+ * so just warn and hex-dump whatever arrived for diagnosis.
+ */
+static void pmic_ccsm_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+/* Channel name this driver binds to, as announced by the SCU firmware */
+static struct rpmsg_device_id pmic_ccsm_rpmsg_id_table[] = {
+	{ .name = "rpmsg_pmic_ccsm" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_ccsm_rpmsg_id_table);
+
+/* rpmsg driver descriptor tying the channel id to the callbacks above */
+static struct rpmsg_driver pmic_ccsm_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_ccsm_rpmsg_id_table,
+	.probe		= pmic_ccsm_rpmsg_probe,
+	.callback	= pmic_ccsm_rpmsg_cb,
+	.remove		= pmic_ccsm_rpmsg_remove,
+};
+
+/* Module entry: register the rpmsg driver (see late_initcall below). */
+static int __init pmic_ccsm_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_ccsm_rpmsg);
+}
+
+/* Module exit: unregister the rpmsg driver.
+ *
+ * FIX: dropped "return unregister_rpmsg_driver(...)" - returning the
+ * "value" of a void call from a void function violates an ISO C
+ * constraint (only accepted as a GNU extension).
+ */
+static void __exit pmic_ccsm_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&pmic_ccsm_rpmsg);
+}
+/* Defer the init call so that dependent drivers will be loaded. Using async
+ * for parallel driver initialization */
+late_initcall(pmic_ccsm_rpmsg_init);
+module_exit(pmic_ccsm_rpmsg_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("PMIC Charger Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * pmic_ccsm.h - Intel MID PMIC CCSM Driver header file
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ */
+
+#ifndef __PMIC_CCSM_H__
+#define __PMIC_CCSM_H__
+
+#include <asm/pmic_pdata.h>
+/*********************************************************************
+ * Generic defines
+ *********************************************************************/
+
+/* Single-bit masks (bit 7..0) used to compose the register field masks
+ * throughout this header.
+ */
+#define D7 (1 << 7)
+#define D6 (1 << 6)
+#define D5 (1 << 5)
+#define D4 (1 << 4)
+#define D3 (1 << 3)
+#define D2 (1 << 2)
+#define D1 (1 << 1)
+#define D0 (1 << 0)
+
+#define PMIC_ID_ADDR 0x00
+
+#define PMIC_VENDOR_ID_MASK (0x03 << 6)
+#define PMIC_MINOR_REV_MASK 0x07
+#define PMIC_MAJOR_REV_MASK (0x07 << 3)
+
+#define BASINCOVE_VENDORID (0x03 << 6)
+#define SHADYCOVE_VENDORID 0x00
+
+#define BC_PMIC_MAJOR_REV_A0 0x00
+#define BC_PMIC_MAJOR_REV_B0 (0x01 << 3)
+
+#define PMIC_BZONE_LOW 0
+#define PMIC_BZONE_HIGH 5
+
+#define IRQLVL1_ADDR 0x01
+#define IRQLVL1_MASK_ADDR 0x0c
+#define IRQLVL1_CHRGR_MASK D5
+
+#define THRMZN0H_ADDR_BC 0xCE
+#define THRMZN0L_ADDR_BC 0xCF
+#define THRMZN1H_ADDR_BC 0xD0
+#define THRMZN1L_ADDR_BC 0xD1
+#define THRMZN2H_ADDR_BC 0xD2
+#define THRMZN2L_ADDR_BC 0xD3
+#define THRMZN3H_ADDR_BC 0xD4
+#define THRMZN3L_ADDR_BC 0xD5
+#define THRMZN4H_ADDR_BC 0xD6
+#define THRMZN4L_ADDR_BC 0xD7
+
+#define THRMZN0H_ADDR_SC 0xD7
+#define THRMZN0L_ADDR_SC 0xD8
+#define THRMZN1H_ADDR_SC 0xD9
+#define THRMZN1L_ADDR_SC 0xDA
+#define THRMZN2H_ADDR_SC 0xDD
+#define THRMZN2L_ADDR_SC 0xDE
+#define THRMZN3H_ADDR_SC 0xDF
+#define THRMZN3L_ADDR_SC 0xE0
+#define THRMZN4H_ADDR_SC 0xE1
+#define THRMZN4L_ADDR_SC 0xE2
+
+#define CHGRIRQ0_ADDR 0x07
+#define CHGIRQ0_BZIRQ_MASK D7
+#define CHGIRQ0_BAT_CRIT_MASK D6
+#define CHGIRQ0_BAT1_ALRT_MASK D5
+#define CHGIRQ0_BAT0_ALRT_MASK D4
+
+#define MCHGRIRQ0_ADDR 0x12
+#define MCHGIRQ0_RSVD_MASK D7
+#define MCHGIRQ0_MBAT_CRIT_MASK D6
+#define MCHGIRQ0_MBAT1_ALRT_MASK D5
+#define MCHGIRQ0_MBAT0_ALRT_MASK D4
+
+#define SCHGRIRQ0_ADDR 0x4E
+#define SCHGIRQ0_RSVD_MASK D7
+#define SCHGIRQ0_SBAT_CRIT_MASK D6
+#define SCHGIRQ0_SBAT1_ALRT_MASK D5
+#define SCHGIRQ0_SBAT0_ALRT_MASK D4
+
+#define LOWBATTDET0_ADDR 0x2C
+#define LOWBATTDET1_ADDR 0x2D
+#define BATTDETCTRL_ADDR 0x2E
+#define VBUSDETCTRL_ADDR 0x50
+#define VDCINDETCTRL_ADDR 0x51
+
+#define CHRGRIRQ1_ADDR 0x08
+#define CHRGRIRQ1_SUSBIDDET_MASK D3
+#define CHRGRIRQ1_SBATTDET_MASK D2
+#define CHRGRIRQ1_SDCDET_MASK D1
+#define CHRGRIRQ1_SVBUSDET_MASK D0
+#define MCHGRIRQ1_ADDR 0x13
+#define MCHRGRIRQ1_SUSBIDDET_MASK D3
+#define MCHRGRIRQ1_SBATTDET_MAS D2
+#define MCHRGRIRQ1_SDCDET_MASK D1
+#define MCHRGRIRQ1_SVBUSDET_MASK D0
+#define SCHGRIRQ1_ADDR 0x4F
+#define SCHRGRIRQ1_SUSBIDDET_MASK D3
+#define SCHRGRIRQ1_SBATTDET_MASK D2
+#define SCHRGRIRQ1_SDCDET_MASK D1
+#define SCHRGRIRQ1_SVBUSDET_MASK D0
+
+#define PMIC_CHRGR_INT0_MASK 0xB1
+#define PMIC_CHRGR_CCSM_INT0_MASK 0xB0
+#define PMIC_CHRGR_EXT_CHRGR_INT_MASK 0x01
+
+#define CHGRCTRL0_ADDR 0x4B
+#define CHGRCTRL0_WDT_NOKICK_MASK D7
+#define CHGRCTRL0_DBPOFF_MASK D6
+#define CHGRCTRL0_CCSM_OFF_MASK D5
+#define CHGRCTRL0_TTLCK_MASK D4
+#define CHGRCTRL0_SWCONTROL_MASK D3
+#define CHGRCTRL0_EXTCHRDIS_MASK D2
+#define CHRCTRL0_EMRGCHREN_MASK D1
+#define CHRCTRL0_CHGRRESET_MASK D0
+
+#define WDT_NOKICK_ENABLE (0x01 << 7)
+#define WDT_NOKICK_DISABLE (~WDT_NOKICK_ENABLE & 0xFF)
+
+#define EXTCHRDIS_ENABLE (0x01 << 2)
+#define EXTCHRDIS_DISABLE (~EXTCHRDIS_ENABLE & 0xFF)
+#define SWCONTROL_ENABLE (0x01 << 3)
+#define EMRGCHREN_ENABLE (0x01 << 1)
+
+#define CHGRCTRL1_ADDR 0x4C
+#define CHGRCTRL1_DBPEN_MASK D7
+#define CHGRCTRL1_OTGMODE_MASK D6
+#define CHGRCTRL1_FTEMP_EVENT_MASK D5
+#define CHGRCTRL1_FUSB_INLMT_1500 D4
+#define CHGRCTRL1_FUSB_INLMT_900 D3
+#define CHGRCTRL1_FUSB_INLMT_500 D2
+#define CHGRCTRL1_FUSB_INLMT_150 D1
+#define CHGRCTRL1_FUSB_INLMT_100 D0
+
+#define CHGRSTATUS_ADDR 0x4D
+#define CHGRSTATUS_RSVD_MASK (D7|D6|D5|D3)
+#define CHGRSTATUS_SDPB_MASK D4
+#define CHGRSTATUS_CHGDISLVL_MASK D2
+#define CHGRSTATUS_CHGDETB_LATCH_MASK D1
+#define CHGDETB_MASK D0
+
+#define THRMBATZONE_ADDR_BC 0xB5
+#define THRMBATZONE_ADDR_SC 0xB6
+#define THRMBATZONE_MASK (D0|D1|D2)
+
+#define USBIDCTRL_ADDR 0x19
+#define USBIDEN_MASK 0x01
+#define ACADETEN_MASK (0x01 << 1)
+
+#define USBIDSTAT_ADDR 0x1A
+#define ID_SHORT D4
+#define ID_SHORT_VBUS (1 << 4)
+#define ID_NOT_SHORT_VBUS 0
+#define ID_FLOAT_STS D3
+#define R_ID_FLOAT_DETECT (1 << 3)
+#define R_ID_FLOAT_NOT_DETECT 0
+#define ID_RAR_BRC_STS ((D2 | D1))
+#define ID_ACA_NOT_DETECTED 0
+#define R_ID_A (1 << 1)
+#define R_ID_B (2 << 1)
+#define R_ID_C (3 << 1)
+#define ID_GND D0
+#define ID_TYPE_A 0
+#define ID_TYPE_B 1
+#define is_aca(x) ((x & R_ID_A) || (x & R_ID_B) || (x & R_ID_C))
+
+#define WAKESRC_ADDR 0x24
+
+#define CHRTTADDR_ADDR 0x56
+#define CHRTTDATA_ADDR 0x57
+
+#define USBSRCDET_RETRY_CNT 4
+#define USBSRCDET_SLEEP_TIME 200
+#define USBSRCDETSTATUS_ADDR 0x5D
+#define USBSRCDET_SUSBHWDET_MASK (D0|D1)
+#define USBSRCDET_USBSRCRSLT_MASK (D2|D3|D4|D5)
+#define USBSRCDET_SDCD_MASK (D6|D7)
+#define USBSRCDET_SUSBHWDET_DETON (0x01 << 0)
+#define USBSRCDET_SUSBHWDET_DETSUCC (0x01 << 1)
+#define USBSRCDET_SUSBHWDET_DETFAIL (0x03 << 0)
+
+/* Register on I2C-dev2-0x6E */
+#define USBPATH_ADDR 0x011C
+#define USBPATH_USBSEL_MASK D3
+
+#define TT_I2CDADDR_ADDR 0x00
+#define TT_CHGRINIT0OS_ADDR 0x01
+#define TT_CHGRINIT1OS_ADDR 0x02
+#define TT_CHGRINIT2OS_ADDR 0x03
+#define TT_CHGRINIT3OS_ADDR 0x04
+#define TT_CHGRINIT4OS_ADDR 0x05
+#define TT_CHGRINIT5OS_ADDR 0x06
+#define TT_CHGRINIT6OS_ADDR 0x07
+#define TT_CHGRINIT7OS_ADDR 0x08
+#define TT_USBINPUTICCOS_ADDR 0x09
+#define TT_USBINPUTICCMASK_ADDR 0x0A
+#define TT_CHRCVOS_ADDR 0X0B
+#define TT_CHRCVMASK_ADDR 0X0C
+#define TT_CHRCCOS_ADDR 0X0D
+#define TT_CHRCCMASK_ADDR 0X0E
+#define TT_LOWCHROS_ADDR 0X0F
+#define TT_LOWCHRMASK_ADDR 0X10
+#define TT_WDOGRSTOS_ADDR 0X11
+#define TT_WDOGRSTMASK_ADDR 0X12
+#define TT_CHGRENOS_ADDR 0X13
+#define TT_CHGRENMASK_ADDR 0X14
+
+#define TT_CUSTOMFIELDEN_ADDR 0X15
+#define TT_HOT_LC_EN D1
+#define TT_COLD_LC_EN D0
+#define TT_HOT_COLD_LC_MASK (TT_HOT_LC_EN | TT_COLD_LC_EN)
+#define TT_HOT_COLD_LC_EN (TT_HOT_LC_EN | TT_COLD_LC_EN)
+#define TT_HOT_COLD_LC_DIS 0
+
+#define TT_CHGRINIT0VAL_ADDR 0X20
+#define TT_CHGRINIT1VAL_ADDR 0X21
+#define TT_CHGRINIT2VAL_ADDR 0X22
+#define TT_CHGRINIT3VAL_ADDR 0X23
+#define TT_CHGRINIT4VAL_ADDR 0X24
+#define TT_CHGRINIT5VAL_ADDR 0X25
+#define TT_CHGRINIT6VAL_ADDR 0X26
+#define TT_CHGRINIT7VAL_ADDR 0X27
+#define TT_USBINPUTICC100VAL_ADDR 0X28
+#define TT_USBINPUTICC150VAL_ADDR 0X29
+#define TT_USBINPUTICC500VAL_ADDR 0X2A
+#define TT_USBINPUTICC900VAL_ADDR 0X2B
+#define TT_USBINPUTICC1500VAL_ADDR 0X2C
+#define TT_CHRCVEMRGLOWVAL_ADDR 0X2D
+#define TT_CHRCVCOLDVAL_ADDR 0X2E
+#define TT_CHRCVCOOLVAL_ADDR 0X2F
+#define TT_CHRCVWARMVAL_ADDR 0X30
+#define TT_CHRCVHOTVAL_ADDR 0X31
+#define TT_CHRCVEMRGHIVAL_ADDR 0X32
+#define TT_CHRCCEMRGLOWVAL_ADDR 0X33
+#define TT_CHRCCCOLDVAL_ADDR 0X34
+#define TT_CHRCCCOOLVAL_ADDR 0X35
+#define TT_CHRCCWARMVAL_ADDR 0X36
+#define TT_CHRCCHOTVAL_ADDR 0X37
+#define TT_CHRCCEMRGHIVAL_ADDR 0X38
+#define TT_LOWCHRENVAL_ADDR 0X39
+#define TT_LOWCHRDISVAL_ADDR 0X3A
+#define TT_WDOGRSTVAL_ADDR 0X3B
+#define TT_CHGRENVAL_ADDR 0X3C
+#define TT_CHGRDISVAL_ADDR 0X3D
+
+/*Interrupt registers*/
+#define BATT_CHR_BATTDET_MASK D2
+/*Status registers*/
+#define BATT_PRESENT 1
+#define BATT_NOT_PRESENT 0
+
+#define BATT_STRING_MAX 8
+#define BATTID_STR_LEN 8
+
+#define CHARGER_PRESENT 1
+#define CHARGER_NOT_PRESENT 0
+
+/*FIXME: Modify default values */
+#define BATT_DEAD_CUTOFF_VOLT 3400 /* 3400 mV */
+#define BATT_CRIT_CUTOFF_VOLT 3700 /* 3700 mV */
+
+#define MSIC_BATT_TEMP_MAX 60 /* 60 degrees */
+#define MSIC_BATT_TEMP_MIN 0
+
+#define BATT_TEMP_WARM 45 /* 45 degrees */
+#define MIN_BATT_PROF 4
+
+#define PMIC_REG_NAME_LEN 28
+#define PMIC_REG_DEF(x) { .reg_name = #x, .addr = x }
+
+/* Maps one interrupt-register bit to its status bit, log strings and the
+ * handlers to invoke when that bit fires. */
+struct interrupt_info {
+	/* Interrupt register mask*/
+	u8 int_reg_mask;
+	/* interrupt status register mask */
+	u8 stat_reg_mask;
+	/* log message if interrupt is set */
+	char *log_msg_int_reg_true;
+	/* log message if stat is true or false */
+	char *log_msg_stat_true;
+	char *log_msg_stat_false;
+	/* handle if interrupt bit is set */
+	void (*int_handle) (void);
+	/* interrupt status handler */
+	void (*stat_handle) (bool);
+};
+
+/* Charger cable type as reported by the PMIC charger-detect logic. */
+enum pmic_charger_cable_type {
+	PMIC_CHARGER_TYPE_NONE = 0,
+	PMIC_CHARGER_TYPE_SDP,
+	PMIC_CHARGER_TYPE_DCP,
+	PMIC_CHARGER_TYPE_CDP,
+	PMIC_CHARGER_TYPE_ACA,
+	PMIC_CHARGER_TYPE_SE1,
+	PMIC_CHARGER_TYPE_MHL,
+	PMIC_CHARGER_TYPE_FLOAT_DP_DN,
+	PMIC_CHARGER_TYPE_OTHER,
+	PMIC_CHARGER_TYPE_DCP_EXTPHY,
+};
+
+/* Per-device driver state for the PMIC charger driver. */
+struct pmic_chrgr_drv_context {
+	bool invalid_batt;
+	bool is_batt_present;
+	bool current_sense_enabled;
+	unsigned int irq;		/* GPE_ID or IRQ# */
+	void __iomem *pmic_intr_iomap;
+	struct device *dev;
+	int health;
+	u8 pmic_id;
+	bool is_internal_usb_phy;
+	enum pmic_charger_cable_type charger_type;
+	/* ShadyCove-WA for VBUS removal detect issue */
+	bool vbus_connect_status;
+	struct ps_batt_chg_prof *sfi_bcprof;
+	struct ps_pse_mod_prof *actual_bcprof;
+	struct ps_pse_mod_prof *runtime_bcprof;
+	struct pmic_platform_data *pdata;
+	struct usb_phy *otg;
+	struct list_head evt_queue;
+	struct work_struct evt_work;
+	struct mutex evt_queue_lock;
+};
+
+/* One queued interrupt event: raw CHGRIRQ0/1 interrupt and status bytes. */
+struct pmic_event {
+	struct list_head node;
+	u8 chgrirq0_int;
+	u8 chgrirq1_int;
+	u8 chgrirq0_stat;
+	u8 chgrirq1_stat;
+};
+
+/* Name/address pair used for register dump tables (see PMIC_REG_DEF). */
+struct pmic_regs_def {
+	char reg_name[PMIC_REG_NAME_LEN];
+	u16 addr;
+};
+
+#endif
static inline void power_supply_remove_triggers(struct power_supply *psy) {}
#endif /* CONFIG_LEDS_TRIGGERS */
+#ifdef CONFIG_POWER_SUPPLY_CHARGER
+
+/* Charging-framework entry points; stubbed out below when the framework
+ * is not configured so callers need no #ifdefs. */
+extern void power_supply_trigger_charging_handler(struct power_supply *psy);
+extern int power_supply_register_charger(struct power_supply *psy);
+extern int power_supply_unregister_charger(struct power_supply *psy);
+extern int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state);
+
+#else
+
+static inline void
+	power_supply_trigger_charging_handler(struct power_supply *psy) { }
+static inline int power_supply_register_charger(struct power_supply *psy)
+{ return 0; }
+static inline int power_supply_unregister_charger(struct power_supply *psy)
+{ return 0; }
+static inline int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state)
+{ return 0; }
+
+#endif
--- /dev/null
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+#include <linux/extcon.h>
+#include <linux/power/battery_id.h>
+#include <linux/notifier.h>
+#include <linux/usb/otg.h>
+#include "power_supply.h"
+#include "power_supply_charger.h"
+
+/* Work item scheduled whenever an OTG cable event must be propagated. */
+struct work_struct otg_work;
+/* Upper bound on chargers that can supply one battery (array sizes below). */
+#define MAX_CHARGER_COUNT 5
+
+static LIST_HEAD(algo_list);
+
+/* Framework-global state: cached charger/battery properties, the event
+ * queue/work, the lock serializing algorithm runs, and the wait queue used
+ * to block until a charger reports charging enabled. */
+struct power_supply_charger {
+	bool is_cable_evt_reg;
+	/*cache battery and charger properties */
+	struct list_head chrgr_cache_lst;
+	struct list_head batt_cache_lst;
+	struct list_head evt_queue;
+	struct work_struct algo_trigger_work;
+	struct mutex evt_lock;
+	wait_queue_head_t wait_chrg_enable;
+};
+
+/* One supported cable: its extcon identity, the matching power-supply
+ * cable type, the notifier registered with extcon and the last-seen
+ * connect state / current limit. */
+struct charger_cable {
+	struct work_struct work;
+	struct notifier_block nb;
+	struct extcon_chrgr_cbl_props cable_props;
+	enum extcon_cable_name extcon_cable_type;
+	enum power_supply_charger_cable_type psy_cable_type;
+	struct extcon_specific_cable_nb extcon_dev;
+	struct extcon_dev *edev;
+};
+
+static struct power_supply_charger psy_chrgr;
+
+/* Supported cables; get_cable() indexes into this table, so the order
+ * here must stay in sync with that function. */
+static struct charger_cable cable_list[] = {
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP,
+	 .extcon_cable_type = EXTCON_SDP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP,
+	 .extcon_cable_type = EXTCON_CDP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP,
+	 .extcon_cable_type = EXTCON_DCP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_ACA,
+	 .extcon_cable_type = EXTCON_ACA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK,
+	 .extcon_cable_type = EXTCON_ACA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_SE1,
+	 .extcon_cable_type = EXTCON_TA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_AC,
+	 .extcon_cable_type = EXTCON_AC,
+	 },
+};
+
+static int get_supplied_by_list(struct power_supply *psy,
+				struct power_supply *psy_lst[]);
+
+static int otg_handle_notification(struct notifier_block *nb,
+				   unsigned long event, void *data);
+/* OTG transceiver handle and the notifier block hooked onto it. */
+struct usb_phy *otg_xceiver;
+struct notifier_block otg_nb = {
+		   .notifier_call = otg_handle_notification,
+	};
+static void configure_chrgr_source(struct charger_cable *cable_lst);
+
+/* Map a POWER_SUPPLY_CHARGER_TYPE_* value to its cable_list[] entry.
+ * Note the indices mirror cable_list[] order: SE1 is index 5 and AC is
+ * index 6. Returns NULL for an unknown charger type. */
+struct charger_cable *get_cable(unsigned long usb_chrgr_type)
+{
+
+	switch (usb_chrgr_type) {
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		pr_info("%s:%d SDP\n", __FILE__, __LINE__);
+		return &cable_list[0];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		pr_info("%s:%d CDP\n", __FILE__, __LINE__);
+		return &cable_list[1];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		pr_info("%s:%d DCP\n", __FILE__, __LINE__);
+		return &cable_list[2];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+		pr_info("%s:%d ACA\n", __FILE__, __LINE__);
+		return &cable_list[3];
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		pr_info("%s:%d ACA DOCK\n", __FILE__, __LINE__);
+		return &cable_list[4];
+	case POWER_SUPPLY_CHARGER_TYPE_AC:
+		pr_info("%s:%d AC\n", __FILE__, __LINE__);
+		return &cable_list[6];
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		pr_info("%s:%d SE1\n", __FILE__, __LINE__);
+		return &cable_list[5];
+	}
+
+	return NULL;
+}
+
+
+/* Worker for otg_work: re-evaluate the charger source for all cables. */
+static void otg_event_worker(struct work_struct *work)
+{
+	configure_chrgr_source(cable_list);
+
+}
+
+/* Translate an OTG-reported cable capability into the matching
+ * charger_cable state and kick otg_work to reconfigure chargers.
+ * Returns 0 on success, -EINVAL for an unknown charger type.
+ *
+ * Fix: normal connect/update/disconnect/suspend events were logged at
+ * KERN_ERR, and the UPDATE case printed "Connected". Use pr_info for
+ * informational events; only an invalid event stays an error.
+ */
+static int process_cable_props(struct power_supply_cable_props *cap)
+{
+
+	struct charger_cable *cable = NULL;
+
+	cable = get_cable(cap->chrg_type);
+	if (!cable) {
+
+		pr_err("%s:%d Error in getting charger cable from get_cable\n",
+			__FILE__, __LINE__);
+		return -EINVAL;
+	}
+
+	switch (cap->chrg_evt) {
+	case POWER_SUPPLY_CHARGER_EVENT_CONNECT:
+		pr_info("%s:%d Connected inlmt=%d\n",
+			__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_CONNECTED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_UPDATE:
+		pr_info("%s:%d Updated inlmt=%d\n",
+			__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_UPDATED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_DISCONNECT:
+		pr_info("%s:%d Disconnected inlmt=%d\n",
+			__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_DISCONNECTED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_SUSPEND:
+		pr_info("%s:%d Suspended inlmt=%d\n",
+			__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_SUSPENDED;
+		break;
+	default:
+		pr_err("%s:%d Invalid event\n", __FILE__, __LINE__);
+		break;
+	}
+
+	cable->cable_props.mA = cap->mA;
+	schedule_work(&otg_work);
+
+	return 0;
+
+}
+
+/* OTG notifier callback: only USB_EVENT_CHARGER events carry a
+ * power_supply_cable_props payload; everything else is ignored. */
+static int otg_handle_notification(struct notifier_block *nb,
+				   unsigned long event, void *data)
+{
+
+	struct power_supply_cable_props *cap;
+
+	cap = (struct power_supply_cable_props *)data;
+
+	if (event != USB_EVENT_CHARGER)
+		return NOTIFY_DONE;
+
+	process_cable_props(cap);
+
+
+	return NOTIFY_OK;
+}
+
+/* Acquire the OTG transceiver and hook our charger notifier onto it.
+ * Returns 0 on success, -EIO on any failure.
+ *
+ * Fixes: otg_work must be initialized BEFORE the notifier is registered,
+ * otherwise an early USB_EVENT_CHARGER can schedule_work() an
+ * uninitialized work item; and the transceiver reference was leaked when
+ * usb_register_notifier() failed.
+ */
+int otg_register(void)
+{
+	int retval;
+
+	otg_xceiver = usb_get_transceiver();
+	if (!otg_xceiver) {
+		pr_err("%s:%d failure to get otg transceiver\n",
+			__FILE__, __LINE__);
+		return -EIO;
+	}
+
+	INIT_WORK(&otg_work, otg_event_worker);
+
+	retval = usb_register_notifier(otg_xceiver, &otg_nb);
+	if (retval) {
+		pr_err("%s:%d failure to register otg notifier\n",
+			__FILE__, __LINE__);
+		usb_put_transceiver(otg_xceiver);
+		otg_xceiver = NULL;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int charger_cable_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr);
+static void charger_cable_event_worker(struct work_struct *work);
+struct charging_algo *power_supply_get_charging_algo
+ (struct power_supply *, struct ps_batt_chg_prof *);
+
+/* Register with extcon for every supported cable and seed each cable's
+ * state from the extcon device; finally replay the current OTG charger
+ * status so a cable already plugged at boot is not missed.
+ *
+ * Fix: the loop used "while (--count)", which only processed
+ * count - 1 entries and silently skipped the last cable in the list.
+ */
+static void init_charger_cables(struct charger_cable *cable_lst, int count)
+{
+	struct charger_cable *cable;
+	struct extcon_chrgr_cbl_props cable_props;
+	const char *cable_name;
+	struct power_supply_cable_props cap;
+
+	otg_register();
+
+	while (count--) {
+		cable = cable_lst++;
+		/* initialize cable instance */
+		INIT_WORK(&cable->work, charger_cable_event_worker);
+		cable->nb.notifier_call = charger_cable_notifier;
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_DISCONNECTED;
+		cable->cable_props.mA = 0;
+		cable_name = extcon_cable_name[cable->extcon_cable_type];
+
+		if (extcon_register_interest(&cable->extcon_dev,
+				NULL, cable_name, &cable->nb))
+			continue;
+
+		cable->edev = cable->extcon_dev.edev;
+
+		if (!cable->edev)
+			continue;
+
+		if (cable->edev->get_cable_properties(cable_name,
+						      (void *)&cable_props)) {
+			continue;
+
+		} else if (cable_props.cable_stat !=
+			   cable->cable_props.cable_stat) {
+			cable->cable_props.cable_stat = cable_props.cable_stat;
+			cable->cable_props.mA = cable_props.mA;
+		}
+	}
+
+	if (!otg_get_chrg_status(otg_xceiver, &cap))
+		process_cable_props(&cap);
+
+}
+
+/* Snapshot a charger's live sysfs-visible properties, timestamped with
+ * jiffies, into *chrgr_prop. */
+static inline void get_cur_chrgr_prop(struct power_supply *psy,
+				      struct charger_props *chrgr_prop)
+{
+	chrgr_prop->is_charging = IS_CHARGING_ENABLED(psy);
+	chrgr_prop->name = psy->name;
+	chrgr_prop->online = IS_ONLINE(psy);
+	chrgr_prop->present = IS_PRESENT(psy);
+	chrgr_prop->cable = CABLE_TYPE(psy);
+	chrgr_prop->health = HEALTH(psy);
+	chrgr_prop->tstamp = get_jiffies_64();
+
+}
+
+/* Copy this charger's cached properties (matched by name) into
+ * *chrgr_cache. Returns 0 on a hit, -ENODEV if nothing is cached yet. */
+static inline int get_chrgr_prop_cache(struct power_supply *psy,
+				       struct charger_props *chrgr_cache)
+{
+
+	struct charger_props *chrgr_prop;
+	int ret = -ENODEV;
+
+	list_for_each_entry(chrgr_prop, &psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_prop->name, psy->name)) {
+			memcpy(chrgr_cache, chrgr_prop, sizeof(*chrgr_cache));
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Debug dump of charger properties (pr_devel: compiled out unless DEBUG).
+ * Fix: tstamp comes from get_jiffies_64() and is u64; printing it with
+ * %d is a varargs format mismatch (undefined behavior). */
+static void dump_charger_props(struct charger_props *props)
+{
+	pr_devel("%s:name=%s present=%d is_charging=%d health=%d online=%d cable=%d tstamp=%llu\n",
+		__func__, props->name, props->present, props->is_charging,
+		props->health, props->online, props->cable,
+		(unsigned long long)props->tstamp);
+}
+
+/* Debug dump of battery properties.
+ * Fix: tstamp is u64 (get_jiffies_64) — %d was a format mismatch.
+ * NOTE(review): voltage_now/current_now are printed with %d as before;
+ * confirm their width in struct batt_props (the sample caches are long). */
+static void dump_battery_props(struct batt_props *props)
+{
+	pr_devel("%s:name=%s voltage_now=%d current_now=%d temperature=%d status=%d health=%d tstamp=%llu algo_stat=%d ",
+		__func__, props->name, props->voltage_now, props->current_now,
+		props->temperature, props->status, props->health,
+		(unsigned long long)props->tstamp, props->algo_stat);
+}
+
+/* Insert or refresh this charger's entry in the property cache list.
+ * A new entry is allocated on first sight; allocation failure is logged
+ * and the update silently dropped. */
+static inline void cache_chrgr_prop(struct charger_props *chrgr_prop_new)
+{
+
+	struct charger_props *chrgr_cache;
+
+	list_for_each_entry(chrgr_cache, &psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_cache->name, chrgr_prop_new->name))
+			goto update_props;
+	}
+
+	chrgr_cache = kzalloc(sizeof(*chrgr_cache), GFP_KERNEL);
+	if (chrgr_cache == NULL) {
+		pr_err("%s:%dError in allocating memory\n", __FILE__, __LINE__);
+		return;
+	}
+
+	INIT_LIST_HEAD(&chrgr_cache->node);
+	list_add_tail(&chrgr_cache->node, &psy_chrgr.chrgr_cache_lst);
+
+	chrgr_cache->name = chrgr_prop_new->name;
+
+update_props:
+	chrgr_cache->is_charging = chrgr_prop_new->is_charging;
+	chrgr_cache->online = chrgr_prop_new->online;
+	chrgr_cache->health = chrgr_prop_new->health;
+	chrgr_cache->present = chrgr_prop_new->present;
+	chrgr_cache->cable = chrgr_prop_new->cable;
+	chrgr_cache->tstamp = chrgr_prop_new->tstamp;
+}
+
+
+/* True if the charger's live properties differ from the cached copy
+ * (or nothing was cached yet); refreshes the cache as a side effect. */
+static inline bool is_chrgr_prop_changed(struct power_supply *psy)
+{
+
+	struct charger_props chrgr_prop_cache, chrgr_prop;
+
+	get_cur_chrgr_prop(psy, &chrgr_prop);
+	/* Get cached battery property. If no cached property available
+	 * then cache the new property and return true
+	 */
+	if (get_chrgr_prop_cache(psy, &chrgr_prop_cache)) {
+		cache_chrgr_prop(&chrgr_prop);
+		return true;
+	}
+
+	pr_devel("%s\n", __func__);
+	dump_charger_props(&chrgr_prop);
+	dump_charger_props(&chrgr_prop_cache);
+
+	if (!IS_CHARGER_PROP_CHANGED(chrgr_prop, chrgr_prop_cache))
+		return false;
+
+	cache_chrgr_prop(&chrgr_prop);
+	return true;
+}
+/* Shift the fixed-size sample window left by one and append new_sample
+ * at the end (sample_array has MAX_CUR_VOLT_SAMPLES slots). */
+static void cache_successive_samples(long *sample_array, long new_sample)
+{
+
+	int i;
+
+	for (i = 0; i < MAX_CUR_VOLT_SAMPLES - 1; ++i)
+		*(sample_array + i) = *(sample_array + i + 1);
+
+	*(sample_array + i) = new_sample;
+
+}
+
+/* Insert or refresh this battery's entry in the property cache list.
+ * Voltage/current are folded into the rolling sample windows only when
+ * at least DEF_CUR_VOLT_SAMPLE_JIFF has elapsed since the last sample. */
+static inline void cache_bat_prop(struct batt_props *bat_prop_new)
+{
+
+	struct batt_props *bat_cache;
+
+	/* Find entry in cache list. If an entry is located update
+	 * the existing entry else create new entry in the list */
+	list_for_each_entry(bat_cache, &psy_chrgr.batt_cache_lst, node) {
+		if (!strcmp(bat_cache->name, bat_prop_new->name))
+			goto update_props;
+	}
+
+	bat_cache = kzalloc(sizeof(*bat_cache), GFP_KERNEL);
+	if (bat_cache == NULL) {
+		pr_err("%s:%dError in allocating memory\n", __FILE__, __LINE__);
+		return;
+	}
+	INIT_LIST_HEAD(&bat_cache->node);
+	list_add_tail(&bat_cache->node, &psy_chrgr.batt_cache_lst);
+
+	bat_cache->name = bat_prop_new->name;
+
+update_props:
+	if (time_after(bat_prop_new->tstamp,
+		(bat_cache->tstamp + DEF_CUR_VOLT_SAMPLE_JIFF)) ||
+					bat_cache->tstamp == 0) {
+		cache_successive_samples(bat_cache->voltage_now_cache,
+						bat_prop_new->voltage_now);
+		cache_successive_samples(bat_cache->current_now_cache,
+						bat_prop_new->current_now);
+		bat_cache->tstamp = bat_prop_new->tstamp;
+	}
+
+	bat_cache->voltage_now = bat_prop_new->voltage_now;
+	bat_cache->current_now = bat_prop_new->current_now;
+	bat_cache->health = bat_prop_new->health;
+
+	bat_cache->temperature = bat_prop_new->temperature;
+	bat_cache->status = bat_prop_new->status;
+	bat_cache->algo_stat = bat_prop_new->algo_stat;
+	bat_cache->throttle_state = bat_prop_new->throttle_state;
+}
+
+/* Copy this battery's cached properties (matched by name) into
+ * *bat_cache. Returns 0 on a hit, -ENODEV if nothing is cached yet. */
+static inline int get_bat_prop_cache(struct power_supply *psy,
+				     struct batt_props *bat_cache)
+{
+
+	struct batt_props *bat_prop;
+	int ret = -ENODEV;
+
+	list_for_each_entry(bat_prop, &psy_chrgr.batt_cache_lst, node) {
+		if (!strcmp(bat_prop->name, psy->name)) {
+			memcpy(bat_cache, bat_prop, sizeof(*bat_cache));
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Snapshot a battery's live properties (mV, mA, tenths-of-degree scaled
+ * to degrees) and carry forward the cached algorithm state, if any. */
+static inline void get_cur_bat_prop(struct power_supply *psy,
+				    struct batt_props *bat_prop)
+{
+	struct batt_props bat_prop_cache;
+	int ret;
+
+	bat_prop->name = psy->name;
+	bat_prop->voltage_now = VOLTAGE_OCV(psy) / 1000;
+	bat_prop->current_now = CURRENT_NOW(psy) / 1000;
+	bat_prop->temperature = TEMPERATURE(psy) / 10;
+	bat_prop->status = STATUS(psy);
+	bat_prop->health = HEALTH(psy);
+	bat_prop->tstamp = get_jiffies_64();
+	bat_prop->throttle_state = CURRENT_THROTTLE_STATE(psy);
+
+	/* Populate cached algo data to new profile */
+	ret = get_bat_prop_cache(psy, &bat_prop_cache);
+	if (!ret)
+		bat_prop->algo_stat = bat_prop_cache.algo_stat;
+}
+
+/* True if the battery's live properties differ from the cached copy
+ * (or nothing was cached yet); refreshes the cache as a side effect. */
+static inline bool is_batt_prop_changed(struct power_supply *psy)
+{
+
+	struct batt_props bat_prop_cache, bat_prop;
+
+	/* Get cached battery property. If no cached property available
+	 * then cache the new property and return true
+	 */
+	get_cur_bat_prop(psy, &bat_prop);
+	if (get_bat_prop_cache(psy, &bat_prop_cache)) {
+		cache_bat_prop(&bat_prop);
+		return true;
+	}
+
+	pr_devel("%s\n", __func__);
+	dump_battery_props(&bat_prop);
+	dump_battery_props(&bat_prop_cache);
+
+	if (!IS_BAT_PROP_CHANGED(bat_prop, bat_prop_cache))
+		return false;
+
+	cache_bat_prop(&bat_prop);
+	return true;
+}
+
+/* True only if EVERY supplicant of this charger implements
+ * external_power_changed (so charger events can be delivered through
+ * that callback instead of running the charging algorithm here).
+ *
+ * Cleanup: "result &= false" was an obfuscated "result = false"; set it
+ * directly and stop scanning once one supplicant lacks the callback.
+ */
+static inline bool is_supplied_to_has_ext_pwr_changed(struct power_supply *psy)
+{
+	int i;
+	struct power_supply *psb;
+	bool is_pwr_changed_defined = true;
+
+	for (i = 0; i < psy->num_supplicants; i++) {
+		psb = power_supply_get_by_name(psy->supplied_to[i]);
+		if (psb && !psb->external_power_changed) {
+			is_pwr_changed_defined = false;
+			break;
+		}
+	}
+
+	return is_pwr_changed_defined;
+
+}
+
+/* True if any charger currently supplying this battery has changed
+ * properties since its cached snapshot. */
+static inline bool is_supplied_by_changed(struct power_supply *psy)
+{
+
+	int cnt;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+	while (cnt--) {
+		if ((IS_CHARGER(chrgr_lst[cnt])) &&
+			is_chrgr_prop_changed(chrgr_lst[cnt]))
+			return true;
+	}
+
+	return false;
+}
+
+/* Decide whether a property change on this supply warrants running the
+ * charging algorithm. */
+static inline bool is_trigger_charging_algo(struct power_supply *psy)
+{
+
+	/* trigger charging alorithm if battery or
+	 * charger properties are changed. Also no need to
+	 * invoke algorithm for power_supply_changed from
+	 * charger, if all supplied_to has the ext_port_changed defined.
+	 * On invoking the ext_port_changed the supplied to can send
+	 * power_supplied_changed event.
+	 */
+
+	if ((IS_CHARGER(psy) && !is_supplied_to_has_ext_pwr_changed(psy)) &&
+			is_chrgr_prop_changed(psy))
+		return true;
+
+	if ((IS_BATTERY(psy)) && (is_batt_prop_changed(psy) ||
+			is_supplied_by_changed(psy)))
+		return true;
+
+	return false;
+}
+
+/* Fill psy_lst[] with the chargers supplying this battery, sorted by
+ * priority (0 = highest). Returns the number of chargers found, 0 if
+ * psy is not a battery.
+ *
+ * Fix: every caller passes a MAX_CHARGER_COUNT-sized array but the
+ * store was unbounded — clamp cnt to avoid overflowing the caller's
+ * buffer if more chargers claim this battery.
+ */
+static int get_supplied_by_list(struct power_supply *psy,
+				struct power_supply *psy_lst[])
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+	int cnt = 0, i, j;
+
+	if (!IS_BATTERY(psy))
+		return 0;
+
+	/* Identify chargers which are supplying power to the battery */
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (!IS_CHARGER(pst))
+			continue;
+		for (i = 0; i < pst->num_supplicants; i++) {
+			if (strcmp(pst->supplied_to[i], psy->name))
+				continue;
+			if (cnt < MAX_CHARGER_COUNT)
+				psy_lst[cnt++] = pst;
+			else
+				pr_warn("%s: more than %d chargers supply %s\n",
+					__func__, MAX_CHARGER_COUNT,
+					psy->name);
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	if (cnt <= 1)
+		return cnt;
+
+	/*sort based on priority. 0 has the highest priority */
+	for (i = 0; i < cnt; ++i)
+		for (j = 0; j < cnt; ++j)
+			if (PRIORITY(psy_lst[j]) > PRIORITY(psy_lst[i]))
+				swap(psy_lst[j], psy_lst[i]);
+
+	return cnt;
+}
+
+/* Derive POWER_SUPPLY_STATUS_* for a battery from its cached algorithm
+ * state and the health/enable state of every supplying charger.
+ * Returns a negative errno if psy is not a battery or has no cached
+ * properties yet. */
+static int get_battery_status(struct power_supply *psy)
+{
+	int cnt, status, ret;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	struct batt_props bat_prop;
+
+	if (!IS_BATTERY(psy))
+		return -EINVAL;
+
+	ret = get_bat_prop_cache(psy, &bat_prop);
+	if (ret)
+		return ret;
+
+	status = POWER_SUPPLY_STATUS_DISCHARGING;
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+
+
+	while (cnt--) {
+
+
+		if (IS_PRESENT(chrgr_lst[cnt]))
+			status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+		if (IS_CHARGING_CAN_BE_ENABLED(chrgr_lst[cnt]) &&
+			(IS_HEALTH_GOOD(psy)) &&
+				(IS_HEALTH_GOOD(chrgr_lst[cnt]))) {
+
+			if ((bat_prop.algo_stat == PSY_ALGO_STAT_FULL) ||
+				(bat_prop.algo_stat == PSY_ALGO_STAT_MAINT))
+				status = POWER_SUPPLY_STATUS_FULL;
+			else if (IS_CHARGING_ENABLED(chrgr_lst[cnt]))
+				status = POWER_SUPPLY_STATUS_CHARGING;
+		}
+	}
+	pr_devel("%s: Set status=%d for %s\n", __func__, status, psy->name);
+
+	return status;
+}
+
+/* Mirror the charger's enabled state into its sysfs "online" property. */
+static void update_charger_online(struct power_supply *psy)
+{
+	if (IS_CHARGER_ENABLED(psy))
+		set_charger_online(psy, 1);
+	else
+		set_charger_online(psy, 0);
+}
+
+/* Refresh sysfs-visible state after an event: battery status plus the
+ * online flag of every involved charger.
+ *
+ * Fix: in the battery branch the loop iterated over the supplying
+ * chargers but called update_charger_online(psy) — i.e. on the battery
+ * itself — instead of on each charger in the list.
+ */
+static void update_sysfs(struct power_supply *psy)
+{
+	int i, cnt;
+	struct power_supply *psb;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+
+	if (IS_BATTERY(psy)) {
+		/* set battery status */
+		set_battery_status(psy, get_battery_status(psy));
+
+		/* set charger online for each present supplying charger */
+		cnt = get_supplied_by_list(psy, chrgr_lst);
+		while (cnt--) {
+			if (!IS_PRESENT(chrgr_lst[cnt]))
+				continue;
+
+			update_charger_online(chrgr_lst[cnt]);
+		}
+	} else {
+		/*set battery status */
+		for (i = 0; i < psy->num_supplicants; i++) {
+			psb =
+			    power_supply_get_by_name(psy->
+						     supplied_to[i]);
+			if (psb && IS_BATTERY(psb) && IS_PRESENT(psb))
+				set_battery_status(psb,
+					get_battery_status(psb));
+		}
+
+		/*set charger online */
+		update_charger_online(psy);
+
+	}
+}
+
+/* Run the charging algorithm for a battery and distribute the selected
+ * CC/CV across its supplying chargers by priority. Returns 0 on
+ * success, -EINVAL on profile/algo lookup failure, -ENODATA when no
+ * usable battery data or CC/CV is available.
+ *
+ * Fixes: the return of get_bat_prop_cache() was ignored, so a cache
+ * miss ran the algorithm on an uninitialized struct batt_props; and
+ * "if (cc_min < 0)" was dead code since cc_min is unsigned.
+ */
+static int trigger_algo(struct power_supply *psy)
+{
+	unsigned long cc = 0, cv = 0, cc_min;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	struct batt_props bat_prop;
+	struct charging_algo *algo;
+	struct ps_batt_chg_prof chrg_profile;
+	int cnt;
+
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY)
+		return 0;
+
+	if (get_batt_prop(&chrg_profile)) {
+		pr_err("Error in getting charge profile:%s:%d\n", __FILE__,
+			__LINE__);
+		return -EINVAL;
+	}
+
+
+	if (get_bat_prop_cache(psy, &bat_prop)) {
+		pr_err("%s: no cached properties for %s\n", __func__,
+			psy->name);
+		return -ENODATA;
+	}
+
+	algo = power_supply_get_charging_algo(psy, &chrg_profile);
+	if (!algo) {
+		pr_err("Error in getting charging algo!!\n");
+		return -EINVAL;
+	}
+
+	bat_prop.algo_stat = algo->get_next_cc_cv(bat_prop,
+						chrg_profile, &cc, &cv);
+
+	switch (bat_prop.algo_stat) {
+	case PSY_ALGO_STAT_CHARGE:
+		pr_devel("%s:Algo_status: Charging Enabled\n", __func__);
+		break;
+	case PSY_ALGO_STAT_FULL:
+		pr_devel("%s:Algo_status: Battery is Full\n", __func__);
+		break;
+	case PSY_ALGO_STAT_MAINT:
+		pr_devel("%s:Algo_status: Maintenance charging started\n",
+			 __func__);
+		break;
+	case PSY_ALGO_STAT_UNKNOWN:
+		pr_devel("%s:Algo Status: unknown\n", __func__);
+		break;
+	case PSY_ALGO_STAT_NOT_CHARGE:
+		pr_devel("%s:Algo Status: charging not enabled\n",
+			 __func__);
+		break;
+	}
+
+	cache_bat_prop(&bat_prop);
+
+	if (!cc || !cv)
+		return -ENODATA;
+
+	/* CC needs to be updated for all chargers which are supplying
+	 * power to this battery to ensure that the sum of CCs of all
+	 * chargers are never more than the CC selected by the algo.
+	 * The CC is set based on the charger priority.
+	 */
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+
+	while (cnt--) {
+		if (!IS_PRESENT(chrgr_lst[cnt]))
+			continue;
+
+		cc_min = min_t(unsigned long, MAX_CC(chrgr_lst[cnt]), cc);
+		cc -= cc_min;
+		set_cc(chrgr_lst[cnt], cc_min);
+		set_cv(chrgr_lst[cnt], cv);
+	}
+
+	return 0;
+}
+
+/* Block (up to 1s) until the charger reports charging enabled; woken by
+ * power_supply_trigger_charging_handler(). */
+static inline void wait_for_charging_enabled(struct power_supply *psy)
+{
+	wait_event_timeout(psy_chrgr.wait_chrg_enable,
+			(IS_CHARGING_ENABLED(psy)), HZ);
+}
+
+/* Enable (and wait for) or disable charging on every charger currently
+ * supplying this battery. No-op for non-battery supplies. */
+static inline void enable_supplied_by_charging
+		(struct power_supply *psy, bool is_enable)
+{
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	int cnt;
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY)
+		return;
+	/* Get list of chargers supplying power to this battery and
+	 * disable charging for all chargers
+	 */
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+	if (cnt == 0)
+		return;
+	while (cnt--) {
+		if (!IS_PRESENT(chrgr_lst[cnt]))
+			continue;
+		if (is_enable && IS_CHARGING_CAN_BE_ENABLED(chrgr_lst[cnt])) {
+			enable_charging(chrgr_lst[cnt]);
+			wait_for_charging_enabled(chrgr_lst[cnt]);
+		} else
+			disable_charging(chrgr_lst[cnt]);
+	}
+}
+
+/* Core event handler: under evt_lock, run the charging algorithm for
+ * the supply (battery directly, or each battery a charger supplies),
+ * then refresh sysfs state and notify userspace. */
+static void __power_supply_trigger_charging_handler(struct power_supply *psy)
+{
+	int i;
+	struct power_supply *psb = NULL;
+
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	if (is_trigger_charging_algo(psy)) {
+
+		if (IS_BATTERY(psy)) {
+			if (trigger_algo(psy))
+				enable_supplied_by_charging(psy, false);
+			else
+				enable_supplied_by_charging(psy, true);
+
+		} else if (IS_CHARGER(psy)) {
+			for (i = 0; i < psy->num_supplicants; i++) {
+				psb =
+				    power_supply_get_by_name(psy->
+							     supplied_to[i]);
+
+				if (psb && IS_BATTERY(psb) && IS_PRESENT(psb)) {
+					if (trigger_algo(psb)) {
+						disable_charging(psy);
+						break;
+					} else if (IS_CHARGING_CAN_BE_ENABLED
+								(psy)) {
+						enable_charging(psy);
+						wait_for_charging_enabled(psy);
+					}
+				}
+			}
+		}
+		update_sysfs(psy);
+		power_supply_changed(psy);
+	}
+	mutex_unlock(&psy_chrgr.evt_lock);
+
+}
+
+/* class_for_each_device callback: run the handler on every supply. */
+static int __trigger_charging_handler(struct device *dev, void *data)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+
+	__power_supply_trigger_charging_handler(psy);
+
+	return 0;
+}
+
+/* Worker for algo_trigger_work: handle every registered power supply. */
+static void trigger_algo_psy_class(struct work_struct *work)
+{
+
+	class_for_each_device(power_supply_class, NULL, NULL,
+			__trigger_charging_handler);
+
+}
+
+/* True if any known cable is in an active (connected/updated/suspended)
+ * state. */
+static bool is_cable_connected(void)
+{
+	int i;
+	struct charger_cable *cable;
+
+	for (i = 0; i < ARRAY_SIZE(cable_list); ++i) {
+		cable = cable_list + i;
+		if (IS_CABLE_ACTIVE(cable->cable_props.cable_stat))
+			return true;
+	}
+	return false;
+}
+
+/* Public entry: wake any charger waiting in wait_for_charging_enabled()
+ * and run the handler — synchronously for a specific supply, or via the
+ * workqueue for all supplies when psy is NULL. No-op when the framework
+ * is not initialized or no cable is connected. */
+void power_supply_trigger_charging_handler(struct power_supply *psy)
+{
+
+	if (!psy_chrgr.is_cable_evt_reg || !is_cable_connected())
+		return;
+
+	wake_up(&psy_chrgr.wait_chrg_enable);
+
+	if (psy)
+		__power_supply_trigger_charging_handler(psy);
+	else
+		schedule_work(&psy_chrgr.algo_trigger_work);
+
+}
+EXPORT_SYMBOL(power_supply_trigger_charging_handler);
+
+/* Query the charging algorithm for this battery's termination current
+ * and temperature limits. Returns 0 on success, -EINVAL if the profile,
+ * algorithm or its get_batt_thresholds hook is unavailable. */
+static inline int get_battery_thresholds(struct power_supply *psy,
+	struct psy_batt_thresholds *bat_thresh)
+{
+	struct charging_algo *algo;
+	struct ps_batt_chg_prof chrg_profile;
+
+
+	/* FIXME: Get iterm only for supplied_to arguments*/
+	if (get_batt_prop(&chrg_profile)) {
+		pr_err("Error in getting charge profile:%s:%d\n", __FILE__,
+		       __LINE__);
+		return -EINVAL;
+	}
+
+	algo = power_supply_get_charging_algo(psy, &chrg_profile);
+	if (!algo) {
+		pr_err("Error in getting charging algo!!\n");
+		return -EINVAL;
+	}
+
+	if (algo->get_batt_thresholds) {
+		algo->get_batt_thresholds(chrg_profile, bat_thresh);
+	} else {
+		pr_err("Error in getting battery thresholds from %s:%s\n",
+			algo->name, __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* class_for_each_device callback (data = cable list): for one charger,
+ * pick the supported active cable with the highest current capability
+ * and (re)configure the charger accordingly — or tear everything down
+ * when no cable is active. Runs under evt_lock. */
+static int select_chrgr_cable(struct device *dev, void *data)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct charger_cable *cable, *max_mA_cable = NULL;
+	struct charger_cable *cable_lst = (struct charger_cable *)data;
+	unsigned int max_mA = 0, iterm;
+	int i;
+
+	if (!IS_CHARGER(psy))
+		return 0;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	/* get cable with maximum capability */
+	for (i = 0; i < ARRAY_SIZE(cable_list); ++i) {
+		cable = cable_lst + i;
+		if ((!IS_CABLE_ACTIVE(cable->cable_props.cable_stat)) ||
+		    (!IS_SUPPORTED_CABLE(psy, cable->psy_cable_type)))
+			continue;
+
+		if (cable->cable_props.mA > max_mA) {
+			max_mA_cable = cable;
+			max_mA = cable->cable_props.mA;
+		}
+	}
+
+	/* no cable connected. disable charging */
+	if (!max_mA_cable) {
+
+		if ((IS_CHARGER_ENABLED(psy) || IS_CHARGING_ENABLED(psy))) {
+			disable_charging(psy);
+			disable_charger(psy);
+		}
+		set_cc(psy, 0);
+		set_cv(psy, 0);
+		set_inlmt(psy, 0);
+
+		/* set present and online as 0 */
+		set_present(psy, 0);
+		update_charger_online(psy);
+
+		switch_cable(psy, POWER_SUPPLY_CHARGER_TYPE_NONE);
+
+		mutex_unlock(&psy_chrgr.evt_lock);
+		power_supply_changed(psy);
+		return 0;
+	}
+
+	/* cable type changed.New cable connected or existing cable
+	 * capabilities changed.switch cable and enable charger and charging
+	 */
+	set_present(psy, 1);
+
+	if (CABLE_TYPE(psy) != max_mA_cable->psy_cable_type)
+		switch_cable(psy, max_mA_cable->psy_cable_type);
+
+	if (IS_CHARGER_CAN_BE_ENABLED(psy)) {
+		struct psy_batt_thresholds bat_thresh;
+		memset(&bat_thresh, 0, sizeof(bat_thresh));
+		enable_charger(psy);
+
+		update_charger_online(psy);
+
+		set_inlmt(psy, max_mA_cable->cable_props.mA);
+		if (!get_battery_thresholds(psy, &bat_thresh)) {
+			SET_ITERM(psy, bat_thresh.iterm);
+			SET_MIN_TEMP(psy, bat_thresh.temp_min);
+			SET_MAX_TEMP(psy, bat_thresh.temp_max);
+		}
+
+	} else {
+
+		disable_charger(psy);
+		update_charger_online(psy);
+	}
+
+
+	mutex_unlock(&psy_chrgr.evt_lock);
+	power_supply_trigger_charging_handler(NULL);
+	/* Cable status is same as previous. No action to be taken */
+	return 0;
+
+}
+
+/* Re-run cable selection for every charger in the power-supply class. */
+static void configure_chrgr_source(struct charger_cable *cable_lst)
+{
+
+	class_for_each_device(power_supply_class, NULL,
+			cable_lst, select_chrgr_cable);
+
+}
+
+/* Workqueue handler for an extcon cable event: re-read the cable
+ * properties and, if the cable state changed, re-run charger selection.
+ */
+static void charger_cable_event_worker(struct work_struct *work)
+{
+	struct charger_cable *cable =
+	    container_of(work, struct charger_cable, work);
+	struct extcon_chrgr_cbl_props cable_props;
+
+	if (cable->edev->
+	    get_cable_properties(extcon_cable_name[cable->extcon_cable_type],
+				 (void *)&cable_props)) {
+		/* fixed typo ("Erron") and added missing newline */
+		pr_err("Error in getting cable(%s) properties from extcon device(%s):%s:%d\n",
+		       extcon_cable_name[cable->extcon_cable_type],
+		       cable->edev->name, __FILE__, __LINE__);
+		return;
+	}
+
+	/* Reconfigure chargers only when the cable state actually changed. */
+	if (cable_props.cable_stat != cable->cable_props.cable_stat) {
+		cable->cable_props.cable_stat = cable_props.cable_stat;
+		cable->cable_props.mA = cable_props.mA;
+		configure_chrgr_source(cable_list);
+	}
+}
+
+/*
+ * Extcon notifier callback: defer the (potentially blocking) cable
+ * processing to a workqueue and consume the event.
+ */
+static int charger_cable_notifier(struct notifier_block *nb,
+				  unsigned long stat, void *ptr)
+{
+
+	struct charger_cable *cable =
+		container_of(nb, struct charger_cable, nb);
+
+	schedule_work(&cable->work);
+
+	/* NOTIFY_STOP_MASK stops further notifiers on this chain; the
+	 * worker re-reads the cable state itself, so nothing is lost.
+	 */
+	return NOTIFY_DONE | NOTIFY_STOP_MASK;
+}
+
+/**
+ * psy_charger_throttle_charger - apply a throttle state to a charger
+ * @psy: charger power supply to throttle
+ * @state: index into psy->throttle_states (0 .. num_throttle_states - 1)
+ *
+ * Looks up the throttle action for @state, applies it (disable
+ * charger/charging or limit CC/input current) under the event lock,
+ * then re-runs cable selection so the new limits take effect.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range state or an
+ * unknown throttle action.
+ */
+int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state)
+{
+	int ret = 0;
+
+	/* state is unsigned, so "state < 0" was always false; use >=
+	 * because valid indices into throttle_states are
+	 * 0 .. num_throttle_states - 1 ("> MAX" allowed one-past-end).
+	 */
+	if (state >= MAX_THROTTLE_STATE(psy))
+		return -EINVAL;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	switch (THROTTLE_ACTION(psy, state)) {
+	case PSY_THROTTLE_DISABLE_CHARGER:
+		SET_MAX_CC(psy, 0);
+		disable_charger(psy);
+		break;
+	case PSY_THROTTLE_DISABLE_CHARGING:
+		SET_MAX_CC(psy, 0);
+		disable_charging(psy);
+		break;
+	case PSY_THROTTLE_CC_LIMIT:
+		SET_MAX_CC(psy, THROTTLE_CC_VALUE(psy, state));
+		break;
+	case PSY_THROTTLE_INPUT_LIMIT:
+		set_inlmt(psy, THROTTLE_CC_VALUE(psy, state));
+		break;
+	default:
+		pr_err("Invalid throttle action for %s\n", psy->name);
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&psy_chrgr.evt_lock);
+
+	/* Configure the driver based on new state */
+	if (!ret)
+		configure_chrgr_source(cable_list);
+	return ret;
+}
+EXPORT_SYMBOL(psy_charger_throttle_charger);
+
+/* Register a charger power supply with the charging framework.
+ * The framework-wide state (locks, cable notifiers, caches) is set up
+ * lazily on the first registration. Always returns 0.
+ *
+ * NOTE(review): the check-then-set of is_cable_evt_reg is not
+ * synchronized - assumes registrations are serialized; confirm.
+ */
+int power_supply_register_charger(struct power_supply *psy)
+{
+	int ret = 0;
+
+	if (!psy_chrgr.is_cable_evt_reg) {
+		mutex_init(&psy_chrgr.evt_lock);
+		init_waitqueue_head(&psy_chrgr.wait_chrg_enable);
+		init_charger_cables(cable_list, ARRAY_SIZE(cable_list));
+		INIT_LIST_HEAD(&psy_chrgr.chrgr_cache_lst);
+		INIT_LIST_HEAD(&psy_chrgr.batt_cache_lst);
+		INIT_WORK(&psy_chrgr.algo_trigger_work, trigger_algo_psy_class);
+		psy_chrgr.is_cable_evt_reg = true;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(power_supply_register_charger);
+
+/* Drop any cached charger properties recorded for @psy. */
+static inline void flush_charger_context(struct power_supply *psy)
+{
+	struct charger_props *prop, *next;
+
+	list_for_each_entry_safe(prop, next,
+				 &psy_chrgr.chrgr_cache_lst, node) {
+		if (strcmp(prop->name, psy->name))
+			continue;
+		list_del(&prop->node);
+		kfree(prop);
+	}
+}
+/* Unregister a charger: drop its cached properties. Always returns 0. */
+int power_supply_unregister_charger(struct power_supply *psy)
+{
+	flush_charger_context(psy);
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_unregister_charger);
+
+/**
+ * power_supply_register_charging_algo - register a charging algorithm
+ * @algo: algorithm descriptor; a private copy is made, so the caller's
+ *        structure need not stay alive after this returns.
+ *
+ * Returns 0 on success or -ENOMEM on allocation failure (was -1; use a
+ * real errno so callers can propagate it).
+ */
+int power_supply_register_charging_algo(struct charging_algo *algo)
+{
+	struct charging_algo *algo_new;
+
+	/* kmemdup() replaces the old kzalloc()+memcpy() pair; the
+	 * allocator already logs allocation failures, so no pr_err here.
+	 */
+	algo_new = kmemdup(algo, sizeof(*algo_new), GFP_KERNEL);
+	if (algo_new == NULL)
+		return -ENOMEM;
+
+	list_add_tail(&algo_new->node, &algo_list);
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_register_charging_algo);
+
+/* Remove (and free) every registered algorithm matching @algo->name.
+ * Always returns 0.
+ */
+int power_supply_unregister_charging_algo(struct charging_algo *algo)
+{
+	struct charging_algo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &algo_list, node) {
+		if (strcmp(entry->name, algo->name))
+			continue;
+		list_del(&entry->node);
+		kfree(entry);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_unregister_charging_algo);
+
+/* Look up a registered charging algorithm by name; NULL if absent. */
+static struct charging_algo *get_charging_algo_byname(char *algo_name)
+{
+	struct charging_algo *entry;
+
+	list_for_each_entry(entry, &algo_list, node)
+		if (strcmp(entry->name, algo_name) == 0)
+			return entry;
+
+	return NULL;
+}
+
+/* Look up a registered charging algorithm by battery charge-profile
+ * type; NULL if none matches.
+ */
+static struct charging_algo *get_charging_algo_by_type
+	(enum batt_chrg_prof_type chrg_prof_type)
+{
+	struct charging_algo *entry;
+
+	list_for_each_entry(entry, &algo_list, node)
+		if (entry->chrg_prof_type == chrg_prof_type)
+			return entry;
+
+	return NULL;
+}
+
+/* Pick the charging algorithm for a battery profile. Selection is
+ * purely by batt_prof->chrg_prof_type; the @psy argument is currently
+ * unused (kept for the exported API). Returns NULL if no algorithm is
+ * registered for that profile type.
+ */
+struct charging_algo *power_supply_get_charging_algo
+	(struct power_supply *psy, struct ps_batt_chg_prof *batt_prof)
+{
+
+	return get_charging_algo_by_type(batt_prof->chrg_prof_type);
+
+}
+EXPORT_SYMBOL_GPL(power_supply_get_charging_algo);
--- /dev/null
+
+#ifndef __POWER_SUPPLY_CHARGER_H__
+
+#define __POWER_SUPPLY_CHARGER_H__
+#include <linux/power/battery_id.h>
+#include <linux/power_supply.h>
+
+/* Number of voltage/current samples cached per battery. */
+#define MAX_CUR_VOLT_SAMPLES 3
+/* Interval between current/voltage samples: 30 seconds in jiffies. */
+#define DEF_CUR_VOLT_SAMPLE_JIFF (30*HZ)
+
+/* Result of a charging-algorithm evaluation cycle. */
+enum psy_algo_stat {
+	PSY_ALGO_STAT_UNKNOWN,
+	PSY_ALGO_STAT_NOT_CHARGE,	/* charging should be off */
+	PSY_ALGO_STAT_CHARGE,		/* charging should be on */
+	PSY_ALGO_STAT_FULL,		/* battery full */
+	PSY_ALGO_STAT_MAINT,		/* maintenance charging - presumably
+					 * top-off; TODO confirm */
+};
+
+/* Cached snapshot of one battery's readings, fed to the algorithms. */
+struct batt_props {
+	struct list_head node;
+	const char *name;
+	long voltage_now;
+	long voltage_now_cache[MAX_CUR_VOLT_SAMPLES];
+	long current_now;
+	long current_now_cache[MAX_CUR_VOLT_SAMPLES];
+	int temperature;
+	long status;
+	unsigned long tstamp;		/* time of snapshot - presumably
+					 * jiffies; compared via PROP_TTL */
+	enum psy_algo_stat algo_stat;
+	int health;
+	int throttle_state;
+};
+
+/* Cached snapshot of one charger's state, used to detect changes. */
+struct charger_props {
+	struct list_head node;
+	const char *name;
+	bool present;
+	bool is_charging;
+	int health;
+	bool online;
+	unsigned long cable;
+	unsigned long tstamp;
+};
+
+/* Battery limits handed back by an algorithm's get_batt_thresholds(). */
+struct psy_batt_thresholds {
+	int temp_min;
+	int temp_max;
+	unsigned int iterm;		/* charge termination current */
+};
+
+/* A pluggable charging algorithm, keyed by charge-profile type. */
+struct charging_algo {
+	struct list_head node;
+	unsigned int chrg_prof_type;
+	char *name;
+	enum psy_algo_stat (*get_next_cc_cv)(struct batt_props,
+			struct ps_batt_chg_prof, unsigned long *cc,
+			unsigned long *cv);
+	int (*get_batt_thresholds)(struct ps_batt_chg_prof,
+			struct psy_batt_thresholds *bat_thr);
+};
+
+
+extern int power_supply_register_charging_algo(struct charging_algo *);
+extern int power_supply_unregister_charging_algo(struct charging_algo *);
+
+/* Write an integer-valued property through the supply's set_property
+ * op, returning its result.
+ */
+static inline int set_ps_int_property(struct power_supply *psy,
+				      enum power_supply_property psp,
+				      int prop_val)
+{
+	union power_supply_propval val = { .intval = prop_val };
+
+	return psy->set_property(psy, psp, &val);
+}
+
+/* Read an integer-valued property. NOTE(review): the get_property()
+ * return code is ignored, so a failed read silently yields 0 - all the
+ * macros below inherit that behavior; confirm it is intended.
+ */
+static inline int get_ps_int_property(struct power_supply *psy,
+					enum power_supply_property psp)
+{
+	union power_supply_propval val;
+
+	val.intval = 0;
+
+	psy->get_property(psy, psp, &val);
+	return val.intval;
+}
+/* Define a TTL for some properies to optimize the frequency of
+* algorithm calls. This can be used by properties which will be changed
+* very frequently (eg. current, volatge..)
+*/
+#define PROP_TTL (HZ*10)
+
+/* Turn charging on only when a cable is attached and charging is not
+ * already enabled; makes sure the charger itself is enabled first.
+ * NOTE: psy is evaluated multiple times - no side-effect arguments.
+ */
+#define enable_charging(psy) \
+	({if ((CABLE_TYPE(psy) != POWER_SUPPLY_CHARGER_TYPE_NONE) &&\
+	!IS_CHARGING_ENABLED(psy)) { \
+		enable_charger(psy); \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGING,\
+				true); } })
+/* No trailing semicolon in the expansion: the old one turned
+ * "disable_charging(psy);" into ";;" and broke unbraced if/else use.
+ */
+#define disable_charging(psy) \
+	set_ps_int_property(psy,\
+		POWER_SUPPLY_PROP_ENABLE_CHARGING, false)
+
+#define enable_charger(psy) \
+	set_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGER, true)
+/* Disabling the charger also stops any in-progress charging. */
+#define disable_charger(psy) \
+	({ disable_charging(psy); \
+	set_ps_int_property(psy,\
+		POWER_SUPPLY_PROP_ENABLE_CHARGER, false); })
+
+#define set_cc(psy, cc) \
+ set_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_CURRENT, cc)
+
+#define set_cv(psy, cv) \
+ set_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_VOLTAGE, cv)
+
+#define set_inlmt(psy, inlmt) \
+ set_ps_int_property(psy, POWER_SUPPLY_PROP_INLMT, inlmt)
+
+#define set_present(psy, present) \
+ set_ps_int_property(psy, POWER_SUPPLY_PROP_PRESENT, present)
+
+#define SET_MAX_CC(psy, max_cc) \
+ set_ps_int_property(psy,\
+ POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT, max_cc)
+#define SET_ITERM(psy, iterm) \
+ set_ps_int_property(psy,\
+ POWER_SUPPLY_PROP_CHARGE_TERM_CUR, iterm)
+#define SET_MAX_TEMP(psy, temp) \
+ set_ps_int_property(psy,\
+ POWER_SUPPLY_PROP_MAX_TEMP, temp)
+#define SET_MIN_TEMP(psy, temp) \
+ set_ps_int_property(psy,\
+ POWER_SUPPLY_PROP_MIN_TEMP, temp)
+#define switch_cable(psy, new_cable) \
+ set_ps_int_property(psy,\
+ POWER_SUPPLY_PROP_CABLE_TYPE, new_cable)
+
+#define HEALTH(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_HEALTH)
+#define CV(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_VOLTAGE)
+#define CC(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_CURRENT)
+#define INLMT(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_INLMT)
+#define MAX_CC(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT)
+#define MAX_CV(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE)
+#define VOLTAGE_NOW(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW)
+#define VOLTAGE_OCV(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_VOLTAGE_OCV)
+#define CURRENT_NOW(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW)
+#define STATUS(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_STATUS)
+#define TEMPERATURE(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_TEMP)
+#define BATTERY_TYPE(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY)
+#define PRIORITY(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_PRIORITY)
+#define CABLE_TYPE(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_CABLE_TYPE)
+#define ONLINE(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE)
+
+#define IS_CHARGING_ENABLED(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGING)
+#define IS_CHARGER_ENABLED(psy) \
+ get_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGER)
+#define IS_BATTERY(psy) (psy->type == POWER_SUPPLY_TYPE_BATTERY)
+#define IS_CHARGER(psy) (psy->type == POWER_SUPPLY_TYPE_USB ||\
+ psy->type == POWER_SUPPLY_TYPE_USB_CDP || \
+ psy->type == POWER_SUPPLY_TYPE_USB_DCP ||\
+ psy->type == POWER_SUPPLY_TYPE_USB_ACA)
+#define IS_ONLINE(psy) \
+ (get_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE) == 1)
+#define IS_PRESENT(psy) \
+ (get_ps_int_property(psy, POWER_SUPPLY_PROP_PRESENT) == 1)
+#define IS_SUPPORTED_CABLE(psy, cable_type) \
+ (psy->supported_cables & cable_type)
+#define IS_CABLE_ACTIVE(status) \
+ (!((status == EXTCON_CHRGR_CABLE_DISCONNECTED) ||\
+ (status == EXTCON_CHRGR_CABLE_SUSPENDED)))
+
+#define IS_CHARGER_PROP_CHANGED(prop, cache_prop)\
+ ((cache_prop.online != prop.online) || \
+ (cache_prop.present != prop.present) || \
+ (cache_prop.is_charging != prop.is_charging) || \
+ (cache_prop.health != prop.health))
+
+#define IS_BAT_PROP_CHANGED(bat_prop, bat_cache)\
+ ((bat_cache.voltage_now != bat_prop.voltage_now) || \
+ (time_after64(bat_prop.tstamp, (bat_cache.tstamp + PROP_TTL)) &&\
+ ((bat_cache.current_now != bat_prop.current_now) || \
+ (bat_cache.voltage_now != bat_prop.voltage_now))) || \
+ (bat_cache.temperature != bat_prop.temperature) || \
+ (bat_cache.health != bat_prop.health) || \
+ (bat_cache.throttle_state != bat_prop.throttle_state))
+
+/* Throttle-table accessors. Arguments are parenthesized so arbitrary
+ * expressions can be passed safely (the old macros expanded "state"
+ * and "psy" bare).
+ */
+#define THROTTLE_ACTION(psy, state)\
+	(((psy)->throttle_states + (state))->throttle_action)
+
+#define MAX_THROTTLE_STATE(psy)\
+	((psy)->num_throttle_states)
+
+#define CURRENT_THROTTLE_STATE(psy)\
+	(get_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT))
+
+#define CURRENT_THROTTLE_ACTION(psy)\
+		THROTTLE_ACTION(psy, CURRENT_THROTTLE_STATE(psy))
+
+#define THROTTLE_CC_VALUE(psy, state)\
+	(((psy)->throttle_states + (state))->throttle_val)
+
+#define IS_CHARGING_CAN_BE_ENABLED(psy) \
+	((CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGER) &&\
+	(CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGING))
+#define IS_CHARGER_CAN_BE_ENABLED(psy) \
+	(CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGER)
+
+#define IS_HEALTH_GOOD(psy)\
+	(HEALTH(psy) == POWER_SUPPLY_HEALTH_GOOD)
+
+/* Update POWER_SUPPLY_PROP_STATUS only when it actually changes. */
+static inline void set_battery_status(struct power_supply *psy, int status)
+{
+	if (STATUS(psy) == status)
+		return;
+
+	set_ps_int_property(psy, POWER_SUPPLY_PROP_STATUS, status);
+}
+
+/* Update POWER_SUPPLY_PROP_ONLINE only when it actually changes. */
+static inline void set_charger_online(struct power_supply *psy, int online)
+{
+	if (ONLINE(psy) == online)
+		return;
+
+	set_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE, online);
+}
+
+#endif
#include <linux/power_supply.h>
#include <linux/thermal.h>
#include "power_supply.h"
+#include "power_supply_charger.h"
/* exported for the APM Power driver, APM emulation */
struct class *power_supply_class;
static struct device_type power_supply_dev_type;
+static struct mutex ps_chrg_evt_lock;
+
+static struct power_supply_charger_cap power_supply_chrg_cap = {
+ .chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT,
+ .chrg_type = POWER_SUPPLY_TYPE_USB,
+ .mA = 0 /* 0 mA */
+};
static bool __power_supply_is_supplied_by(struct power_supply *supplier,
struct power_supply *supply)
{
class_for_each_device(power_supply_class, NULL, psy,
__power_supply_changed_work);
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
+
+ power_supply_trigger_charging_handler(psy);
+
power_supply_update_leds(psy);
kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
void power_supply_changed(struct power_supply *psy)
{
+	/* A NULL psy re-runs the charging handler for all supplies
+	 * without touching any one device. (Removed the unused
+	 * "unsigned long flags" the patch added - it only produced a
+	 * -Wunused-variable warning.)
+	 */
+	if (psy == NULL) {
+		power_supply_trigger_charging_handler(psy);
+		return;
+	}
+
	dev_dbg(psy->dev, "%s\n", __func__);
	schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
+/* Per-device callback: forward a charger capability change to the
+ * supply's charging_port_changed() hook, if it implements one.
+ */
+static int __power_supply_charger_event(struct device *dev, void *data)
+{
+	struct power_supply_charger_cap *cap =
+		(struct power_supply_charger_cap *)data;
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	if (psy->charging_port_changed)
+		psy->charging_port_changed(psy, cap);
+
+	return 0;
+}
+
+/* Broadcast a charger capability event to all supplies, then record it
+ * as the latest known cap for power_supply_query_charger_caps().
+ *
+ * NOTE(review): devices are notified before the cached cap is updated,
+ * so a concurrent query can still observe the previous cap - confirm
+ * this ordering is intended.
+ */
+void power_supply_charger_event(struct power_supply_charger_cap cap)
+{
+	class_for_each_device(power_supply_class, NULL, &cap,
+			      __power_supply_charger_event);
+
+	mutex_lock(&ps_chrg_evt_lock);
+	memcpy(&power_supply_chrg_cap, &cap, sizeof(power_supply_chrg_cap));
+	mutex_unlock(&ps_chrg_evt_lock);
+}
+EXPORT_SYMBOL_GPL(power_supply_charger_event);
+
+/* Copy out the most recently reported charger capability. */
+void power_supply_query_charger_caps(struct power_supply_charger_cap *cap)
+{
+	mutex_lock(&ps_chrg_evt_lock);
+	*cap = power_supply_chrg_cap;
+	mutex_unlock(&ps_chrg_evt_lock);
+}
+EXPORT_SYMBOL_GPL(power_supply_query_charger_caps);
+
#ifdef CONFIG_OF
#include <linux/of.h>
unsigned int count = 0;
error = class_for_each_device(power_supply_class, NULL, &count,
- __power_supply_is_system_supplied);
+ __power_supply_is_system_supplied);
/*
- * If no power class device was found at all, most probably we are
- * running on a desktop system, so assume we are on mains power.
- */
+ * If no power class device was found at all, most probably we are
+ * running on a desktop system, so assume we are on mains power.
+ */
if (count == 0)
return 1;
}
EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
+/* Per-device callback: report a battery's PRESENT value (nonzero stops
+ * the class iteration). Non-batteries and failed reads count as 0.
+ */
+static int __power_supply_is_battery_connected(struct device *dev, void *data)
+{
+	union power_supply_propval ret = {0,};
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY)
+		return 0;
+	if (psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT, &ret))
+		return 0;
+
+	return ret.intval;
+}
+
+/* Returns nonzero (the first battery's PRESENT value) when a battery
+ * is connected, 0 when none is, or a negative class-iteration error.
+ */
+int power_supply_is_battery_connected(void)
+{
+	int error;
+
+	error = class_for_each_device(power_supply_class, NULL, NULL,
+				      __power_supply_is_battery_connected);
+	return error;
+}
+EXPORT_SYMBOL_GPL(power_supply_is_battery_connected);
+
int power_supply_set_battery_charged(struct power_supply *psy)
{
if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) {
union power_supply_propval val;
int ret;
- WARN_ON(tzd == NULL);
+ if (WARN_ON(tzd == NULL))
+ return -EINVAL;
psy = tzd->devdata;
ret = psy->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
union power_supply_propval val;
int ret;
+ if (WARN_ON(tcd == NULL))
+ return -EINVAL;
psy = tcd->devdata;
ret = psy->get_property(psy,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
union power_supply_propval val;
int ret;
+ if (WARN_ON(tcd == NULL))
+ return -EINVAL;
psy = tcd->devdata;
ret = psy->get_property(psy,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
union power_supply_propval val;
int ret;
+ if (WARN_ON(tcd == NULL))
+ return -EINVAL;
psy = tcd->devdata;
val.intval = state;
ret = psy->set_property(psy,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ psy_charger_throttle_charger(psy, state);
+
return ret;
}
if (rc)
goto create_triggers_failed;
+ if (IS_CHARGER(psy))
+ rc = power_supply_register_charger(psy);
+ if (rc)
+ goto charger_register_failed;
+
power_supply_changed(psy);
goto success;
+charger_register_failed:
create_triggers_failed:
psy_unregister_cooler(psy);
register_cooler_failed:
cancel_work_sync(&psy->changed_work);
sysfs_remove_link(&psy->dev->kobj, "powers");
power_supply_remove_triggers(psy);
+ if (IS_CHARGER(psy))
+ power_supply_unregister_charger(psy);
+ power_supply_remove_triggers(psy);
psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
device_unregister(psy->dev);
power_supply_class->dev_uevent = power_supply_uevent;
power_supply_init_attrs(&power_supply_dev_type);
+ mutex_init(&ps_chrg_evt_lock);
return 0;
}
struct device_attribute *attr,
char *buf) {
static char *type_text[] = {
- "Unknown", "Battery", "UPS", "Mains", "USB",
+ "Unknown", "Battery", "UPS", "Mains", "USB", "USB",
"USB_DCP", "USB_CDP", "USB_ACA"
};
static char *status_text[] = {
POWER_SUPPLY_ATTR(constant_charge_current_max),
POWER_SUPPLY_ATTR(constant_charge_voltage),
POWER_SUPPLY_ATTR(constant_charge_voltage_max),
+ POWER_SUPPLY_ATTR(charge_current_limit),
POWER_SUPPLY_ATTR(charge_control_limit),
POWER_SUPPLY_ATTR(charge_control_limit_max),
+ POWER_SUPPLY_ATTR(charge_current),
+ POWER_SUPPLY_ATTR(max_charge_current),
+ POWER_SUPPLY_ATTR(charge_voltage),
+ POWER_SUPPLY_ATTR(max_charge_voltage),
+ POWER_SUPPLY_ATTR(input_cur_limit),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
POWER_SUPPLY_ATTR(energy_full),
POWER_SUPPLY_ATTR(temp),
POWER_SUPPLY_ATTR(temp_alert_min),
POWER_SUPPLY_ATTR(temp_alert_max),
+ POWER_SUPPLY_ATTR(max_temp),
+ POWER_SUPPLY_ATTR(min_temp),
POWER_SUPPLY_ATTR(temp_ambient),
POWER_SUPPLY_ATTR(temp_ambient_alert_min),
POWER_SUPPLY_ATTR(temp_ambient_alert_max),
POWER_SUPPLY_ATTR(time_to_full_now),
POWER_SUPPLY_ATTR(time_to_full_avg),
POWER_SUPPLY_ATTR(type),
+ POWER_SUPPLY_ATTR(charge_term_cur),
+ POWER_SUPPLY_ATTR(enable_charging),
+ POWER_SUPPLY_ATTR(enable_charger),
+ POWER_SUPPLY_ATTR(cable_type),
+ POWER_SUPPLY_ATTR(priority),
POWER_SUPPLY_ATTR(scope),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
if PWM
+config PWM_SYSFS
+ bool
+ default y if SYSFS
+
config PWM_AB8500
tristate "AB8500 PWM support"
depends on AB8500_CORE && ARCH_U8500
To compile this driver as a module, choose M here: the module
will be called pwm-vt8500.
+config PWM_INTEL_MID
+	tristate "Support for Intel MID PWM"
+	depends on PCI
+	help
+	  This option enables support for Intel Mid PWM Driver. Say Y
+	  here if you want to enable the PWM functionality.
+
+	  To compile this driver as a module, choose M here. The module will
+	  be called pwm-intel-mid.
+
endif
obj-$(CONFIG_PWM) += core.o
+obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
+obj-$(CONFIG_PWM_INTEL_MID) += pwm-intel-mid.o
if (IS_ENABLED(CONFIG_OF))
of_pwmchip_add(chip);
+ pwmchip_sysfs_export(chip);
+
out:
mutex_unlock(&pwm_lock);
return ret;
+	pwmchip_sysfs_unexport(chip);
+
	free_pwms(chip);
out:
mutex_unlock(&pwm_lock);
return ret;
*/
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
+	int err;
+
	if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
		return -EINVAL;
-	return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
+	err = pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
+	if (err)
+		return err;
+
+	/* Cache the applied values so later reads (e.g. by sysfs or
+	 * drivers re-enabling the channel) reflect the configured state.
+	 */
+	pwm->duty_cycle = duty_ns;
+	pwm->period = period_ns;
+
+	return 0;
}
EXPORT_SYMBOL_GPL(pwm_config);
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
{
+	int err;
+
	if (!pwm || !pwm->chip->ops)
		return -EINVAL;
	if (test_bit(PWMF_ENABLED, &pwm->flags))
		return -EBUSY;
-	return pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
+	err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
+	if (err)
+		return err;
+
+	/* Cache the polarity actually applied by the chip driver. */
+	pwm->polarity = polarity;
+
+	return 0;
}
EXPORT_SYMBOL_GPL(pwm_set_polarity);
--- /dev/null
+/*
+ * pwm-intel-mid.c: Driver for PWM on Intel MID platform
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Nicolas Pernas Maradei <nicolas.pernas.maradei@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#define PWM_INTEL_MID_DRIVER_NAME "pwm-intel-mid"
+#define PCI_DEVICE_ID_INTEL_MID_MRFLD 0x11a5
+#define CLOCK_RATE 19200000
+#define BASE_10 10
+#define NSECS_PER_SEC 1000000000UL
+#define PWM_PERIOD_NS_MAX 218453000 /* about 4.6 Hz */
+#define PWM_PERIOD_NS_MIN 104 /* about 9.6 MHz */
+#define PWM_ON_TIME_DIVISOR_BITS 8
+#define PWM_BASE_UNIT_FRAC_BITS 14
+#define PWM_BASE_UNIT_INT_BITS 8
+#define PWM_COMPARE_UNIT_SIZE 256UL
+#define PWM_DYNAMYC_RANGE_DEFAULT 100UL
+#define PWM_DYNAMYC_RANGE_THRESHOLD 13333
+#define PWM_DEFAULT_PERIOD 4950495 /* about 200 Hz */
+#define PWM_CONTROL_REGISTER_SIZE 0x400
+
+/* MMIO address of one PWM channel's control register (one
+ * PWM_CONTROL_REGISTER_SIZE block per channel). No trailing semicolon:
+ * the old one made the macro unusable in expressions and produced
+ * double semicolons at call sites; arguments are now parenthesized.
+ */
+#define get_control_register(pwm, pwm_id) \
+	((u8 *)(pwm)->regs + ((pwm_id) * PWM_CONTROL_REGISTER_SIZE))
+
+/* In-memory image of a hardware PWM control register. Bitfield order
+ * is assumed LSB-first (little-endian GCC layout): on_time_divisor[7:0],
+ * base_unit_frac[13:0], base_unit_int[7:0], sw_update, enable.
+ */
+union pwmctrl_reg {
+	struct {
+		u32 on_time_divisor:PWM_ON_TIME_DIVISOR_BITS;
+		u32 base_unit_frac:PWM_BASE_UNIT_FRAC_BITS;
+		u32 base_unit_int:PWM_BASE_UNIT_INT_BITS;
+		u32 sw_update:1;
+		u32 enable:1;
+	} part;
+	u32 full;
+};
+
+/* Driver instance: MMIO base plus one cached control-register image
+ * per channel (num_of_pwms entries).
+ */
+struct intel_mid_pwm_chip {
+	struct pwm_chip chip;
+	void __iomem *regs;
+	union pwmctrl_reg *pwmctrls;
+	int num_of_pwms;
+};
+
+/* Map the framework's pwm_chip back to our driver instance. */
+static inline struct intel_mid_pwm_chip *
+to_pwm(struct pwm_chip *chip)
+{
+	return container_of(chip, struct intel_mid_pwm_chip, chip);
+}
+
+/* Read-modify-write the enable bit of one control register, keeping
+ * the caller's cached register image (@pwmctrl) in sync.
+ */
+static inline void
+pwm_set_enable_bit(void __iomem *reg, union pwmctrl_reg *pwmctrl, u8 value)
+{
+	pwmctrl->full = readl(reg);
+	pwmctrl->part.enable = value;
+	writel(pwmctrl->full, reg);
+}
+
+/* Program the 8-bit on-time divisor for the requested duty cycle.
+ * The computed duty fraction is inverted (255 - duty*255/period)
+ * before being written, so the hardware field appears to encode
+ * off-time - NOTE(review): confirm against the PWM block datasheet.
+ */
+static void
+intel_mid_pwm_on_time_divisor(void __iomem *reg, union pwmctrl_reg *pwmctrl,
+		const u32 period, const u32 duty_cycle)
+{
+	u64 on_time_divisor;
+
+	/* Calculate and set on time divisor */
+	on_time_divisor = duty_cycle * (u64)(PWM_COMPARE_UNIT_SIZE - 1UL);
+	do_div(on_time_divisor, period);
+	on_time_divisor = PWM_COMPARE_UNIT_SIZE - on_time_divisor - 1UL;
+
+	pwmctrl->full = readl(reg);
+	pwmctrl->part.on_time_divisor = on_time_divisor;
+	writel(pwmctrl->full, reg);
+}
+
+/* Compute and program the 22-bit base_unit field (8-bit integer plus
+ * 14-bit binary fraction) that sets the output frequency for the
+ * requested @period given the @clock_rate input clock. The fixed-point
+ * conversion is explained step by step in the comments below.
+ * Always returns 0.
+ */
+static int
+intel_mid_pwm_base_unit(void __iomem *reg, union pwmctrl_reg *pwmctrl,
+		const u32 period, const u32 clock_rate)
+{
+	u32 dynamic_range = PWM_DYNAMYC_RANGE_DEFAULT;
+	u64 rest;
+	u64 tmp;
+	u64 fraction = 0;
+	u64 numerator = 1;
+	u64 base_unit_integer;
+	u64 frequency = NSECS_PER_SEC;
+	u32 base_unit_fraction = 0;
+	int i;
+
+	/* The dynamic range multiplier is used to get more accurate
+	calculations when the frequency is small (less than 75 KHz) in the
+	fraction part of base unit. In some way when requesting low frequencies
+	all calculations are done using the period in nano-secs e-2. For high
+	frecuencies we use nano-secs only. */
+	if (period < PWM_DYNAMYC_RANGE_THRESHOLD)
+		dynamic_range = 1UL;
+	frequency *= dynamic_range;
+
+	/* calculate frequency: f (hz) = 1e9 (ns/ps) / p (ns/ps). Result is in
+	Hz depending on dynamic_range being 1. */
+	do_div(frequency, period);
+
+	/* base_unit is a 22 bits register composed of a fractional part (first
+	14 bits) and an integer part (next 8 bits). The integer part
+	calculation is trivial. Done in place by do_div() below. */
+	base_unit_integer = frequency * PWM_COMPARE_UNIT_SIZE;
+
+	rest = do_div(base_unit_integer, clock_rate * dynamic_range);
+
+	/* The fractional part of base_unit needs to be calculated and converted
+	to binary fixed point notation. Two steps will be needed to do the
+	calculation. First to calculate the fraction part in decimal and
+	secondly to convert it to binary fixed point. Due to lack of float
+	support in the kernel we'll use the rest of (frequency *
+	PWM_COMPARE_UNIT_SIZE / clock_speed) division to calculate it and then
+	following the standard division algorithm the rest will be multiplied
+	by 10 and divided by clock_rate several times until the desired level
+	of precision is reached. At the end it will look like this:
+	base_unit_fraction = fraction / numerator where numerator is a power
+	of 10.
+
+	E.g.: base_unit = 1.123, base_unit_fraction = 0.123,
+	fraction = 123, numerator = 1000 */
+	for (i = 0; i < PWM_BASE_UNIT_FRAC_BITS; i++) {
+		tmp = rest * BASE_10;
+		rest = do_div(tmp, clock_rate * dynamic_range);
+		fraction += tmp;
+		fraction *= BASE_10;
+		numerator *= BASE_10;
+	}
+	do_div(fraction, BASE_10);
+
+	/* At this point we've got the fraction and numerator done following
+	the above description. The binary fixed point conversion is done by
+	repeated multiplications of the fraction (but using fractions (fra/num)
+	instead of floats). When the fraction is multipled by 2 and gets greater
+	or equal than 1 (or in our case frac/num >= 1 -> frac >= num) then we
+	know the next digit in the binary fixed point number will be a '1'.
+	Also this excess needs to be removed. In the original algorithm the
+	overflow digit is substracted. In our case we can substract the
+	numerator. */
+	for (i = 0; i < PWM_BASE_UNIT_FRAC_BITS; i++) {
+		/* Multiply fraction by 2 */
+		fraction <<= 1;
+		base_unit_fraction <<= 1;
+
+		/* frac / num >= 1 -> set next bit to '1' and remove "overflow
+		digit" */
+		if (fraction >= numerator) {
+			base_unit_fraction |= 1;
+			fraction -= numerator;
+		}
+	}
+	/* If both the values are 0, the output will be somehow not correct.
+	 * So if it happens, change the fraction to 1.
+	 */
+	if ((0 == base_unit_fraction) && (0 == base_unit_integer))
+		base_unit_fraction = 1UL;
+
+	pwmctrl->full = readl(reg);
+	pwmctrl->part.base_unit_int = (u32)base_unit_integer;
+	pwmctrl->part.base_unit_frac = base_unit_fraction;
+	writel(pwmctrl->full, reg);
+
+	return 0;
+}
+
+/* Program base_unit and on-time divisor for the requested duty/period,
+ * then latch the new settings into the hardware via the sw_update bit.
+ */
+static int
+intel_mid_pwm_setup(void __iomem *reg, union pwmctrl_reg *pwmctrl,
+		int duty_ns, int period_ns)
+{
+	int ret;
+
+	/* Calculate and set base_unit */
+	ret = intel_mid_pwm_base_unit(reg, pwmctrl, period_ns, CLOCK_RATE);
+	if (ret)
+		return ret;
+
+	/* Calculate and set on time divisor */
+	intel_mid_pwm_on_time_divisor(reg, pwmctrl, period_ns, duty_ns);
+
+	/* Set software update bit */
+	pwmctrl->part.sw_update = 1UL;
+	writel(pwmctrl->full, reg);
+
+	return 0;
+}
+
+/* .config callback: validate the period against hardware limits and
+ * program the channel. duty_ns <= period_ns is already guaranteed by
+ * the PWM core's pwm_config() argument check.
+ */
+static int
+intel_mid_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm_dev,
+		int duty_ns, int period_ns)
+{
+	struct intel_mid_pwm_chip *pwm = to_pwm(chip);
+	union pwmctrl_reg *pwmctrl = &pwm->pwmctrls[pwm_dev->hwpwm];
+	void __iomem *reg = get_control_register(pwm, pwm_dev->hwpwm);
+
+	dev_dbg(chip->dev, "%s: period_ns %d, duty_ns %d\n", __func__,
+			period_ns, duty_ns);
+
+	/* Check the period is valid within HW capabilities */
+	if (period_ns < PWM_PERIOD_NS_MIN || period_ns > PWM_PERIOD_NS_MAX) {
+		dev_err(chip->dev, "Period (ns) must be in range %u:%u\n",
+				PWM_PERIOD_NS_MIN, PWM_PERIOD_NS_MAX);
+		return -EINVAL;
+	}
+
+	return intel_mid_pwm_setup(reg, pwmctrl, duty_ns, period_ns);
+}
+
+/* .enable callback: resume the device, reprogram the cached
+ * duty/period and set the enable bit.
+ */
+static int
+intel_mid_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm_dev)
+{
+	struct intel_mid_pwm_chip *pwm = to_pwm(chip);
+	union pwmctrl_reg *pwmctrl = &pwm->pwmctrls[pwm_dev->hwpwm];
+	int ret;
+	void __iomem *reg = get_control_register(pwm, pwm_dev->hwpwm);
+
+	pm_runtime_get_sync(chip->dev);
+
+	ret = intel_mid_pwm_setup(reg, pwmctrl, pwm_dev->duty_cycle,
+			pwm_dev->period);
+	if (ret) {
+		/* Drop the runtime-PM reference taken above; returning
+		 * without this leaked a usage count on every failure.
+		 */
+		pm_runtime_put(chip->dev);
+		return ret;
+	}
+
+	pwm_set_enable_bit(reg, pwmctrl, 1U);
+
+	dev_dbg(chip->dev, "%s: pwmctrl %#x\n", __func__, pwmctrl->full);
+
+	return 0;
+}
+
+/* .disable callback: clear the enable bit and drop the runtime-PM
+ * reference taken in intel_mid_pwm_enable().
+ */
+static void
+intel_mid_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm_dev)
+{
+	struct intel_mid_pwm_chip *pwm = to_pwm(chip);
+	union pwmctrl_reg *pwmctrl = &pwm->pwmctrls[pwm_dev->hwpwm];
+	void __iomem *reg = get_control_register(pwm, pwm_dev->hwpwm);
+
+	pwm_set_enable_bit(reg, pwmctrl, 0);
+	pm_runtime_put(chip->dev);
+	dev_dbg(chip->dev, "%s: pwmctrl %#x\n", __func__, pwmctrl->full);
+}
+
+/* Callbacks wired into the generic PWM framework. */
+static const struct pwm_ops intel_mid_pwm_ops = {
+	.config = intel_mid_pwm_config,
+	.enable = intel_mid_pwm_enable,
+	.disable = intel_mid_pwm_disable,
+	.owner = THIS_MODULE,
+};
+
+/* PCI probe: map BAR0, size the number of channels from the BAR length
+ * and register with the PWM framework. Error paths now unwind exactly
+ * what was acquired (the old ioremap-failure path leaked the requested
+ * regions by jumping straight to do_disable_device).
+ */
+static int
+intel_mid_pwm_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+	struct intel_mid_pwm_chip *pwm;
+	int ret, i;
+	resource_size_t resource_len;
+
+	pwm = devm_kzalloc(&pci->dev, sizeof(*pwm), GFP_KERNEL);
+	if (!pwm) {
+		dev_err(&pci->dev, "Can't allocate memory for pwm\n");
+		return -ENOMEM;
+	}
+
+	/* Init the device */
+	ret = pci_enable_device(pci);
+	if (ret) {
+		dev_err(&pci->dev, "Can't enable pci device\n");
+		return ret;
+	}
+
+	ret = pci_request_regions(pci, PWM_INTEL_MID_DRIVER_NAME);
+	if (ret) {
+		dev_err(&pci->dev, "Can't request regions\n");
+		goto do_disable_device;
+	}
+	/* NOTE(review): this reference is never dropped (no pci_dev_put
+	 * on error or in remove) - confirm pinning the device for the
+	 * module lifetime is intended.
+	 */
+	pci_dev_get(pci);
+
+	pwm->regs = pci_ioremap_bar(pci, 0);
+	if (!pwm->regs) {
+		dev_err(&pci->dev, "ioremap failed\n");
+		ret = -EIO;
+		goto do_release_regions;
+	}
+
+	/* Calculate number of available pwm modules */
+	resource_len = pci_resource_len(pci, 0);
+	do_div(resource_len, PWM_CONTROL_REGISTER_SIZE);
+	pwm->num_of_pwms = resource_len;
+
+	/* allocate memory for PWM control register images */
+	pwm->pwmctrls = devm_kzalloc(&pci->dev,
+		sizeof(*pwm->pwmctrls) * pwm->num_of_pwms, GFP_KERNEL);
+	if (!pwm->pwmctrls) {
+		dev_err(&pci->dev, "Can't allocate memory for pwm pwmctrls\n");
+		ret = -ENOMEM;
+		goto do_unmap_regs;
+	}
+
+	/* register the driver with PWM framework */
+	pwm->chip.dev = &pci->dev;
+	pwm->chip.ops = &intel_mid_pwm_ops;
+	pwm->chip.base = -1;
+	pwm->chip.npwm = pwm->num_of_pwms;
+
+	ret = pwmchip_add(&pwm->chip);
+	if (ret) {
+		dev_err(&pci->dev, "Failed to add PWM chip: %d\n", ret);
+		goto do_unmap_regs;
+	}
+
+	/* Set default frequency/period (about 200Hz) on all pwm modules. */
+	for (i = 0; i < pwm->num_of_pwms; i++)
+		pwm->chip.pwms[i].period = PWM_DEFAULT_PERIOD;
+
+	pci_set_drvdata(pci, pwm);
+	pm_runtime_allow(&pci->dev);
+	pm_runtime_put_noidle(&pci->dev);
+
+	return ret;
+
+do_unmap_regs:
+	iounmap(pwm->regs);
+do_release_regions:
+	pci_release_regions(pci);
+do_disable_device:
+	pci_disable_device(pci);
+
+	return ret;
+}
+
+/* PCI remove: unwind probe in reverse order. */
+static void
+intel_mid_pwm_remove(struct pci_dev *pci)
+{
+	struct intel_mid_pwm_chip *pwm = pci_get_drvdata(pci);
+
+	/* Rebalance the runtime-PM counts adjusted in probe. */
+	pm_runtime_get_noresume(&pci->dev);
+	pm_runtime_forbid(&pci->dev);
+	pwmchip_remove(&pwm->chip);
+	iounmap(pwm->regs);
+	pci_release_regions(pci);
+	pci_disable_device(pci);
+	/* NOTE(review): probe's pci_dev_get() has no matching
+	 * pci_dev_put() here - confirm intended.
+	 */
+	pci_set_drvdata(pci, NULL);
+}
+
+/* #ifdef, not "#if CONFIG_PM": matches the guard around the .driver.pm
+ * assignment below and is robust if the macro is defined empty.
+ */
+#ifdef CONFIG_PM
+/* No-op runtime-PM stubs: the device needs no explicit save/restore,
+ * but providing callbacks lets the core perform the transitions.
+ */
+static int
+intel_mid_pwm_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int
+intel_mid_pwm_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static void
+intel_mid_pwm_runtime_complete(struct device *dev)
+{
+}
+
+static const struct dev_pm_ops intel_mid_pm_ops = {
+	.prepare = intel_mid_pwm_runtime_suspend,
+	.complete = intel_mid_pwm_runtime_complete,
+	.runtime_suspend = intel_mid_pwm_runtime_suspend,
+	.runtime_resume = intel_mid_pwm_runtime_resume,
+};
+#endif
+
+/* PCI Routines */
+/* DEFINE_PCI_DEVICE_TABLE is deprecated (checkpatch warns); spell the
+ * table out as a plain static const array.
+ */
+static const struct pci_device_id intel_mid_pwm_pci_ids[] = {
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MID_MRFLD), 0},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, intel_mid_pwm_pci_ids);
+
+static struct pci_driver intel_mid_pci_driver = {
+ .name = PWM_INTEL_MID_DRIVER_NAME,
+ .id_table = intel_mid_pwm_pci_ids,
+ .probe = intel_mid_pwm_probe,
+ .remove = intel_mid_pwm_remove,
+#ifdef CONFIG_PM
+ .driver = {
+ .pm = &intel_mid_pm_ops,
+ },
+#endif
+};
+
+module_pci_driver(intel_mid_pci_driver);
+
+MODULE_ALIAS("pci:" PWM_INTEL_MID_DRIVER_NAME);
+MODULE_DESCRIPTION("Intel(R) MID PWM driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Pernas Maradei <nicolas.pernas.maradei@emutex.com>");
+
--- /dev/null
+/*
+ * A simple sysfs interface for the generic PWM framework
+ *
+ * Copyright (C) 2013 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Based on previous work by Lars Poeschel <poeschel@lemonage.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/pwm.h>
+
+/*
+ * One sysfs "pwmN" child device per exported PWM.  The embedded struct
+ * device lets the driver core manage lifetime; the release callback
+ * frees the containing pwm_export.
+ */
+struct pwm_export {
+	struct device child;
+	struct pwm_device *pwm;
+};
+
+/* Map a sysfs child device back to its pwm_export container. */
+static struct pwm_export *child_to_pwm_export(struct device *child)
+{
+	return container_of(child, struct pwm_export, child);
+}
+
+/* Convenience: child device -> exported pwm_device. */
+static struct pwm_device *child_to_pwm_device(struct device *child)
+{
+	struct pwm_export *export = child_to_pwm_export(child);
+
+	return export->pwm;
+}
+
+/* sysfs read of the "period" attribute: reports the period in ns. */
+static ssize_t pwm_period_show(struct device *child,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	const struct pwm_device *dev = child_to_pwm_device(child);
+	unsigned int period = dev->period;
+
+	return sprintf(buf, "%u\n", period);
+}
+
+/* sysfs write of the "period" attribute: reconfigure keeping duty cycle. */
+static ssize_t pwm_period_store(struct device *child,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	unsigned int period;
+	int err;
+
+	err = kstrtouint(buf, 0, &period);
+	if (err)
+		return err;
+
+	err = pwm_config(pwm, pwm->duty_cycle, period);
+	if (err)
+		return err;
+
+	return size;
+}
+
+/* sysfs read of "duty_cycle": active time in ns. */
+static ssize_t pwm_duty_cycle_show(struct device *child,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+
+	return sprintf(buf, "%u\n", pwm->duty_cycle);
+}
+
+/* sysfs write of "duty_cycle": reconfigure, keeping the current period. */
+static ssize_t pwm_duty_cycle_store(struct device *child,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	ret = pwm_config(pwm, val, pwm->period);
+
+	return ret ? : size;
+}
+
+/* sysfs read of "enable": 1 if the PWM output is currently enabled. */
+static ssize_t pwm_enable_show(struct device *child,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+	int enabled = test_bit(PWMF_ENABLED, &pwm->flags);
+
+	return sprintf(buf, "%d\n", enabled);
+}
+
+/* sysfs write of "enable": only 0 (disable) and 1 (enable) are accepted. */
+static ssize_t pwm_enable_store(struct device *child,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	int val, ret;
+
+	ret = kstrtoint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	switch (val) {
+	case 0:
+		pwm_disable(pwm);
+		break;
+	case 1:
+		ret = pwm_enable(pwm);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret ? : size;
+}
+
+/* sysfs read of "polarity": "normal" or "inversed". */
+static ssize_t pwm_polarity_show(struct device *child,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+
+	return sprintf(buf, "%s\n", pwm->polarity ? "inversed" : "normal");
+}
+
+/* sysfs write of "polarity": accepts exactly "normal" or "inversed". */
+static ssize_t pwm_polarity_store(struct device *child,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	enum pwm_polarity polarity;
+	int ret;
+
+	if (sysfs_streq(buf, "normal"))
+		polarity = PWM_POLARITY_NORMAL;
+	else if (sysfs_streq(buf, "inversed"))
+		polarity = PWM_POLARITY_INVERSED;
+	else
+		return -EINVAL;
+
+	ret = pwm_set_polarity(pwm, polarity);
+
+	return ret ? : size;
+}
+
+/* Per-PWM attributes exposed under /sys/class/pwm/pwmchipN/pwmM/. */
+static DEVICE_ATTR(period, 0644, pwm_period_show, pwm_period_store);
+static DEVICE_ATTR(duty_cycle, 0644, pwm_duty_cycle_show, pwm_duty_cycle_store);
+static DEVICE_ATTR(enable, 0644, pwm_enable_show, pwm_enable_store);
+static DEVICE_ATTR(polarity, 0644, pwm_polarity_show, pwm_polarity_store);
+
+static struct attribute *pwm_attrs[] = {
+	&dev_attr_period.attr,
+	&dev_attr_duty_cycle.attr,
+	&dev_attr_enable.attr,
+	&dev_attr_polarity.attr,
+	NULL
+};
+
+static const struct attribute_group pwm_attr_group = {
+	.attrs = pwm_attrs,
+};
+
+static const struct attribute_group *pwm_attr_groups[] = {
+	&pwm_attr_group,
+	NULL,
+};
+
+/* device release callback: frees the pwm_export when the last ref drops. */
+static void pwm_export_release(struct device *child)
+{
+	struct pwm_export *export = child_to_pwm_export(child);
+
+	kfree(export);
+}
+
+/*
+ * pwm_export_child() - create the "pwmN" sysfs child for an exported PWM.
+ * @parent: the pwmchip class device
+ * @pwm: the PWM being exported
+ *
+ * Returns 0 on success, -EBUSY if already exported, -ENOMEM or the
+ * device_register() error code on failure.
+ */
+static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
+{
+	struct pwm_export *export;
+	int ret;
+
+	if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags))
+		return -EBUSY;
+
+	export = kzalloc(sizeof(*export), GFP_KERNEL);
+	if (!export) {
+		clear_bit(PWMF_EXPORTED, &pwm->flags);
+		return -ENOMEM;
+	}
+
+	export->pwm = pwm;
+
+	export->child.release = pwm_export_release;
+	export->child.parent = parent;
+	export->child.devt = MKDEV(0, 0);
+	export->child.groups = pwm_attr_groups;
+	dev_set_name(&export->child, "pwm%u", pwm->hwpwm);
+
+	ret = device_register(&export->child);
+	if (ret) {
+		clear_bit(PWMF_EXPORTED, &pwm->flags);
+		/*
+		 * device_register() initialises the embedded kobject even
+		 * when it fails; drop the reference instead of kfree()ing
+		 * directly so pwm_export_release() frees the memory exactly
+		 * once through the normal kobject path.
+		 */
+		put_device(&export->child);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* device_find_child() predicate: match the child exporting @data. */
+static int pwm_unexport_match(struct device *child, void *data)
+{
+	return child_to_pwm_device(child) == data;
+}
+
+/*
+ * Tear down the "pwmN" sysfs child and release the PWM taken at export
+ * time.  Returns -ENODEV if the PWM was not exported.
+ */
+static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm)
+{
+	struct device *child;
+
+	if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags))
+		return -ENODEV;
+
+	child = device_find_child(parent, pwm, pwm_unexport_match);
+	if (!child)
+		return -ENODEV;
+
+	/* for device_find_child() */
+	put_device(child);
+	device_unregister(child);
+	pwm_put(pwm);
+
+	return 0;
+}
+
+/* "export" attribute: user writes a hwpwm index to expose that PWM. */
+static ssize_t pwm_export_store(struct device *parent,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	struct pwm_chip *chip = dev_get_drvdata(parent);
+	struct pwm_device *pwm;
+	unsigned int hwpwm;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &hwpwm);
+	if (ret < 0)
+		return ret;
+
+	if (hwpwm >= chip->npwm)
+		return -ENODEV;
+
+	pwm = pwm_request_from_chip(chip, hwpwm, "sysfs");
+	if (IS_ERR(pwm))
+		return PTR_ERR(pwm);
+
+	ret = pwm_export_child(parent, pwm);
+	if (ret < 0)
+		pwm_put(pwm);
+
+	return ret ? : len;
+}
+
+/* "unexport" attribute: user writes a hwpwm index to remove its child. */
+static ssize_t pwm_unexport_store(struct device *parent,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct pwm_chip *chip = dev_get_drvdata(parent);
+	unsigned int hwpwm;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &hwpwm);
+	if (ret < 0)
+		return ret;
+
+	if (hwpwm >= chip->npwm)
+		return -ENODEV;
+
+	ret = pwm_unexport_child(parent, &chip->pwms[hwpwm]);
+
+	return ret ? : len;
+}
+
+/* "npwm" attribute: number of PWM channels this chip provides. */
+static ssize_t pwm_npwm_show(struct device *parent,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	const struct pwm_chip *chip = dev_get_drvdata(parent);
+
+	return sprintf(buf, "%u\n", chip->npwm);
+}
+
+/* Per-chip attributes attached to every pwmchipN class device. */
+static struct device_attribute pwm_chip_attrs[] = {
+	__ATTR(export, 0200, NULL, pwm_export_store),
+	__ATTR(unexport, 0200, NULL, pwm_unexport_store),
+	__ATTR(npwm, 0444, pwm_npwm_show, NULL),
+	__ATTR_NULL,
+};
+
+static struct class pwm_class = {
+	.name = "pwm",
+	.owner = THIS_MODULE,
+	.dev_attrs = pwm_chip_attrs,
+};
+
+/* class_find_device() predicate: match the class dev wrapping @data. */
+static int pwmchip_sysfs_match(struct device *parent, const void *data)
+{
+	return dev_get_drvdata(parent) == data;
+}
+
+/* Create the /sys/class/pwm/pwmchipN device for a registered chip. */
+void pwmchip_sysfs_export(struct pwm_chip *chip)
+{
+	struct device *parent;
+
+	/*
+	 * If device_create() fails the pwm_chip is still usable by
+	 * the kernel its just not exported.
+	 */
+	parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
+			       "pwmchip%d", chip->base);
+	if (IS_ERR(parent)) {
+		dev_warn(chip->dev,
+			 "device_create failed for pwm_chip sysfs export\n");
+	}
+}
+
+/* Remove the pwmchipN class device created by pwmchip_sysfs_export(). */
+void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+{
+	struct device *parent;
+
+	parent = class_find_device(&pwm_class, NULL, chip,
+				   pwmchip_sysfs_match);
+	if (parent) {
+		/* for class_find_device() */
+		put_device(parent);
+		device_unregister(parent);
+	}
+}
+
+/* Register the "pwm" class early so chips can export at probe time. */
+static int __init pwm_sysfs_init(void)
+{
+	return class_register(&pwm_class);
+}
+subsys_initcall(pwm_sysfs_init);
+
This driver provides support for the voltage regulators on the
AS3711 PMIC
-endif
+config REGULATOR_PMIC_BASIN_COVE
+ tristate "PMIC Basin Cove voltage regulator"
+ help
+	  This driver controls the Intel Basin Cove PMIC voltage output regulators.
+endif
# Makefile for regulator drivers.
#
+CFLAGS_pmic_basin_cove.o := -Werror
obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
+obj-$(CONFIG_REGULATOR_PMIC_BASIN_COVE) += pmic_basin_cove.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
--- /dev/null
+/*
+ * pmic_basin_cove.c - Merrifield regulator driver
+ * Copyright (c) 2013, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/intel_basin_cove_pmic.h>
+#include <linux/regulator/machine.h>
+
+#include <asm/intel_scu_pmic.h>
+
+/* Intel Voltage cntrl register parameters*/
+#define REG_ENA_STATUS_MASK 0x01
+#define REG_VSEL_MASK 0xc0
+#define VSEL_SHIFT 6
+
+#define REG_ON 0x01
+#define REG_OFF 0xfe
+
+/*
+ * Control-register addresses indexed by regulator id (VPROG1..VPROG3).
+ * File-local lookup table: static so it does not pollute the global
+ * namespace, const because it is never written.
+ */
+static const u16 reg_addr_offset[] = { VPROG1CNT_ADDR, VPROG2CNT_ADDR,
+	VPROG3CNT_ADDR };
+
+/**
+* intel_pmic_reg_is_enabled - To check if the regulator is enabled
+* @rdev: regulator_dev structure
+* @return value : 1 - Regulator is ON
+*		 : 0 - Regulator is OFF
+*		 : negative errno on IPC read failure
+*/
+static int intel_pmic_reg_is_enabled(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8 reg;
+	int ret;
+
+	/*FIXME: Is it ok to use the following IPC API*/
+	/* was garbled as the HTML entity "&reg;" — must pass &reg */
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+
+	return reg & REG_ENA_STATUS_MASK;
+}
+/**
+* intel_pmic_reg_enable - To enable the regulator
+* @rdev: regulator_dev structure
+* @return value : 0 - Regulator enabling success
+*		 : negative errno - Regulator enabling failed
+*
+* Read-modify-write of the control register: sets the enable bit (REG_ON)
+* while leaving the voltage-select bits untouched.
+*/
+static int intel_pmic_reg_enable(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8 reg;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+	return intel_scu_ipc_iowrite8(pmic_info->pmic_reg, (reg | REG_ON));
+}
+/**
+* intel_pmic_reg_disable - To disable the regulator
+* @rdev: regulator_dev structure
+* @return value : 0 - Regulator disabling success
+*		 : negative errno - Regulator disabling failed
+*
+* Read-modify-write: clears the enable bit (REG_OFF is the inverted
+* enable mask) while preserving the voltage-select bits.
+*/
+static int intel_pmic_reg_disable(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8 reg;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+	return intel_scu_ipc_iowrite8(pmic_info->pmic_reg,
+		(reg & REG_OFF));
+}
+/**
+* intel_pmic_reg_listvoltage - Return the voltage value,this is called
+* from core framework
+* @rdev: regulator source
+* @index : passed on from core
+* @return value : Returns the value in micro volts.
+*		  -EINVAL when @index exceeds the voltage table.
+ */
+static int intel_pmic_reg_listvoltage(struct regulator_dev *rdev,
+		unsigned index)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+
+	if (index >= pmic_info->table_len) {
+		dev_err(&rdev->dev, "Index out of range in listvoltage\n");
+		return -EINVAL;
+	}
+	/* table entries are stored in mV; the core expects uV */
+	return pmic_info->table[index] * 1000;
+}
+/**
+* intel_pmic_reg_getvoltage - Return the current voltage value in uV
+* @rdev: regulator_dev structure
+* @return value : Returns the voltage value in uV, negative errno on
+*		  IPC failure, -EINVAL if the hardware selector is out
+*		  of range for the voltage table.
+*/
+static int intel_pmic_reg_getvoltage(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8 reg, vsel;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+	/* selector lives in bits 7:6 of the control register */
+	vsel = (reg & REG_VSEL_MASK) >> VSEL_SHIFT;
+	if (vsel >= pmic_info->table_len) {
+		dev_err(&rdev->dev, "vsel value is out of range\n");
+		return -EINVAL;
+	}
+	dev_dbg(&rdev->dev, "Voltage value is %d mV\n",
+			pmic_info->table[vsel]);
+	return pmic_info->table[vsel] * 1000;
+}
+
+/**
+* intel_pmic_reg_setvoltage - Set voltage to the regulator
+* @rdev: regulator_dev structure
+* @min_uV: Minimum required voltage in uV
+* @max_uV: Maximum acceptable voltage in uV
+* @selector: Voltage value passed back to core layer
+* Sets a voltage regulator to the desired output voltage
+* @return value : Returns 0 if success
+*		: Return error value on failure
+*
+* Walks the voltage table for the first entry inside [min_uV, max_uV]
+* and programs its selector into bits 7:6 of the control register via
+* read-modify-write.
+*/
+static int intel_pmic_reg_setvoltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned *selector)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	int ret;
+	u8 reg, vsel;
+
+	for (vsel = 0; vsel < pmic_info->table_len; vsel++) {
+		int mV = pmic_info->table[vsel];
+		int uV = mV * 1000;
+		if (min_uV > uV || uV > max_uV)
+			continue;
+
+		*selector = vsel;
+		ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+		if (ret) {
+			dev_err(&rdev->dev,
+				"intel_scu_ipc_ioread8 error %08x\n", ret);
+			return ret;
+		}
+		reg &= ~REG_VSEL_MASK;
+		reg |= vsel << VSEL_SHIFT;
+		dev_dbg(&rdev->dev,
+			"intel_pmic_reg_setvoltage voltage: %u uV\n", uV);
+		return intel_scu_ipc_iowrite8(pmic_info->pmic_reg, reg);
+	}
+	return -EINVAL;
+}
+
+/* regulator_ops registration */
+static struct regulator_ops intel_pmic_ops = {
+	.is_enabled = intel_pmic_reg_is_enabled,
+	.enable = intel_pmic_reg_enable,
+	.disable = intel_pmic_reg_disable,
+	.get_voltage = intel_pmic_reg_getvoltage,
+	.set_voltage = intel_pmic_reg_setvoltage,
+	.list_voltage = intel_pmic_reg_listvoltage,
+};
+/*
+ * Descriptors for the three VPROG regulators; indexed in the same order
+ * as reg_addr_offset[] so probe can pair a platform pmic_reg address
+ * with its descriptor.
+ */
+static struct regulator_desc intel_pmic_desc[] = {
+	{
+		.name = "vprog1",
+		.id = VPROG1,
+		.ops = &intel_pmic_ops,
+		.n_voltages = ARRAY_SIZE(VPROG1_VSEL_table),
+		.type = REGULATOR_VOLTAGE,
+		.owner = THIS_MODULE,
+	},
+	{
+		.name = "vprog2",
+		.id = VPROG2,
+		.ops = &intel_pmic_ops,
+		.n_voltages = ARRAY_SIZE(VPROG2_VSEL_table),
+		.type = REGULATOR_VOLTAGE,
+		.owner = THIS_MODULE,
+	},
+	{
+		.name = "vprog3",
+		.id = VPROG3,
+		.ops = &intel_pmic_ops,
+		.n_voltages = ARRAY_SIZE(VPROG3_VSEL_table),
+		.type = REGULATOR_VOLTAGE,
+		.owner = THIS_MODULE,
+	},
+};
+
+/*
+ * basin_cove_pmic_probe() - bind one VPROG regulator.
+ * Platform data selects which regulator via its control-register
+ * address; the matching descriptor index is found in reg_addr_offset[].
+ * Returns 0 on success, -EINVAL on bad/missing platform data, or the
+ * regulator_register() error.
+ */
+static int basin_cove_pmic_probe(struct platform_device *pdev)
+{
+	struct intel_pmic_info *pdata = dev_get_platdata(&pdev->dev);
+	struct regulator_config config = { };
+	unsigned int i;
+
+	if (!pdata || !pdata->pmic_reg)
+		return -EINVAL;
+
+	config.dev = &pdev->dev;
+	config.init_data = pdata->init_data;
+	config.driver_data = pdata;
+
+	for (i = 0; i < ARRAY_SIZE(reg_addr_offset); i++) {
+		if (reg_addr_offset[i] == pdata->pmic_reg)
+			break;
+	}
+	if (i == (ARRAY_SIZE(reg_addr_offset)))
+		return -EINVAL;
+
+	pdata->intel_pmic_rdev =
+		regulator_register(&intel_pmic_desc[i], &config);
+	if (IS_ERR(pdata->intel_pmic_rdev)) {
+		dev_err(&pdev->dev, "can't register regulator..error %ld\n",
+			PTR_ERR(pdata->intel_pmic_rdev));
+		return PTR_ERR(pdata->intel_pmic_rdev);
+	}
+	platform_set_drvdata(pdev, pdata->intel_pmic_rdev);
+	dev_dbg(&pdev->dev, "registered regulator\n");
+	return 0;
+}
+
+/* Platform removal callback: unregister the regulator bound at probe. */
+static int basin_cove_pmic_remove(struct platform_device *pdev)
+{
+	struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+	regulator_unregister(rdev);
+
+	return 0;
+}
+
+/* Matches the "intel_regulator" platform devices created by board code. */
+static const struct platform_device_id basin_cove_id_table[] = {
+	{ "intel_regulator", 0 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(platform, basin_cove_id_table);
+
+static struct platform_driver basin_cove_pmic_driver = {
+	.driver = {
+		.name = "intel_regulator",
+		.owner = THIS_MODULE,
+	},
+	.probe = basin_cove_pmic_probe,
+	.remove = basin_cove_pmic_remove,
+	.id_table = basin_cove_id_table,
+};
+/*
+ * Registered at subsys_initcall rather than module_init so the
+ * regulators exist before consumer drivers probe.
+ */
+static int __init basin_cove_pmic_init(void)
+{
+	return platform_driver_register(&basin_cove_pmic_driver);
+}
+subsys_initcall(basin_cove_pmic_init);
+
+static void __exit basin_cove_pmic_exit(void)
+{
+	platform_driver_unregister(&basin_cove_pmic_driver);
+}
+module_exit(basin_cove_pmic_exit);
+
+MODULE_DESCRIPTION("Basin Cove voltage regulator driver");
+MODULE_AUTHOR("Vishwesh/Mahesh/Sudarshan");
+MODULE_LICENSE("GPL v2");
It's safe to say n here if you're not interested in multimedia
offloading.
+config INTEL_MID_REMOTEPROC
+ tristate "Intel MID remoteproc support"
+ depends on X86
+ select REMOTEPROC
+ select RPMSG
+ help
+ Say y to support Intel MID's remote processors core driver
+ and SCU driver.
+ Please say y here if you want to enable x86 remoteproc core
+ driver support.
+
endmenu
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_INTEL_MID_REMOTEPROC) += intel_mid_rproc_scu.o intel_mid_rproc_core.o
--- /dev/null
+/*
+ * INTEL MID Remote Processor Core driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_ids.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include "intel_mid_rproc_core.h"
+#include "remoteproc_internal.h"
+
+#define RPMSG_NS_ADDR 53
+
+/**
+ * rpmsg_ns_alloc() - allocate a name service annoucement structure
+ * @name: name of remote service
+ * @id: rproc type
+ * @addr: address of remote service
+ *
+ * Returns the new node, or NULL on allocation failure.  Caller owns the
+ * node and frees it via rpmsg_ns_del_list()/free_rpmsg_ns().
+ */
+struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name, int id, u32 addr)
+{
+	struct rpmsg_ns_info *ns_info;
+
+	ns_info = kzalloc(sizeof(struct rpmsg_ns_info), GFP_KERNEL);
+	if (ns_info) {
+		/*
+		 * Bounded copy: name[] is a fixed-size field, so an
+		 * unbounded strcpy() would overflow on a long service
+		 * name.  strlcpy() also guarantees NUL termination.
+		 */
+		strlcpy(ns_info->name, name, sizeof(ns_info->name));
+		ns_info->type = id;
+		ns_info->addr = addr;
+		ns_info->flags = RPMSG_NS_CREATE;
+	}
+
+	return ns_info;
+}
+EXPORT_SYMBOL_GPL(rpmsg_ns_alloc);
+
+/**
+ * rpmsg_ns_add_to_list() -- add a name service node to the global list
+ * @info: name service node
+ * @nslist: list (with its own lock) to append to
+ */
+void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+				struct rpmsg_ns_list *nslist)
+{
+	mutex_lock(&nslist->lock);
+	list_add_tail(&info->node, &nslist->list);
+	mutex_unlock(&nslist->lock);
+}
+EXPORT_SYMBOL_GPL(rpmsg_ns_add_to_list);
+
+/**
+ * free_rpmsg_ns() -- free rpmsg name service node
+ * @info: name service node
+ */
+void free_rpmsg_ns(struct rpmsg_ns_info *info)
+{
+	kfree(info);
+}
+
+/**
+ * rpmsg_ns_del_list() -- free rpmsg name service list
+ * @nslist: list whose nodes are unlinked and freed under its lock
+ */
+void rpmsg_ns_del_list(struct rpmsg_ns_list *nslist)
+{
+	struct rpmsg_ns_info *info, *next;
+	mutex_lock(&nslist->lock);
+	list_for_each_entry_safe(info, next, &nslist->list, node) {
+		list_del(&info->node);
+		free_rpmsg_ns(info);
+	}
+	mutex_unlock(&nslist->lock);
+}
+EXPORT_SYMBOL_GPL(rpmsg_ns_del_list);
+
+/**
+ * find_rvdev() - find the rproc state of a supported virtio device
+ * @rproc: rproc handle
+ * @id: virtio device id
+ *
+ * Returns the matching rproc_vdev, or NULL if none is registered.
+ */
+struct rproc_vdev *find_rvdev(struct rproc *rproc, int id)
+{
+	struct rproc_vdev *rvdev;
+
+	list_for_each_entry(rvdev, &rproc->rvdevs, node)
+		if (rvdev->vdev.id.device == id)
+			return rvdev;
+
+	return NULL;
+}
+
+/*
+ * Since we could not get vring structure directly from rproc_vring
+ * structure, we have to create two local vrings and identify them
+ * by matching with rproc_vrings.
+ * @id: virtio device id.
+ * Currently one rproc_vdev is supported by firmware, and the id is
+ * VIRTIO_ID_RPMSG (declared in linux/virtio_ids.h).
+ * Returns the vring index for @vqid, or -EINVAL if not found.
+ */
+int find_vring_index(struct rproc *rproc, int vqid, int id)
+{
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+	int vring_idx = 0;
+
+	rvdev = find_rvdev(rproc, id);
+	if (rvdev == NULL) {
+		dev_err(dev, "virtio device not found\n");
+		return -EINVAL;
+	}
+
+	while (vring_idx < RVDEV_NUM_VRINGS) {
+		if (rvdev->vring[vring_idx].notifyid == vqid)
+			break;
+		vring_idx++;
+	}
+
+	/* no match found? there's a problem */
+	if (vring_idx == RVDEV_NUM_VRINGS) {
+		dev_err(dev, "Can not find vring\n");
+		return -EINVAL;
+	}
+
+	return vring_idx;
+}
+
+/*
+ * Initialise a local shadow vring over the rproc vring's memory so
+ * this driver can walk descriptors/used entries directly.
+ */
+void intel_mid_rproc_vring_init(struct rproc *rproc,
+			struct vring *vring, enum local_vring_idx id)
+{
+	int align, len;
+	void *addr;
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+
+	rvdev = find_rvdev(rproc, VIRTIO_ID_RPMSG);
+	if (rvdev == NULL) {
+		dev_err(dev, "virtio device not found\n");
+		return;
+	}
+
+	addr = rvdev->vring[id].va;
+	align = rvdev->vring[id].align;
+	len = rvdev->vring[id].len;
+	vring_init(vring, len, addr, align);
+}
+
+/**
+ * intel_mid_rproc_vq_interrupt() - inform a vq interrupt to rproc
+ * after vq buffers are handled
+ * @rproc: rproc handle
+ * @msg: vq notify id
+ */
+void intel_mid_rproc_vq_interrupt(struct rproc *rproc, int msg)
+{
+	struct device *dev = rproc->dev.parent;
+
+	if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+		dev_err(dev, "no message was found in vqid %d\n", msg);
+}
+
+/**
+ * intel_mid_rproc_msg_handle() - generic interface as a vq buffer handle
+ * during rpmsg transaction
+ * @iproc: intel mid rproc data
+ *
+ * Dispatches one rx/tx buffer pair to the rproc-specific handler, then
+ * publishes both buffers in their vrings' used rings.  The last-used
+ * counters are masked by (num - 1); this assumes vring sizes are powers
+ * of two (standard for virtio rings).
+ */
+int intel_mid_rproc_msg_handle(struct intel_mid_rproc *iproc)
+{
+	int ret;
+	struct vring *r_vring, *s_vring;
+	void *r_virt_addr, *s_virt_addr;
+	u16 r_idx, s_idx;
+	u64 r_dma_addr, s_dma_addr;
+	u32 r_len, s_len;
+
+	r_vring = &iproc->rx_vring;
+	s_vring = &iproc->tx_vring;
+
+	r_idx = iproc->r_vring_last_used & (r_vring->num - 1);
+	s_idx = iproc->s_vring_last_used & (s_vring->num - 1);
+
+	r_dma_addr = r_vring->desc[r_idx].addr;
+	s_dma_addr = s_vring->desc[s_idx].addr;
+
+	r_virt_addr = phys_to_virt(r_dma_addr);
+	s_virt_addr = phys_to_virt(s_dma_addr);
+
+	ret = iproc->rproc_rpmsg_handle(r_virt_addr, s_virt_addr,
+					&r_len, &s_len);
+
+	/* publish the handled buffers; used->idx update must come last */
+	r_vring->used->ring[r_idx].id = r_idx;
+	r_vring->used->ring[r_idx].len = r_len;
+	r_vring->used->idx++;
+
+	s_vring->used->ring[s_idx].id = s_idx;
+	s_vring->used->ring[s_idx].len = s_len;
+	s_vring->used->idx++;
+
+	iproc->r_vring_last_used++;
+	iproc->s_vring_last_used++;
+
+	return ret;
+}
+
+/**
+ * Remoteproc side rx buffer handler during name service creation.
+ * @iproc: intel mid rproc data
+ * @ns_info: name service info
+ *
+ * After remote processor receives name service messages, it needs to
+ * update the elements of its virtio device's rx virtqueue buffer
+ * before next rpmsg transaction.
+ * Here we have this function simulating the above effect.
+ */
+int intel_mid_rproc_ns_handle(struct intel_mid_rproc *iproc,
+				struct rpmsg_ns_info *ns_info)
+{
+	u16 index;
+	u32 len;
+	u64 dma_addr;
+	void *virt_addr;
+
+	struct vring *r_vring;
+	struct rpmsg_hdr *msg;
+	struct rpmsg_ns_msg *nsm;
+
+	if (ns_info == NULL) {
+		pr_err("ns_info = NULL\n");
+		return -ENODEV;
+	}
+
+	r_vring = &iproc->rx_vring;
+
+	index = iproc->r_vring_last_used & (r_vring->num - 1);
+
+	len = sizeof(*msg) + sizeof(*nsm);
+
+	dma_addr = r_vring->desc[index].addr;
+	virt_addr = phys_to_virt(dma_addr);
+
+	/* build an rpmsg header + ns announcement in the rx buffer */
+	msg = (struct rpmsg_hdr *)virt_addr;
+	nsm = (struct rpmsg_ns_msg *)(virt_addr + sizeof(*msg));
+
+	nsm->addr = ns_info->addr;
+	nsm->flags = ns_info->flags;
+	strncpy(nsm->name, ns_info->name, RPMSG_NAME_SIZE);
+
+	msg->len = sizeof(*nsm);
+	msg->src = nsm->addr;
+	msg->dst = RPMSG_NS_ADDR;
+
+	r_vring->used->ring[index].id = index;
+	r_vring->used->ring[index].len = len;
+	r_vring->used->idx++;
+
+	iproc->r_vring_last_used++;
+
+	return 0;
+}
--- /dev/null
+/*
+ * INTEL MID Remote Processor Core Head File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Include guard: the header had none, risking double-inclusion errors. */
+#ifndef INTEL_MID_RPROC_CORE_H
+#define INTEL_MID_RPROC_CORE_H
+
+/* rproc-private rpmsg handler: fills tx from rx, reports both lengths */
+typedef int (*rpmsg_handle_t)(void *rx_buf, void *tx_buf,
+				u32 *r_len, u32 *s_len);
+
+int scu_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *r_len, u32 *s_len);
+/* NOTE(review): signature differs from rpmsg_handle_t (single len) —
+ * confirm against the PSH driver before using via the typedef. */
+int psh_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *len);
+
+#define RPROC_FW_LOADING_TIMEOUT	(3 * HZ)
+#define IPROC_NAME_SIZE	20
+
+/**
+ * struct intel_mid_rproc - intel mid remote processor
+ * @ns_enabled: name service enabled flag
+ * @name: rproc name
+ * @type: rproc type
+ * @r_vring_last_used: last used index of rx vring
+ * @s_vring_last_used: last used index of tx vring
+ * @rproc: rproc handle
+ * @rx_vring: rproc rx vring
+ * @tx_vring: rproc tx vring
+ * @ns_info: loop cursor when creating ns channels
+ * @rproc_rpmsg_handle: rproc private rpmsg handle
+ */
+struct intel_mid_rproc {
+	bool ns_enabled;
+	char name[IPROC_NAME_SIZE];
+	u32 type;
+	u32 r_vring_last_used;
+	u32 s_vring_last_used;
+	struct rproc *rproc;
+	struct vring rx_vring;
+	struct vring tx_vring;
+	struct rpmsg_ns_info *ns_info;
+	rpmsg_handle_t rproc_rpmsg_handle;
+};
+
+enum local_vring_idx {
+	RX_VRING,
+	TX_VRING,
+};
+
+extern void intel_mid_rproc_vq_interrupt(struct rproc *rproc, int msg);
+extern int intel_mid_rproc_msg_handle(struct intel_mid_rproc *iproc);
+extern int intel_mid_rproc_ns_handle(struct intel_mid_rproc *iproc,
+					struct rpmsg_ns_info *ns_info);
+
+extern struct rproc_vdev *find_rvdev(struct rproc *rproc, int id);
+extern int find_vring_index(struct rproc *rproc, int vqid, int id);
+extern void intel_mid_rproc_vring_init(struct rproc *rproc,
+			struct vring *vring, enum local_vring_idx id);
+
+extern void rpmsg_ns_del_list(struct rpmsg_ns_list *nslist);
+
+/* Please do NOT use these APIs to send ipc commands,
+ * use rpmsg commands defined in <asm/intel_mid_rpmsg.h>
+ */
+extern void intel_scu_ipc_send_command(u32 cmd);
+
+/* Issue commands to the SCU with or without data */
+extern int intel_scu_ipc_simple_command(int cmd, int sub);
+extern int intel_scu_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen);
+extern int intel_scu_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen, u32 dptr, u32 sptr);
+
+/* IPC locking */
+extern void intel_scu_ipc_lock(void);
+extern void intel_scu_ipc_unlock(void);
+
+#endif /* INTEL_MID_RPROC_CORE_H */
--- /dev/null
+/*
+ * INTEL MID Remote Processor - SCU driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/remoteproc.h>
+#include <linux/delay.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_ids.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/scu_ipc_rpmsg.h>
+#include <asm/intel-mid.h>
+
+#include "intel_mid_rproc_core.h"
+#include "remoteproc_internal.h"
+
+static struct rpmsg_ns_list *nslist;
+
+
+/* Forward a full SCU IPC command (with in/out payloads) from tx_buf. */
+static int scu_ipc_command(void *tx_buf)
+{
+	struct tx_ipc_msg *msg = tx_buf;
+
+	return intel_scu_ipc_command(msg->cmd, msg->sub,
+				     msg->in, msg->inlen,
+				     msg->out, msg->outlen);
+}
+
+/*
+ * Raw IPC variant: also passes dptr/sptr and must hold the global SCU
+ * IPC lock across the call.
+ */
+static int scu_ipc_raw_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	intel_scu_ipc_lock();
+	ret = intel_scu_ipc_raw_cmd(tx_msg->cmd, tx_msg->sub,
+				tx_msg->in, tx_msg->inlen,
+				tx_msg->out, tx_msg->outlen,
+				tx_msg->dptr, tx_msg->sptr);
+	intel_scu_ipc_unlock();
+
+	return ret;
+}
+
+/* Simple (no payload) IPC command. */
+static int scu_ipc_simple_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	ret = intel_scu_ipc_simple_command(tx_msg->cmd, tx_msg->sub);
+
+	return ret;
+}
+
+/*
+ * Fire-and-forget command; sub-command is packed into bits 12+ of the
+ * command word.  No status is returned.
+ */
+static void scu_ipc_send_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+	intel_scu_ipc_send_command(tx_msg->sub << 12 | tx_msg->cmd);
+}
+
+/*
+ * Dispatch firmware-access commands.  Unknown commands are logged and
+ * reported as success (best-effort, matches the other dispatchers).
+ */
+static int scu_ipc_fw_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_FW_REVISION:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_FW_UPDATE:
+		/* Only scu_ipc_send_command works for fw update */
+		scu_ipc_send_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}	/* no stray ';' after the switch brace */
+
+	return ret;
+}
+
+/*
+ * Dispatch utility commands, choosing the IPC flavour each command
+ * requires (full / simple / raw).
+ */
+static int scu_ipc_util_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_FW_REVISION:
+	case RP_GET_HOBADDR:
+	case RP_OSC_CLK_CTRL:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_S0IX_COUNTER:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	case RP_WRITE_OSNIB:
+		ret = scu_ipc_raw_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}	/* no stray ';' after the switch brace */
+
+	return ret;
+}
+
+/* Dispatch virtual-RTC commands to the appropriate IPC flavour. */
+static int scu_ipc_vrtc_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_HOBADDR:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_VRTC:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}	/* no stray ';' after the switch brace */
+
+	return ret;
+}
+
+/* Dispatch firmware-logging commands to the appropriate IPC flavour. */
+static int scu_ipc_fw_logging_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_HOBADDR:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_CLEAR_FABERROR:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}	/* no stray ';' after the switch brace */
+
+	return ret;
+}
+
+/**
+ * scu_ipc_rpmsg_handle() - scu rproc specified ipc rpmsg handle
+ * @rx_buf: rx buffer to be add
+ * @tx_buf: tx buffer to be get
+ * @r_len: rx buffer length
+ * @s_len: tx buffer length
+ *
+ * Dispatches the tx message to the SCU IPC primitive selected by the
+ * rpmsg destination address, stores the IPC status in the rx message,
+ * then fills in the rx header with src/dst swapped so the reply can be
+ * routed back to the original sender.
+ */
+int scu_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *r_len, u32 *s_len)
+{
+	struct rpmsg_hdr *tx_hdr, *tmp_hdr;
+	struct tx_ipc_msg *tx_msg;
+	struct rx_ipc_msg *tmp_msg;
+	int ret = 0;
+
+	*r_len = sizeof(struct rpmsg_hdr) + sizeof(struct rx_ipc_msg);
+	*s_len = sizeof(struct rpmsg_hdr) + sizeof(struct tx_ipc_msg);
+
+	/* get tx_msg and send scu ipc command */
+	tx_hdr = (struct rpmsg_hdr *)tx_buf;
+	tx_msg = (struct tx_ipc_msg *)(tx_buf + sizeof(*tx_hdr));
+
+	tmp_hdr = (struct rpmsg_hdr *)rx_buf;
+	tmp_msg = (struct rx_ipc_msg *)tmp_hdr->data;
+
+	switch (tx_hdr->dst) {
+	case RP_PMIC_ACCESS:
+	case RP_FLIS_ACCESS:
+	case RP_IPC_COMMAND:
+		tmp_msg->status = scu_ipc_command(tx_msg);
+		break;
+	case RP_SET_WATCHDOG:
+		/* Tangier takes the raw variant for watchdog setup */
+		if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+			tmp_msg->status = scu_ipc_raw_command(tx_msg);
+		else
+			tmp_msg->status = scu_ipc_command(tx_msg);
+		break;
+	case RP_MIP_ACCESS:
+	case RP_IPC_RAW_COMMAND:
+		tmp_msg->status = scu_ipc_raw_command(tx_msg);
+		break;
+	case RP_IPC_SIMPLE_COMMAND:
+		tmp_msg->status = scu_ipc_simple_command(tx_msg);
+		break;
+	case RP_IPC_UTIL:
+		tmp_msg->status = scu_ipc_util_command(tx_msg);
+		break;
+	case RP_FW_ACCESS:
+		tmp_msg->status = scu_ipc_fw_command(tx_msg);
+		break;
+	case RP_VRTC:
+		tmp_msg->status = scu_ipc_vrtc_command(tx_msg);
+		break;
+	case RP_FW_LOGGING:
+		tmp_msg->status = scu_ipc_fw_logging_command(tx_msg);
+		break;
+	default:
+		tmp_msg->status = 0;
+		pr_info("Command %x not supported yet\n", tx_hdr->dst);
+		break;
+	}
+
+	/* prepare rx buffer, switch src and dst */
+	tmp_hdr->src = tx_hdr->dst;
+	tmp_hdr->dst = tx_hdr->src;
+
+	tmp_hdr->flags = tx_hdr->flags;
+	tmp_hdr->len = sizeof(struct rx_ipc_msg);
+
+	return ret;
+}
+
+/* kick a virtqueue */
+static void intel_rproc_scu_kick(struct rproc *rproc, int vqid)
+{
+	int idx;
+	int ret;
+	struct intel_mid_rproc *iproc;
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+	/* One-shot latch: ns announcements are pushed only on the first
+	 * RX kick.  NOTE(review): this is a function-local static, so it
+	 * is shared across all rproc instances — confirm only one SCU
+	 * rproc ever exists. */
+	static unsigned long ns_info_all_received;
+
+	iproc = (struct intel_mid_rproc *)rproc->priv;
+
+	/*
+	 * Remote processor virtqueue being kicked.
+	 * This part simulates remote processor handling messages.
+	 */
+	idx = find_vring_index(rproc, vqid, VIRTIO_ID_RPMSG);
+
+	switch (idx) {
+	case RX_VRING:
+		if (iproc->ns_enabled && !ns_info_all_received) {
+			/* push messages with ns_info for ALL available
+			   name services in the list (nslist) into
+			   rx buffers. */
+			list_for_each_entry_continue(iproc->ns_info,
+						&nslist->list, node) {
+				ret = intel_mid_rproc_ns_handle(iproc,
+						iproc->ns_info);
+				if (ret) {
+					dev_err(dev, "ns handle error\n");
+					return;
+				}
+			}
+
+			ns_info_all_received = 1;
+			intel_mid_rproc_vq_interrupt(rproc, vqid);
+		}
+		break;
+
+	case TX_VRING:
+
+		dev_dbg(dev, "remote processor got the message ...\n");
+		intel_mid_rproc_msg_handle(iproc);
+		intel_mid_rproc_vq_interrupt(rproc, vqid);
+
+		/*
+		 * After remoteproc handles the message, it calls
+		 * the receive callback.
+		 * TODO: replace this part with real remote processor
+		 * operation.
+		 */
+		rvdev = find_rvdev(rproc, VIRTIO_ID_RPMSG);
+		if (rvdev)
+			intel_mid_rproc_vq_interrupt(rproc,
+					rvdev->vring[RX_VRING].notifyid);
+		else
+			WARN(1, "%s: can't find given rproc state\n", __func__);
+		break;
+
+	default:
+		dev_err(dev, "invalid vring index\n");
+		break;
+	}
+}
+
+/* power up the remote processor */
+static int intel_rproc_scu_start(struct rproc *rproc)
+{
+	struct intel_mid_rproc *iproc;
+
+	pr_info("Started intel scu remote processor\n");
+	iproc = (struct intel_mid_rproc *)rproc->priv;
+	/* set up the simulated rx/tx vrings before any traffic flows */
+	intel_mid_rproc_vring_init(rproc, &iproc->rx_vring, RX_VRING);
+	intel_mid_rproc_vring_init(rproc, &iproc->tx_vring, TX_VRING);
+
+	return 0;
+}
+
+/* power off the remote processor (no teardown performed here) */
+static int intel_rproc_scu_stop(struct rproc *rproc)
+{
+	pr_info("Stopped intel scu remote processor\n");
+	return 0;
+}
+
+/* rproc_ops hooks the remoteproc core invokes for this device */
+static struct rproc_ops intel_rproc_scu_ops = {
+	.start = intel_rproc_scu_start,
+	.stop = intel_rproc_scu_stop,
+	.kick = intel_rproc_scu_kick,
+};
+
+/*
+ * intel_rproc_scu_probe() - register the SCU remote processor
+ * @pdev: platform device carrying struct intel_mid_rproc_pdata
+ *
+ * Allocates and registers an rproc instance, waits for the remoteproc
+ * core to finish firmware loading, then initializes the driver private
+ * data.  Returns 0 on success or a negative errno.
+ */
+static int intel_rproc_scu_probe(struct platform_device *pdev)
+{
+	struct intel_mid_rproc_pdata *pdata = pdev->dev.platform_data;
+	struct intel_mid_rproc *iproc;
+	struct rproc *rproc;
+	int ret;
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+		return ret;
+	}
+
+	rproc = rproc_alloc(&pdev->dev, pdata->name, &intel_rproc_scu_ops,
+				pdata->firmware, sizeof(*iproc));
+	if (!rproc)
+		return -ENOMEM;
+
+	iproc = rproc->priv;
+	iproc->rproc = rproc;
+	nslist = pdata->nslist;
+
+	platform_set_drvdata(pdev, rproc);
+
+	ret = rproc_add(rproc);
+	if (ret)
+		goto free_rproc;
+
+	/*
+	 * Temporarily follow the rproc framework to load firmware
+	 * TODO: modify remoteproc code according to X86 architecture
+	 */
+	if (0 == wait_for_completion_timeout(&rproc->firmware_loading_complete,
+				RPROC_FW_LOADING_TIMEOUT)) {
+		dev_err(pdev->dev.parent, "fw loading not complete\n");
+		/* without this, probe returned ret == 0 (success) here */
+		ret = -ETIMEDOUT;
+		goto del_rproc;
+	}
+
+	/* Initialize intel_rproc_scu private data */
+	/* iproc comes zeroed from rproc_alloc(), so the strncpy result
+	 * stays NUL-terminated */
+	strncpy(iproc->name, pdev->id_entry->name, sizeof(iproc->name) - 1);
+	iproc->type = pdev->id_entry->driver_data;
+	iproc->r_vring_last_used = 0;
+	iproc->s_vring_last_used = 0;
+	iproc->ns_enabled = true;
+	iproc->rproc_rpmsg_handle = scu_ipc_rpmsg_handle;
+	iproc->ns_info = list_entry(&nslist->list,
+				struct rpmsg_ns_info, node);
+
+	return 0;
+
+del_rproc:
+	/* undo rproc_add() before dropping the last reference */
+	rproc_del(rproc);
+free_rproc:
+	rproc_put(rproc);
+	return ret;
+}
+
+/* Tear down the SCU rproc: drop the name-service list, then unregister
+ * and release the rproc instance. */
+static int intel_rproc_scu_remove(struct platform_device *pdev)
+{
+	struct rproc *rproc = platform_get_drvdata(pdev);
+
+	if (nslist)
+		rpmsg_ns_del_list(nslist);
+
+	rproc_del(rproc);
+	rproc_put(rproc);
+
+	return 0;
+}
+
+static const struct platform_device_id intel_rproc_scu_id_table[] = {
+	{ "intel_rproc_scu", RPROC_SCU },
+	{ },
+};
+
+static struct platform_driver intel_rproc_scu_driver = {
+	.probe = intel_rproc_scu_probe,
+	.remove = intel_rproc_scu_remove,
+	.driver = {
+		.name = "intel_rproc_scu",
+		.owner = THIS_MODULE,
+	},
+	.id_table = intel_rproc_scu_id_table,
+};
+
+static int __init intel_rproc_scu_init(void)
+{
+	return platform_driver_register(&intel_rproc_scu_driver);
+}
+
+static void __exit intel_rproc_scu_exit(void)
+{
+	platform_driver_unregister(&intel_rproc_scu_driver);
+}
+
+/* registered at subsys_initcall time so it is available before client
+ * rpmsg drivers probe */
+subsys_initcall(intel_rproc_scu_init);
+module_exit(intel_rproc_scu_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ning Li<ning.li@intel.com>");
+MODULE_DESCRIPTION("INTEL MID Remoteproc Core driver");
select VIRTIO
select VIRTUALIZATION
+config RPMSG_IPC
+ tristate "Build rpmsg ipc driver"
+ depends on RPMSG
+ help
+ Build an rpmsg ipc driver, which demonstrates how IA
+ communicates with remote processor through IPC rpmsg
+ over the rpmsg bus. It register a rpmsg driver matched
+ with the rpmsg device created in remoteproc framework.
+
endmenu
obj-$(CONFIG_RPMSG) += virtio_rpmsg_bus.o
+obj-$(CONFIG_RPMSG_IPC) += intel_mid_rpmsg.o
--- /dev/null
+/*
+ * rpmsg_mid_rpmsg.c - Intel RPMSG Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/rpmsg.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include <asm/intel_mid_rpmsg.h>
+
+/* Instance for generic kernel IPC calls */
+static struct rpmsg_device_data rpmsg_ddata[RPMSG_IPC_COMMAND_TYPE_NUM] = {
+ [RPMSG_IPC_COMMAND] = {
+ .name = "rpmsg_ipc_command",
+ .rpdev = NULL, /* initialized during driver probe */
+ .rpmsg_instance = NULL, /* initialized during driver probe */
+ },
+ [RPMSG_IPC_SIMPLE_COMMAND] = {
+ .name = "rpmsg_ipc_simple_command",
+ .rpdev = NULL,
+ .rpmsg_instance = NULL,
+ },
+ [RPMSG_IPC_RAW_COMMAND] = {
+ .name = "rpmsg_ipc_raw_command",
+ .rpdev = NULL,
+ .rpmsg_instance = NULL,
+ },
+};
+
+/* Providing rpmsg ipc generic interfaces.
+ * Modules can call these API directly without registering rpmsg driver.
+ *
+ * The arg list is the same as intel_scu_ipc_command(),
+ * so simply change intel_scu_ipc_command() to rpmsg_send_generic_command()
+ *
+ * NOTE(review): these read rpmsg_ddata[...].rpmsg_instance without a
+ * NULL check; rpmsg_send_command() itself rejects a NULL instance, so
+ * calls made before the rpmsg channel probes fail with -EFAULT.
+ */
+int rpmsg_send_generic_command(u32 cmd, u32 sub,
+				u8 *in, u32 inlen,
+				u32 *out, u32 outlen)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+				rpmsg_ddata[RPMSG_IPC_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_command(rpmsg_ipc_instance, cmd, sub,
+				in, out, inlen, outlen);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_command);
+
+int rpmsg_send_generic_simple_command(u32 cmd, u32 sub)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+			rpmsg_ddata[RPMSG_IPC_SIMPLE_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_simple_command(rpmsg_ipc_instance, cmd, sub);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_simple_command);
+
+/* Takes (dptr, sptr) in legacy intel_scu_ipc_raw_command() order and
+ * forwards them swapped to match rpmsg_send_raw_command(..., sptr, dptr). */
+int rpmsg_send_generic_raw_command(u32 cmd, u32 sub,
+				u8 *in, u32 inlen,
+				u32 *out, u32 outlen,
+				u32 dptr, u32 sptr)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+				rpmsg_ddata[RPMSG_IPC_RAW_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_raw_command(rpmsg_ipc_instance, cmd, sub,
+				in, out, inlen, outlen, sptr, dptr);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_raw_command);
+
+/* Global lock for rpmsg framework */
+static struct rpmsg_lock global_lock = {
+	.lock = __MUTEX_INITIALIZER(global_lock.lock),
+	.locked_prev = 0,
+	.pending = ATOMIC_INIT(0),
+};
+
+/* NOTE(review): global_locked_by_current reads the mutex ->owner field,
+ * which is only maintained in configurations that track mutex owners —
+ * confirm this holds for every supported kernel config. */
+#define is_global_locked_prev (global_lock.locked_prev)
+#define set_global_locked_prev(lock) (global_lock.locked_prev = lock)
+#define global_locked_by_current (global_lock.lock.owner == current)
+
+void rpmsg_global_lock(void)
+{
+	atomic_inc(&global_lock.pending);
+	mutex_lock(&global_lock.lock);
+}
+EXPORT_SYMBOL(rpmsg_global_lock);
+
+void rpmsg_global_unlock(void)
+{
+	mutex_unlock(&global_lock.lock);
+	/* if other senders are still queued, yield the CPU to them */
+	if (!atomic_dec_and_test(&global_lock.pending))
+		schedule();
+}
+EXPORT_SYMBOL(rpmsg_global_unlock);
+
+/* Take the global lock unless this task already holds it (a caller may
+ * invoke IPC helpers while holding rpmsg_global_lock()); locked_prev
+ * records that case so rpmsg_unlock() leaves the lock held. */
+static void rpmsg_lock(void)
+{
+	if (!mutex_trylock(&global_lock.lock)) {
+		if (global_locked_by_current)
+			set_global_locked_prev(1);
+		else
+			rpmsg_global_lock();
+	}
+}
+
+static void rpmsg_unlock(void)
+{
+	if (!is_global_locked_prev)
+		rpmsg_global_unlock();
+	else
+		set_global_locked_prev(0);
+}
+
+/*
+ * rpmsg_send_command() - send an IPC command over rpmsg and wait for reply
+ * @instance: per-channel rpmsg instance; NULL is rejected with -EFAULT
+ * @cmd: IPC command
+ * @sub: IPC sub-command
+ * @in/@inlen: optional input payload
+ * @out/@outlen: optional output buffer
+ *
+ * Serializes against all senders via the global rpmsg lock, then against
+ * other users of this instance via instance_lock.  Blocks up to
+ * RPMSG_TX_TIMEOUT for the reply filled in by rpmsg_recv_cb(); returns
+ * the remote status, -ETIMEDOUT, or the send error.
+ */
+int rpmsg_send_command(struct rpmsg_instance *instance, u32 cmd,
+				u32 sub, u8 *in,
+				u32 *out, u32 inlen,
+				u32 outlen)
+{
+	int ret = 0;
+
+	if (!instance) {
+		pr_err("%s: Instance is NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	/* Hold global rpmsg lock */
+	rpmsg_lock();
+
+	mutex_lock(&instance->instance_lock);
+
+	/* Prepare Tx buffer */
+	instance->tx_msg->cmd = cmd;
+	instance->tx_msg->sub = sub;
+	instance->tx_msg->in = in;
+	instance->tx_msg->out = out;
+	instance->tx_msg->inlen = inlen;
+	instance->tx_msg->outlen = outlen;
+
+	/* Prepare Rx buffer: mark status invalid before the reply lands */
+	mutex_lock(&instance->rx_lock);
+	instance->rx_msg->status = -1;
+	mutex_unlock(&instance->rx_lock);
+	INIT_COMPLETION(instance->reply_arrived);
+
+	/* Send message to remote processor(SCU) using rpdev channel */
+	ret = rpmsg_send_offchannel(
+			instance->rpdev,
+			instance->endpoint->addr,
+			instance->rpdev->dst,
+			instance->tx_msg,
+			sizeof(*instance->tx_msg)
+			);
+	if (ret) {
+		dev_err(&instance->rpdev->dev, "%s failed: %d\n",
+			__func__, ret);
+		goto end;
+	}
+
+	if (0 == wait_for_completion_timeout(&instance->reply_arrived,
+						RPMSG_TX_TIMEOUT)) {
+		dev_err(&instance->rpdev->dev,
+			"timeout: %d\n", ret);
+		ret = -ETIMEDOUT;
+		goto end;
+	}
+
+	mutex_lock(&instance->rx_lock);
+	ret = instance->rx_msg->status;
+	mutex_unlock(&instance->rx_lock);
+end:
+	mutex_unlock(&instance->instance_lock);
+	rpmsg_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_command);
+
+/*
+ * rpmsg_send_raw_command() - rpmsg_send_command() plus sptr/dptr fields
+ *
+ * NOTE(review): sptr/dptr are written under instance_lock, but the lock
+ * is released before rpmsg_send_command() re-acquires it, so a concurrent
+ * raw command on the same instance could overwrite them before the first
+ * send goes out — confirm callers are serialized.
+ */
+int rpmsg_send_raw_command(struct rpmsg_instance *instance, u32 cmd,
+				u32 sub, u8 *in,
+				u32 *out, u32 inlen,
+				u32 outlen, u32 sptr,
+				u32 dptr)
+{
+	int ret = 0;
+
+	if (!instance) {
+		pr_err("%s: Instance is NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	mutex_lock(&instance->instance_lock);
+	instance->tx_msg->sptr = sptr;
+	instance->tx_msg->dptr = dptr;
+	mutex_unlock(&instance->instance_lock);
+
+	ret = rpmsg_send_command(instance, cmd, sub, in, out, inlen, outlen);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_raw_command);
+
+int rpmsg_send_simple_command(struct rpmsg_instance *instance, u32 cmd,
+ u32 sub)
+{
+ int ret;
+
+ ret = rpmsg_send_command(instance, cmd, sub, NULL, NULL, 0, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_simple_command);
+
+/* Endpoint callback: copy the remote reply into the instance rx buffer
+ * and wake the sender blocked in rpmsg_send_command().  Messages whose
+ * length does not match struct rx_ipc_msg are dropped with a warning. */
+static void rpmsg_recv_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+#ifdef DEBUG_RPMSG_MSG
+	static int rx_count;
+#endif
+	struct rpmsg_instance *instance = priv;
+
+	if (len != sizeof(struct rx_ipc_msg)) {
+		dev_warn(&rpdev->dev, "%s, incorrect msg length\n", __func__);
+		return;
+	}
+
+#ifdef DEBUG_RPMSG_MSG
+	dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n", ++rx_count, src);
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len, true);
+#endif
+
+	mutex_lock(&instance->rx_lock);
+
+	memcpy(instance->rx_msg, data, len);
+
+	mutex_unlock(&instance->rx_lock);
+
+	complete(&instance->reply_arrived);
+
+}
+
+/*
+ * alloc_rpmsg_instance() - allocate an rpmsg instance and its buffers
+ * @rpdev: rpmsg channel the instance is bound to
+ * @pInstance: out-parameter; set to the new instance, or NULL on failure
+ *
+ * Allocates the instance, its tx/rx message buffers, and a dedicated
+ * endpoint whose callback is rpmsg_recv_cb().  On any failure all
+ * partial allocations are undone and *pInstance is NULL.  Returns 0 on
+ * success or a negative errno.  Caller must still run
+ * init_rpmsg_instance() before use.
+ */
+int alloc_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance)
+{
+	int ret = 0;
+	struct rpmsg_instance *instance;
+
+	dev_info(&rpdev->dev, "Allocating rpmsg_instance\n");
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc rpmsg_instance failed\n");
+		goto alloc_out;
+	}
+
+	instance->rpdev = rpdev;
+
+	instance->tx_msg = kzalloc(sizeof(struct tx_ipc_msg), GFP_KERNEL);
+	if (!instance->tx_msg) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc instance tx_msg failed\n");
+		goto error_tx_msg_create;
+	}
+
+	instance->rx_msg = kzalloc(sizeof(struct rx_ipc_msg), GFP_KERNEL);
+	if (!instance->rx_msg) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc instance rx_msg failed\n");
+		goto error_rx_msg_create;
+	}
+
+	instance->endpoint = rpmsg_create_ept(rpdev, rpmsg_recv_cb,
+						instance,
+						RPMSG_ADDR_ANY);
+	if (!instance->endpoint) {
+		dev_err(&rpdev->dev, "create instance endpoint failed\n");
+		ret = -ENOMEM;
+		goto error_endpoint_create;
+	}
+
+	/* success exits through the common label too */
+	goto alloc_out;
+
+error_endpoint_create:
+	kfree(instance->rx_msg);
+	instance->rx_msg = NULL;
+error_rx_msg_create:
+	kfree(instance->tx_msg);
+	instance->tx_msg = NULL;
+error_tx_msg_create:
+	kfree(instance);
+	instance = NULL;
+alloc_out:
+	*pInstance = instance;
+	return ret;
+
+}
+
+/* Destroy the endpoint and free all instance memory.
+ * NOTE(review): the instance is kfree'd right after instance_lock is
+ * dropped — callers must guarantee no other thread can still be
+ * contending for the lock at this point. */
+void free_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance)
+{
+	struct rpmsg_instance *instance = *pInstance;
+
+	mutex_lock(&instance->instance_lock);
+	rpmsg_destroy_ept(instance->endpoint);
+	kfree(instance->tx_msg);
+	instance->tx_msg = NULL;
+	kfree(instance->rx_msg);
+	instance->rx_msg = NULL;
+	mutex_unlock(&instance->instance_lock);
+	kfree(instance);
+	*pInstance = NULL;
+	dev_info(&rpdev->dev, "Freeing rpmsg device\n");
+}
+EXPORT_SYMBOL(free_rpmsg_instance);
+
+/* Second-stage setup: initialize the completion and locks of an
+ * instance returned by alloc_rpmsg_instance(). */
+void init_rpmsg_instance(struct rpmsg_instance *instance)
+{
+	init_completion(&instance->reply_arrived);
+	mutex_init(&instance->instance_lock);
+	mutex_init(&instance->rx_lock);
+}
+EXPORT_SYMBOL(init_rpmsg_instance);
+
+/*
+ * rpmsg_ipc_probe() - set up the kernel-IPC instance for a new channel
+ * @rpdev: the rpmsg channel being probed
+ *
+ * Matches the channel name against rpmsg_ddata[] and allocates plus
+ * initializes the corresponding rpmsg instance.  Returns 0 on success
+ * or a negative errno.
+ */
+static int rpmsg_ipc_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+	int i;
+	struct rpmsg_device_data *ddata = rpmsg_ddata;
+
+	if (rpdev == NULL) {
+		/* must not touch rpdev->id.name here: rpdev is NULL */
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed rpmsg_ipc device %s\n", rpdev->id.name);
+
+	for (i = RPMSG_IPC_COMMAND; i < RPMSG_IPC_COMMAND_TYPE_NUM; i++) {
+		if (!strncmp(rpdev->id.name, ddata[i].name, RPMSG_NAME_SIZE)) {
+
+			/* Allocate rpmsg instance for kernel IPC calls*/
+			ret = alloc_rpmsg_instance(rpdev,
+						&ddata[i].rpmsg_instance);
+			if (!ddata[i].rpmsg_instance) {
+				dev_err(&rpdev->dev,
+					"alloc rpmsg instance failed\n");
+				goto out;
+			}
+
+			/* Initialize rpmsg instance */
+			init_rpmsg_instance(ddata[i].rpmsg_instance);
+
+			ddata[i].rpdev = rpdev;
+			break;
+		}
+	}
+
+out:
+	return ret;
+}
+
+/* Release the instance that rpmsg_ipc_probe() allocated for this
+ * channel, matched again by channel name. */
+static void rpmsg_ipc_remove(struct rpmsg_channel *rpdev)
+{
+	int i;
+	struct rpmsg_device_data *ddata = rpmsg_ddata;
+
+	for (i = RPMSG_IPC_COMMAND; i < RPMSG_IPC_COMMAND_TYPE_NUM; i++) {
+		if (!strncmp(rpdev->id.name, ddata[i].name, RPMSG_NAME_SIZE)) {
+			free_rpmsg_instance(rpdev, &ddata[i].rpmsg_instance);
+			break;
+		}
+	}
+	dev_info(&rpdev->dev, "Removed rpmsg_ipc device\n");
+}
+
+/* Default channel callback.  Replies arrive on per-instance endpoints
+ * (rpmsg_recv_cb), so anything landing here is unexpected: log and dump. */
+static void rpmsg_ipc_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len, true);
+}
+
+/* Channel names must match the entries in rpmsg_ddata[] above */
+static struct rpmsg_device_id rpmsg_ipc_id_table[] = {
+	{ .name = "rpmsg_ipc_command" },
+	{ .name = "rpmsg_ipc_simple_command" },
+	{ .name = "rpmsg_ipc_raw_command" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_ipc_id_table);
+
+static struct rpmsg_driver rpmsg_ipc = {
+	.drv.name = KBUILD_MODNAME,
+	.drv.owner = THIS_MODULE,
+	.id_table = rpmsg_ipc_id_table,
+	.probe = rpmsg_ipc_probe,
+	.callback = rpmsg_ipc_cb,
+	.remove = rpmsg_ipc_remove,
+};
+
+static int __init rpmsg_ipc_init(void)
+{
+	return register_rpmsg_driver(&rpmsg_ipc);
+}
+subsys_initcall(rpmsg_ipc_init);
+
+static void __exit rpmsg_ipc_exit(void)
+{
+	/* unregister_rpmsg_driver() returns void; returning a void
+	 * expression from a void function violates ISO C */
+	unregister_rpmsg_driver(&rpmsg_ipc);
+}
+module_exit(rpmsg_ipc_exit);
+
+MODULE_AUTHOR("Ning Li<ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel IPC RPMSG Driver");
+MODULE_LICENSE("GPL v2");
dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
msg->src, msg->dst, msg->len,
msg->flags, msg->reserved);
- print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
+/* print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
msg, sizeof(*msg) + msg->len, true);
-
+*/
sg_init_one(&sg, msg, sizeof(*msg) + len);
mutex_lock(&vrp->tx_lock);
dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
msg->src, msg->dst, msg->len,
msg->flags, msg->reserved);
- print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
+/* print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
msg, sizeof(*msg) + msg->len, true);
-
+*/
/*
* We currently use fixed-sized buffers, so trivially sanitize
* the reported payload length.
struct device *dev = &vrp->vdev->dev;
int ret;
- print_hex_dump(KERN_DEBUG, "NS announcement: ",
+/* print_hex_dump(KERN_DEBUG, "NS announcement: ",
DUMP_PREFIX_NONE, 16, 1,
data, len, true);
-
+*/
if (len != sizeof(*msg)) {
dev_err(dev, "malformed ns msg (%d)\n", len);
return;
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sfi.h>
+#include <linux/io.h>
#include <asm-generic/rtc.h>
#include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/mrst-vrtc.h>
+#include <linux/rpmsg.h>
+#include <asm/intel_mid_rpmsg.h>
struct mrst_rtc {
struct rtc_device *rtc;
u8 suspend_ctrl;
};
+/* both platform and pnp busses use negative numbers for invalid irqs */
+#define is_valid_irq(n) ((n) >= 0)
+
static const char driver_name[] = "rtc_mrst";
#define RTC_IRQMASK (RTC_PF | RTC_AF)
+#define OSHOB_ALARM_OFFSET 0x68
+#define OSHOB_DAYW_OFFSET 0x00
+#define OSHOB_DAYM_OFFSET 0x01
+#define OSHOB_MON_OFFSET 0x02
+#define OSHOB_YEAR_OFFSET 0x03
+
+static u32 oshob_base;
+static void __iomem *oshob_addr;
+
+static struct rpmsg_instance *vrtc_mrst_instance;
+
static inline int is_intr(u8 rtc_intr)
{
if (!(rtc_intr & RTC_IRQF))
return uip;
}
+/* If the interrupt is of alarm-type-RTC_AF, then check if it's for
+ * the correct day. With the support for alarms more than 24-hours,
+ * alarm-date is compared with date-fields in OSHOB, as the vRTC
+ * doesn't have date-fields for alarm
+ */
+static int is_valid_af(u8 rtc_intr)
+{
+	char *p;
+	/* zero-init: byte 0 of vrtc_date is never written below, and the
+	 * masked compare must not read indeterminate memory.
+	 * NOTE(review): the byte layout assumes little-endian — fine on
+	 * x86 MID, but worth confirming if this is ever reused. */
+	unsigned long vrtc_date = 0, oshob_date;
+
+	if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+		(__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+		if (rtc_intr & RTC_AF) {
+			p = (char *) &vrtc_date;
+			*(p+1) = vrtc_cmos_read(RTC_DAY_OF_MONTH);
+			*(p+2) = vrtc_cmos_read(RTC_MONTH);
+			*(p+3) = vrtc_cmos_read(RTC_YEAR);
+
+			oshob_date = readl(oshob_addr);
+			if ((oshob_date & 0xFFFFFF00)
+					!= (vrtc_date & 0xFFFFFF00))
+				return false;
+		}
+	}
+
+	return true;
+}
+
/*
* rtc_time's year contains the increment over 1900, but vRTC's YEAR
* register can't be programmed to value larger than 0x64, so vRTC
spin_unlock_irqrestore(&rtc_lock, flags);
- ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME);
+ ret = rpmsg_send_simple_command(vrtc_mrst_instance,
+ IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME);
return ret;
}
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned char rtc_control;
- if (mrst->irq <= 0)
+ if (!is_valid_irq(mrst->irq))
return -EIO;
/* Basic alarms only support hour, minute, and seconds fields.
*/
rtc_intr = vrtc_cmos_read(RTC_INTR_FLAGS);
rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
- if (is_intr(rtc_intr))
+ if (is_intr(rtc_intr) && is_valid_af(rtc_intr))
rtc_update_irq(mrst->rtc, 1, rtc_intr);
}
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned char hrs, min, sec;
+ unsigned char wday, mday, mon, year;
int ret = 0;
- if (!mrst->irq)
+ if (!is_valid_irq(mrst->irq))
return -EIO;
hrs = t->time.tm_hour;
min = t->time.tm_min;
sec = t->time.tm_sec;
+ wday = t->time.tm_wday;
+ mday = t->time.tm_mday;
+ mon = t->time.tm_mon;
+ year = t->time.tm_year;
+
spin_lock_irq(&rtc_lock);
/* Next rtc irq must not be from previous alarm setting */
mrst_irq_disable(mrst, RTC_AIE);
vrtc_cmos_write(min, RTC_MINUTES_ALARM);
vrtc_cmos_write(sec, RTC_SECONDS_ALARM);
- spin_unlock_irq(&rtc_lock);
-
- ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETALARM);
- if (ret)
- return ret;
+ if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+ (__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+ /* Support for date-field in Alarm using OSHOB
+ * Since, vRTC doesn't have Alarm-registers for date-fields,
+ * write date-fields into OSHOB for SCU to sync to MSIC-RTC */
+ writeb(wday, oshob_addr+OSHOB_DAYW_OFFSET);
+ writeb(mday, oshob_addr+OSHOB_DAYM_OFFSET);
+ writeb(mon+1, oshob_addr+OSHOB_MON_OFFSET);
+ /* Adjust for the 1972/1900 */
+ writeb(year-72, oshob_addr+OSHOB_YEAR_OFFSET);
+ }
- spin_lock_irq(&rtc_lock);
if (t->enabled)
mrst_irq_enable(mrst, RTC_AIE);
return 0;
}
+#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
+
/* Currently, the vRTC doesn't support UIE ON/OFF */
-static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+static int
+mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned long flags;
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ case RTC_AIE_ON:
+ if (!is_valid_irq(mrst->irq))
+ return -EINVAL;
+ break;
+ default:
+ /* PIE ON/OFF is handled by mrst_irq_set_state() */
+ return -ENOIOCTLCMD;
+ }
+
spin_lock_irqsave(&rtc_lock, flags);
- if (enabled)
- mrst_irq_enable(mrst, RTC_AIE);
- else
+ switch (cmd) {
+ case RTC_AIE_OFF: /* alarm off */
mrst_irq_disable(mrst, RTC_AIE);
+ break;
+ case RTC_AIE_ON: /* alarm on */
+ mrst_irq_enable(mrst, RTC_AIE);
+ break;
+ }
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
+#else
+#define mrst_rtc_ioctl NULL
+#endif
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
#define mrst_procfs NULL
#endif
+/* rtc_class_ops alarm_irq_enable hook: toggle the RTC alarm interrupt.
+ * NOTE(review): unlike the ioctl path, no rtc_lock is taken here —
+ * confirm mrst_irq_enable()/mrst_irq_disable() are safe without it. */
+static int mrst_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct mrst_rtc *mrst = dev_get_drvdata(dev);
+
+	if (enabled)
+		mrst_irq_enable(mrst, RTC_AIE);
+	else
+		mrst_irq_disable(mrst, RTC_AIE);
+
+	return 0;
+}
+
static const struct rtc_class_ops mrst_rtc_ops = {
- .read_time = mrst_read_time,
- .set_time = mrst_set_time,
- .read_alarm = mrst_read_alarm,
- .set_alarm = mrst_set_alarm,
- .proc = mrst_procfs,
- .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
+ .ioctl = mrst_rtc_ioctl,
+ .read_time = mrst_read_time,
+ .set_time = mrst_set_time,
+ .read_alarm = mrst_read_alarm,
+ .set_alarm = mrst_set_alarm,
+ .proc = mrst_procfs,
+ .alarm_irq_enable = mrst_alarm_irq_enable,
};
static struct mrst_rtc mrst_rtc;
static irqreturn_t mrst_rtc_irq(int irq, void *p)
{
u8 irqstat;
+ int ret = 0;
spin_lock(&rtc_lock);
/* This read will clear all IRQ flags inside Reg C */
irqstat = vrtc_cmos_read(RTC_INTR_FLAGS);
+ irqstat &= RTC_IRQMASK | RTC_IRQF;
+ ret = is_valid_af(irqstat);
spin_unlock(&rtc_lock);
- irqstat &= RTC_IRQMASK | RTC_IRQF;
if (is_intr(irqstat)) {
- rtc_update_irq(p, 1, irqstat);
+ /* If it's an alarm-interrupt, update RTC-IRQ only if it's
+ * for current day. Alarms beyond 24-hours will result in
+ * interrupts at given time, everyday till actual alarm-date.
+ * From hardware perspective, it's still a valid interrupt,
+ * hence need to return IRQ_HANDLED. */
+ if (ret)
+ rtc_update_irq(p, 1, irqstat);
+
return IRQ_HANDLED;
+ } else {
+ pr_err("vRTC: error in IRQ handler\n");
+ return IRQ_NONE;
}
- return IRQ_NONE;
}
-static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
- int rtc_irq)
+static int
+vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
{
int retval = 0;
unsigned char rtc_control;
if (!iomem)
return -ENODEV;
- iomem = request_mem_region(iomem->start, resource_size(iomem),
- driver_name);
+ iomem = request_mem_region(iomem->start,
+ iomem->end + 1 - iomem->start,
+ driver_name);
if (!iomem) {
dev_dbg(dev, "i/o mem already in use.\n");
return -EBUSY;
if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
dev_dbg(dev, "TODO: support more than 24-hr BCD mode\n");
- if (rtc_irq) {
+ if (is_valid_irq(rtc_irq)) {
retval = request_irq(rtc_irq, mrst_rtc_irq,
- 0, dev_name(&mrst_rtc.rtc->dev),
+ IRQF_NO_SUSPEND, dev_name(&mrst_rtc.rtc->dev),
mrst_rtc.rtc);
if (retval < 0) {
dev_dbg(dev, "IRQ %d is already in use, err %d\n",
goto cleanup1;
}
}
- dev_dbg(dev, "initialised\n");
+
+ /* make RTC device wake capable from sleep */
+ device_init_wakeup(dev, true);
+
+ if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+ (__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+ retval = rpmsg_send_command(vrtc_mrst_instance,
+ IPCMSG_GET_HOBADDR, 0, NULL, &oshob_base, 0, 1);
+ if (retval < 0) {
+ dev_dbg(dev,
+ "Unable to get OSHOB base address, err %d\n",
+ retval);
+ goto cleanup1;
+ }
+
+ oshob_addr = ioremap_nocache(oshob_base+OSHOB_ALARM_OFFSET, 4);
+ if (!oshob_addr) {
+ dev_dbg(dev, "Unable to do ioremap for OSHOB\n");
+ retval = -ENOMEM;
+ goto cleanup1;
+ }
+ }
+
+ dev_info(dev, "vRTC driver initialised\n");
return 0;
cleanup1:
rtc_mrst_do_shutdown();
- if (mrst->irq)
+ if (is_valid_irq(mrst->irq))
free_irq(mrst->irq, mrst->rtc);
+ if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+ (__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+ if (oshob_addr != NULL)
+ iounmap(oshob_addr);
+ }
+
rtc_device_unregister(mrst->rtc);
mrst->rtc = NULL;
}
#ifdef CONFIG_PM
-static int mrst_suspend(struct device *dev, pm_message_t mesg)
+static int mrst_suspend(struct device *dev)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned char tmp;
*/
static inline int mrst_poweroff(struct device *dev)
{
- return mrst_suspend(dev, PMSG_HIBERNATE);
+ return mrst_suspend(dev);
}
static int mrst_resume(struct device *dev)
mask = vrtc_cmos_read(RTC_INTR_FLAGS);
mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
- if (!is_intr(mask))
+ if (!(is_intr(mask) && is_valid_af(mask)))
break;
rtc_update_irq(mrst->rtc, 1, mask);
MODULE_ALIAS("platform:vrtc_mrst");
+static const struct dev_pm_ops vrtc_mrst_platform_driver_pm_ops = {
+ .suspend = mrst_suspend,
+ .resume = mrst_resume,
+};
+
static struct platform_driver vrtc_mrst_platform_driver = {
.probe = vrtc_mrst_platform_probe,
.remove = vrtc_mrst_platform_remove,
.shutdown = vrtc_mrst_platform_shutdown,
- .driver = {
- .name = (char *) driver_name,
- .suspend = mrst_suspend,
- .resume = mrst_resume,
+ .driver.name = (char *) driver_name,
+ .driver.pm = &vrtc_mrst_platform_driver_pm_ops,
+};
+
+static int vrtc_mrst_init(void)
+{
+ return platform_driver_register(&vrtc_mrst_platform_driver);
+}
+
+static void vrtc_mrst_exit(void)
+{
+ platform_driver_unregister(&vrtc_mrst_platform_driver);
+}
+
+static int vrtc_mrst_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+ int ret;
+
+ if (rpdev == NULL) {
+ pr_err("vrtc_mrst rpmsg channel not created\n");
+ ret = -ENODEV;
+ goto out;
}
+
+ dev_info(&rpdev->dev, "Probed vrtc_mrst rpmsg device\n");
+
+ /* Allocate rpmsg instance for fw_update*/
+ ret = alloc_rpmsg_instance(rpdev, &vrtc_mrst_instance);
+ if (!vrtc_mrst_instance) {
+ dev_err(&rpdev->dev, "kzalloc vrtc_mrst instance failed\n");
+ goto out;
+ }
+
+ /* Initialize rpmsg instance */
+ init_rpmsg_instance(vrtc_mrst_instance);
+
+ ret = vrtc_mrst_init();
+ if (ret)
+ free_rpmsg_instance(rpdev, &vrtc_mrst_instance);
+
+out:
+ return ret;
+}
+
+static void vrtc_mrst_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+ vrtc_mrst_exit();
+ free_rpmsg_instance(rpdev, &vrtc_mrst_instance);
+ dev_info(&rpdev->dev, "Removed vrtc_mrst rpmsg device\n");
+}
+
+static void vrtc_mrst_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ dev_warn(&rpdev->dev, "unexpected, message\n");
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+}
+
+static struct rpmsg_device_id vrtc_mrst_rpmsg_id_table[] = {
+ { .name = "rpmsg_vrtc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, vrtc_mrst_rpmsg_id_table);
+
+static struct rpmsg_driver vrtc_mrst_rpmsg = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = vrtc_mrst_rpmsg_id_table,
+ .probe = vrtc_mrst_rpmsg_probe,
+ .callback = vrtc_mrst_rpmsg_cb,
+ .remove = vrtc_mrst_rpmsg_remove,
};
-module_platform_driver(vrtc_mrst_platform_driver);
+static int __init vrtc_mrst_rpmsg_init(void)
+{
+ return register_rpmsg_driver(&vrtc_mrst_rpmsg);
+}
+
+static void __exit vrtc_mrst_rpmsg_exit(void)
+{
+ return unregister_rpmsg_driver(&vrtc_mrst_rpmsg);
+}
+
+module_init(vrtc_mrst_rpmsg_init);
+module_exit(vrtc_mrst_rpmsg_exit);
MODULE_AUTHOR("Jacob Pan; Feng Tang");
MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
This enables using the Freescale i.MX SPI controllers in master
mode.
+config SPI_INTEL_MID_SSP
+ tristate "SSP SPI controller driver for Intel MID platforms (EXPERIMENTAL)"
+ depends on SPI_MASTER && INTEL_MID_DMAC
+ help
+ This is the unified SSP SPI slave controller driver for the Intel
+ MID platforms (Moorestown, Medfield, Clovertrail and
+ Merrifield), primarily used to implement a SPI host controller
+ driver over a SSP host controller.
+
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
depends on PARPORT
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP) += intel_mid_ssp_spi.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
--- /dev/null
+/*
+ * intel_mid_ssp_spi.c
+ * This driver supports Bulverde SSP core used on Intel MID platforms
+ * It supports SSP of Moorestown & Medfield platforms and handles clock
+ * slave & master modes.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Ken Mills <ken.k.mills@intel.com>
+ * Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Note:
+ *
+ * Supports DMA and non-interrupt polled transfers.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/completion.h>
+#include <asm/intel-mid.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+
+#define DRIVER_NAME "intel_mid_ssp_spi_unified"
+
+MODULE_AUTHOR("Ken Mills");
+MODULE_DESCRIPTION("Bulverde SSP core SPI contoller");
+MODULE_LICENSE("GPL");
+
+static int ssp_timing_wr;
+
+#ifdef DUMP_RX
+static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
+{
+ int tlen1 = (len < sz ? len : sz);
+ int tlen2 = ((len - sz) > sz) ? sz : (len - sz);
+ unsigned char *p;
+ static char msg[MAX_SPI_TRANSFER_SIZE];
+
+ memset(msg, '\0', sizeof(msg));
+ p = buf;
+ while (p < buf + tlen1)
+ sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+
+ if (tlen2 > 0) {
+ sprintf(msg, "%s .....", msg);
+ p = (buf+len) - tlen2;
+ while (p < buf + len)
+ sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+ }
+
+ dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
+ len-tlen2, len - 1, msg);
+}
+#endif
+
+static inline u8 ssp_cfg_get_mode(u8 ssp_cfg)
+{
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+ intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+ return (ssp_cfg) & 0x03;
+ else
+ return (ssp_cfg) & 0x07;
+}
+
+static inline u8 ssp_cfg_get_spi_bus_nb(u8 ssp_cfg)
+{
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+ intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+ return ((ssp_cfg) >> 2) & 0x07;
+ else
+ return ((ssp_cfg) >> 3) & 0x07;
+}
+
+static inline u8 ssp_cfg_is_spi_slave(u8 ssp_cfg)
+{
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+ intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+ return (ssp_cfg) & 0x20;
+ else
+ return (ssp_cfg) & 0x40;
+}
+
+static inline u32 is_tx_fifo_empty(struct ssp_drv_context *sspc)
+{
+ u32 sssr;
+ sssr = read_SSSR(sspc->ioaddr);
+ if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
+ return 0;
+ else
+ return 1;
+}
+
+static inline u32 is_rx_fifo_empty(struct ssp_drv_context *sspc)
+{
+ return ((read_SSSR(sspc->ioaddr) & SSSR_RNE) == 0);
+}
+
+static inline void disable_interface(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+}
+
+static inline void disable_triggers(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ write_SSCR1(read_SSCR1(reg) & ~sspc->cr1_sig, reg);
+}
+
+
+static void flush(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ u32 i = 0;
+
+ /* If the transmit fifo is not empty, reset the interface. */
+ if (!is_tx_fifo_empty(sspc)) {
+ dev_err(&sspc->pdev->dev, "TX FIFO not empty. Reset of SPI IF");
+ disable_interface(sspc);
+ return;
+ }
+
+ dev_dbg(&sspc->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
+ while (!is_rx_fifo_empty(sspc) && (i < SPI_FIFO_SIZE + 1)) {
+ read_SSDR(reg);
+ i++;
+ }
+ WARN(i > 0, "%d words flush occured\n", i);
+
+ return;
+}
+
+static int null_writer(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ u8 n_bytes = sspc->n_bytes;
+
+ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ || (sspc->tx == sspc->tx_end))
+ return 0;
+
+ write_SSDR(0, reg);
+ sspc->tx += n_bytes;
+
+ return 1;
+}
+
+static int null_reader(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ u8 n_bytes = sspc->n_bytes;
+
+ while ((read_SSSR(reg) & SSSR_RNE)
+ && (sspc->rx < sspc->rx_end)) {
+ read_SSDR(reg);
+ sspc->rx += n_bytes;
+ }
+
+ return sspc->rx == sspc->rx_end;
+}
+
+static int u8_writer(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ || (sspc->tx == sspc->tx_end))
+ return 0;
+
+ write_SSDR(*(u8 *)(sspc->tx), reg);
+ ++sspc->tx;
+
+ return 1;
+}
+
+static int u8_reader(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ while ((read_SSSR(reg) & SSSR_RNE)
+ && (sspc->rx < sspc->rx_end)) {
+ *(u8 *)(sspc->rx) = read_SSDR(reg);
+ ++sspc->rx;
+ }
+
+ return sspc->rx == sspc->rx_end;
+}
+
+static int u16_writer(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ || (sspc->tx == sspc->tx_end))
+ return 0;
+
+ write_SSDR(*(u16 *)(sspc->tx), reg);
+ sspc->tx += 2;
+
+ return 1;
+}
+
+static int u16_reader(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ while ((read_SSSR(reg) & SSSR_RNE) && (sspc->rx < sspc->rx_end)) {
+ *(u16 *)(sspc->rx) = read_SSDR(reg);
+ sspc->rx += 2;
+ }
+
+ return sspc->rx == sspc->rx_end;
+}
+
+static int u32_writer(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ || (sspc->tx == sspc->tx_end))
+ return 0;
+
+ write_SSDR(*(u32 *)(sspc->tx), reg);
+ sspc->tx += 4;
+
+ return 1;
+}
+
+static int u32_reader(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ while ((read_SSSR(reg) & SSSR_RNE) && (sspc->rx < sspc->rx_end)) {
+ *(u32 *)(sspc->rx) = read_SSDR(reg);
+ sspc->rx += 4;
+ }
+
+ return sspc->rx == sspc->rx_end;
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+ struct ssp_drv_context *sspc = param;
+ bool ret = false;
+
+ if (!sspc->dmac1)
+ return ret;
+
+ if (chan->device->dev == &sspc->dmac1->dev)
+ ret = true;
+
+ return ret;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @sspc: Pointer to the private driver context
+ */
+static void unmap_dma_buffers(struct ssp_drv_context *sspc)
+{
+ struct device *dev = &sspc->pdev->dev;
+
+ if (!sspc->dma_mapped)
+ return;
+ dma_unmap_single(dev, sspc->rx_dma, sspc->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(dev, sspc->tx_dma, sspc->len, PCI_DMA_TODEVICE);
+ sspc->dma_mapped = 0;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
+ * @arg: Pointer to the data provided at callback registration
+ *
+ * This function is set as callback for both RX and TX DMA transfers. The
+ * RX or TX 'done' flag is set according to the direction of the ended
+ * transfer. Then, if both RX and TX flags are set, it means that the
+ * transfer job is completed.
+ */
+static void intel_mid_ssp_spi_dma_done(void *arg)
+{
+ struct callback_param *cb_param = (struct callback_param *)arg;
+ struct ssp_drv_context *sspc = cb_param->drv_context;
+ struct device *dev = &sspc->pdev->dev;
+ void *reg = sspc->ioaddr;
+
+ if (cb_param->direction == TX_DIRECTION) {
+ dma_sync_single_for_cpu(dev, sspc->tx_dma,
+ sspc->len, DMA_TO_DEVICE);
+ sspc->txdma_done = 1;
+ } else {
+ sspc->rxdma_done = 1;
+ dma_sync_single_for_cpu(dev, sspc->rx_dma,
+ sspc->len, DMA_FROM_DEVICE);
+ }
+
+ dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
+ cb_param->direction, sspc->rxdma_done,
+ sspc->txdma_done);
+
+ if (sspc->txdma_done && sspc->rxdma_done) {
+ /* Clear Status Register */
+ write_SSSR(sspc->clear_sr, reg);
+ dev_dbg(dev, "DMA done\n");
+ /* Disable Triggers to DMA or to CPU*/
+ disable_triggers(sspc);
+ unmap_dma_buffers(sspc);
+
+ queue_work(sspc->dma_wq, &sspc->complete_work);
+ }
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @sspc: Pointer to the private driver context
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct ssp_drv_context *sspc)
+{
+ struct intel_mid_dma_slave *rxs, *txs;
+ struct dma_slave_config *ds;
+ dma_cap_mask_t mask;
+ struct device *dev = &sspc->pdev->dev;
+ unsigned int device_id;
+
+ /* Configure RX channel parameters */
+ rxs = &sspc->dmas_rx;
+ ds = &rxs->dma_slave;
+
+ ds->direction = DMA_FROM_DEVICE;
+ rxs->hs_mode = LNW_DMA_HW_HS;
+ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+ ds->dst_addr_width = sspc->n_bytes;
+ ds->src_addr_width = sspc->n_bytes;
+
+ if (sspc->quirks & QUIRKS_PLATFORM_BYT) {
+ /*These are fixed HW info from Baytrail datasheet*/
+ rxs->device_instance = 1; /*DMA Req line*/
+ } else if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+ rxs->device_instance = sspc->master->bus_num;
+ else
+ rxs->device_instance = 0;
+
+ /* Use a DMA burst according to the FIFO thresholds */
+ if (sspc->rx_fifo_threshold == 8) {
+ ds->src_maxburst = LNW_DMA_MSIZE_8;
+ ds->dst_maxburst = LNW_DMA_MSIZE_8;
+ } else if (sspc->rx_fifo_threshold == 4) {
+ ds->src_maxburst = LNW_DMA_MSIZE_4;
+ ds->dst_maxburst = LNW_DMA_MSIZE_4;
+ } else {
+ ds->src_maxburst = LNW_DMA_MSIZE_1;
+ ds->dst_maxburst = LNW_DMA_MSIZE_1;
+ }
+
+ /* Configure TX channel parameters */
+ txs = &sspc->dmas_tx;
+ ds = &txs->dma_slave;
+
+ ds->direction = DMA_TO_DEVICE;
+ txs->hs_mode = LNW_DMA_HW_HS;
+ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+ ds->src_addr_width = sspc->n_bytes;
+ ds->dst_addr_width = sspc->n_bytes;
+
+ if (sspc->quirks & QUIRKS_PLATFORM_BYT) {
+ /*These are fixed HW info from Baytrail datasheet*/
+ txs->device_instance = 0;/*DMA Req Line*/
+ } else if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+ txs->device_instance = sspc->master->bus_num;
+ else
+ txs->device_instance = 0;
+
+ /* Use a DMA burst according to the FIFO thresholds */
+ if (sspc->rx_fifo_threshold == 8) {
+ ds->src_maxburst = LNW_DMA_MSIZE_8;
+ ds->dst_maxburst = LNW_DMA_MSIZE_8;
+ } else if (sspc->rx_fifo_threshold == 4) {
+ ds->src_maxburst = LNW_DMA_MSIZE_4;
+ ds->dst_maxburst = LNW_DMA_MSIZE_4;
+ } else {
+ ds->src_maxburst = LNW_DMA_MSIZE_1;
+ ds->dst_maxburst = LNW_DMA_MSIZE_1;
+ }
+
+ /* Nothing more to do if already initialized */
+ if (sspc->dma_initialized)
+ return;
+
+ /* Use DMAC1 */
+ if (sspc->quirks & QUIRKS_PLATFORM_MRST)
+ device_id = PCI_MRST_DMAC1_ID;
+ else if (sspc->quirks & QUIRKS_PLATFORM_BYT)
+ device_id = PCI_BYT_DMAC1_ID;
+ else if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+ device_id = PCI_MRFL_DMAC_ID;
+ else
+ device_id = PCI_MDFL_DMAC1_ID;
+
+ sspc->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
+ if (!sspc->dmac1) {
+ dev_err(dev, "Can't find DMAC1");
+ return;
+ }
+
+ if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
+ sspc->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
+ 2 * MAX_SPI_TRANSFER_SIZE);
+ if (sspc->virt_addr_sram_rx)
+ sspc->virt_addr_sram_tx = sspc->virt_addr_sram_rx +
+ MAX_SPI_TRANSFER_SIZE;
+ else
+ dev_err(dev, "Virt_addr_sram_rx is null\n");
+ }
+
+ /* 1. Allocate rx channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ sspc->rxchan = dma_request_channel(mask, chan_filter, sspc);
+ if (!sspc->rxchan)
+ goto err_exit;
+
+ sspc->rxchan->private = rxs;
+
+ /* 2. Allocate tx channel */
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ sspc->txchan = dma_request_channel(mask, chan_filter, sspc);
+ if (!sspc->txchan)
+ goto free_rxchan;
+ else
+ sspc->txchan->private = txs;
+
+ /* set the dma done bit to 1 */
+ sspc->txdma_done = 1;
+ sspc->rxdma_done = 1;
+
+ sspc->tx_param.drv_context = sspc;
+ sspc->tx_param.direction = TX_DIRECTION;
+ sspc->rx_param.drv_context = sspc;
+ sspc->rx_param.direction = RX_DIRECTION;
+
+ sspc->dma_initialized = 1;
+ return;
+
+free_rxchan:
+ dma_release_channel(sspc->rxchan);
+err_exit:
+ dev_err(dev, "Error : DMA Channel Not available\n");
+
+ if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+ iounmap(sspc->virt_addr_sram_rx);
+
+ pci_dev_put(sspc->dmac1);
+ return;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @sspc: Pointer to the private driver context
+ */
+static void intel_mid_ssp_spi_dma_exit(struct ssp_drv_context *sspc)
+{
+ dma_release_channel(sspc->txchan);
+ dma_release_channel(sspc->rxchan);
+
+ if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+ iounmap(sspc->virt_addr_sram_rx);
+
+ pci_dev_put(sspc->dmac1);
+}
+
+/**
+ * dma_transfer() - Initiate a DMA transfer
+ * @sspc: Pointer to the private driver context
+ */
+static void dma_transfer(struct ssp_drv_context *sspc)
+{
+ dma_addr_t ssdr_addr;
+ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+ struct dma_chan *txchan, *rxchan;
+ enum dma_ctrl_flags flag;
+ struct device *dev = &sspc->pdev->dev;
+
+ /* get Data Read/Write address */
+ ssdr_addr = (dma_addr_t)(sspc->paddr + 0x10);
+
+ if (sspc->tx_dma)
+ sspc->txdma_done = 0;
+
+ if (sspc->rx_dma)
+ sspc->rxdma_done = 0;
+
+ /* 2. prepare the RX dma transfer */
+ txchan = sspc->txchan;
+ rxchan = sspc->rxchan;
+
+ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+ if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+ /* Since the DMA is configured to do 32bits access */
+ /* to/from the DDR, the DMA transfer size must be */
+ /* a multiple of 4 bytes */
+ sspc->len_dma_rx = sspc->len & ~(4 - 1);
+ sspc->len_dma_tx = sspc->len_dma_rx;
+
+ /* In Rx direction, TRAIL Bytes are handled by memcpy */
+ if (sspc->rx_dma &&
+ (sspc->len_dma_rx >
+ sspc->rx_fifo_threshold * sspc->n_bytes))
+ sspc->len_dma_rx = TRUNCATE(sspc->len_dma_rx,
+ sspc->rx_fifo_threshold * sspc->n_bytes);
+ else if (!sspc->rx_dma)
+ dev_err(dev, "ERROR : rx_dma is null\r\n");
+ } else {
+ /* TRAIL Bytes are handled by DMA */
+ if (sspc->rx_dma) {
+ sspc->len_dma_rx = sspc->len;
+ sspc->len_dma_tx = sspc->len;
+ } else
+ dev_err(dev, "ERROR : sspc->rx_dma is null!\n");
+ }
+
+ sspc->dmas_rx.dma_slave.src_addr = ssdr_addr;
+ rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+ (unsigned long)&(sspc->dmas_rx.dma_slave));
+ dma_sync_single_for_device(dev, sspc->rx_dma,
+ sspc->len, DMA_FROM_DEVICE);
+
+ rxdesc = rxchan->device->device_prep_dma_memcpy
+ (rxchan, /* DMA Channel */
+ sspc->rx_dma, /* DAR */
+ ssdr_addr, /* SAR */
+ sspc->len_dma_rx, /* Data Length */
+ flag); /* Flag */
+
+ if (rxdesc) {
+ rxdesc->callback = intel_mid_ssp_spi_dma_done;
+ rxdesc->callback_param = &sspc->rx_param;
+ } else {
+ dev_dbg(dev, "rxdesc is null! (len_dma_rx:%d)\n",
+ sspc->len_dma_rx);
+ sspc->rxdma_done = 1;
+ }
+
+ /* 3. prepare the TX dma transfer */
+ sspc->dmas_tx.dma_slave.dst_addr = ssdr_addr;
+ txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+ (unsigned long)&(sspc->dmas_tx.dma_slave));
+ dma_sync_single_for_device(dev, sspc->tx_dma,
+ sspc->len, DMA_TO_DEVICE);
+
+ if (sspc->tx_dma) {
+ txdesc = txchan->device->device_prep_dma_memcpy
+ (txchan, /* DMA Channel */
+ ssdr_addr, /* DAR */
+ sspc->tx_dma, /* SAR */
+ sspc->len_dma_tx, /* Data Length */
+ flag); /* Flag */
+ if (txdesc) {
+ txdesc->callback = intel_mid_ssp_spi_dma_done;
+ txdesc->callback_param = &sspc->tx_param;
+ } else {
+ dev_dbg(dev, "txdesc is null! (len_dma_tx:%d)\n",
+ sspc->len_dma_tx);
+ sspc->txdma_done = 1;
+ }
+ } else {
+ dev_err(dev, "ERROR : sspc->tx_dma is null!\n");
+ return;
+ }
+
+ dev_dbg(dev, "DMA transfer len:%d len_dma_tx:%d len_dma_rx:%d\n",
+ sspc->len, sspc->len_dma_tx, sspc->len_dma_rx);
+
+ if (rxdesc || txdesc) {
+ if (rxdesc) {
+ dev_dbg(dev, "Firing DMA RX channel\n");
+ rxdesc->tx_submit(rxdesc);
+ }
+ if (txdesc) {
+ dev_dbg(dev, "Firing DMA TX channel\n");
+ txdesc->tx_submit(txdesc);
+ }
+ } else {
+ struct callback_param cb_param;
+ cb_param.drv_context = sspc;
+ dev_dbg(dev, "Bypassing DMA transfer\n");
+ intel_mid_ssp_spi_dma_done(&cb_param);
+ }
+}
+
+/**
+ * map_dma_buffers() - Map DMA buffer before a transfer
+ * @sspc: Pointer to the private driver context
+ */
+static int map_dma_buffers(struct ssp_drv_context *sspc)
+{
+ struct device *dev = &sspc->pdev->dev;
+
+ if (unlikely(sspc->dma_mapped)) {
+ dev_err(dev, "ERROR : DMA buffers already mapped\n");
+ return 0;
+ }
+ if (unlikely(sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
+ /* Copy sspc->tx into sram_tx */
+ memcpy_toio(sspc->virt_addr_sram_tx, sspc->tx, sspc->len);
+#ifdef DUMP_RX
+ dump_trailer(&sspc->pdev->dev, sspc->tx, sspc->len, 16);
+#endif
+ sspc->rx_dma = SRAM_RX_ADDR;
+ sspc->tx_dma = SRAM_TX_ADDR;
+ } else {
+ /* no QUIRKS_SRAM_ADDITIONAL_CPY */
+ if (unlikely(sspc->dma_mapped))
+ return 1;
+
+ sspc->tx_dma = dma_map_single(dev, sspc->tx, sspc->len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(dma_mapping_error(dev, sspc->tx_dma))) {
+ dev_err(dev, "ERROR : tx dma mapping failed\n");
+ return 0;
+ }
+
+ sspc->rx_dma = dma_map_single(dev, sspc->rx, sspc->len,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(dev, sspc->rx_dma))) {
+ dma_unmap_single(dev, sspc->tx_dma,
+ sspc->len, DMA_TO_DEVICE);
+ dev_err(dev, "ERROR : rx dma mapping failed\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * drain_trail() - Handle trailing bytes of a transfer
+ * @sspc: Pointer to the private driver context
+ *
+ * This function handles the trailing bytes of a transfer for the case
+ * they are not handled by the DMA.
+ */
+void drain_trail(struct ssp_drv_context *sspc)
+{
+ struct device *dev = &sspc->pdev->dev;
+ void *reg = sspc->ioaddr;
+
+ if (sspc->len != sspc->len_dma_rx) {
+ dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
+ read_SSSR(reg));
+ sspc->rx += sspc->len_dma_rx;
+ sspc->tx += sspc->len_dma_tx;
+
+ while ((sspc->tx != sspc->tx_end) ||
+ (sspc->rx != sspc->rx_end)) {
+ sspc->read(sspc);
+ sspc->write(sspc);
+ }
+ }
+}
+
+/**
+ * sram_to_ddr_cpy() - Copy data from Langwell SDRAM to DDR
+ * @sspc: Pointer to the private driver context
+ */
+static void sram_to_ddr_cpy(struct ssp_drv_context *sspc)
+{
+ u32 length = sspc->len;
+
+ if ((sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+ && (sspc->len > sspc->rx_fifo_threshold * sspc->n_bytes))
+ length = TRUNCATE(sspc->len,
+ sspc->rx_fifo_threshold * sspc->n_bytes);
+
+ memcpy_fromio(sspc->rx, sspc->virt_addr_sram_rx, length);
+}
+
+static void int_transfer_complete(struct ssp_drv_context *sspc)
+{
+ void *reg = sspc->ioaddr;
+ struct spi_message *msg;
+ struct device *dev = &sspc->pdev->dev;
+
+ if (unlikely(sspc->quirks & QUIRKS_USE_PM_QOS))
+ pm_qos_update_request(&sspc->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+
+ if (unlikely(sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
+ sram_to_ddr_cpy(sspc);
+
+ if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL))
+ drain_trail(sspc);
+ else
+ /* Stop getting Time Outs */
+ write_SSTO(0, reg);
+
+ sspc->cur_msg->status = 0;
+ sspc->cur_msg->actual_length = sspc->len;
+
+#ifdef DUMP_RX
+ dump_trailer(dev, sspc->rx, sspc->len, 16);
+#endif
+
+ if (sspc->cs_control)
+ sspc->cs_control(!sspc->cs_assert);
+
+ dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
+ msg = sspc->cur_msg;
+ if (likely(msg->complete))
+ msg->complete(msg->context);
+ complete(&sspc->msg_done);
+}
+
+static void int_transfer_complete_work(struct work_struct *work)
+{
+ struct ssp_drv_context *sspc = container_of(work,
+ struct ssp_drv_context, complete_work);
+
+ int_transfer_complete(sspc);
+}
+
+static void poll_transfer_complete(struct ssp_drv_context *sspc)
+{
+ /* Update total bytes transferred; return count of actual bytes read */
+ sspc->cur_msg->actual_length += sspc->len - (sspc->rx_end - sspc->rx);
+
+ sspc->cur_msg->status = 0;
+}
+
+/**
+ * ssp_int() - Interrupt handler
+ * @irq
+ * @dev_id
+ *
+ * The SSP interrupt is not used for transfer which are handled by
+ * DMA or polling: only under/over run conditions are caught to detect
+ * broken transfers.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+ struct ssp_drv_context *sspc = dev_id;
+ void *reg = sspc->ioaddr;
+ struct device *dev = &sspc->pdev->dev;
+ u32 status = read_SSSR(reg);
+
+ /* It should never be our interrupt since SSP will */
+ /* only trigs interrupt for under/over run.*/
+ if (likely(!(status & sspc->mask_sr)))
+ return IRQ_NONE;
+
+ if (status & SSSR_ROR || status & SSSR_TUR) {
+ dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n", status);
+ WARN_ON(1);
+ if (status & SSSR_ROR)
+ dev_err(dev, "we have Overrun\n");
+ if (status & SSSR_TUR)
+ dev_err(dev, "we have Underrun\n");
+ }
+
+ /* We can fall here when not using DMA mode */
+ if (!sspc->cur_msg) {
+ disable_interface(sspc);
+ disable_triggers(sspc);
+ }
+ /* clear status register */
+ write_SSSR(sspc->clear_sr, reg);
+ return IRQ_HANDLED;
+}
+
+static void poll_writer(struct work_struct *work)
+{
+ struct ssp_drv_context *sspc =
+ container_of(work, struct ssp_drv_context, poll_write);
+
+ while (sspc->tx < sspc->tx_end)
+ sspc->write(sspc);
+
+}
+
+/*
+ * Perform a single transfer.
+ */
+static void poll_transfer(unsigned long data)
+{
+ struct ssp_drv_context *sspc = (void *)data;
+
+ while (!sspc->read(sspc))
+ cpu_relax();
+
+ poll_transfer_complete(sspc);
+}
+
+/**
+ * start_bitbanging() - Clock synchronization by bit banging
+ * @sspc: Pointer to private driver context
+ *
+ * This clock synchronization will be removed as soon as it is
+ * handled by the SCU.
+ */
+static void start_bitbanging(struct ssp_drv_context *sspc)
+{
+ u32 sssr;
+ u32 count = 0;
+ u32 cr0;
+ void *i2c_reg = sspc->I2C_ioaddr;
+ struct device *dev = &sspc->pdev->dev;
+ void *reg = sspc->ioaddr;
+ struct chip_data *chip = spi_get_ctldata(sspc->cur_msg->spi);
+ cr0 = chip->cr0;
+
+ dev_warn(dev, "In %s : Starting bit banging\n",
+ __func__);
+ if (read_SSSR(reg) & SSP_NOT_SYNC)
+ dev_warn(dev, "SSP clock desynchronized.\n");
+ if (!(read_SSCR0(reg) & SSCR0_SSE))
+ dev_warn(dev, "in SSCR0, SSP disabled.\n");
+
+ dev_dbg(dev, "SSP not ready, start CLK sync\n");
+
+ write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+ write_SSPSP(0x02010007, reg);
+
+ write_SSTO(chip->timeout, reg);
+ write_SSCR0(cr0, reg);
+
+ /*
+ * This routine uses the DFx block to override the SSP inputs
+ * and outputs allowing us to bit bang SSPSCLK. On Langwell,
+ * we have to generate the clock to clear busy.
+ */
+ write_I2CDATA(0x3, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ write_I2CCTRL(0x01070034, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ write_I2CDATA(0x00000099, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ write_I2CCTRL(0x01070038, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ sssr = read_SSSR(reg);
+
+ /* Bit bang the clock until CSS clears */
+ while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
+ write_I2CDATA(0x2, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ write_I2CCTRL(0x01070034, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ write_I2CDATA(0x3, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ write_I2CCTRL(0x01070034, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ sssr = read_SSSR(reg);
+ count++;
+ }
+ if (count >= MAX_BITBANGING_LOOP)
+ dev_err(dev, "ERROR in %s : infinite loop on bit banging. Aborting\n",
+ __func__);
+
+ dev_dbg(dev, "---Bit bang count=%d\n", count);
+
+ write_I2CDATA(0x0, i2c_reg);
+ udelay(I2C_ACCESS_USDELAY);
+ write_I2CCTRL(0x01070038, i2c_reg);
+}
+
+static unsigned int ssp_get_clk_div(struct ssp_drv_context *sspc, int speed)
+{
+ if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+ /* The clock divider shall stay between 0 and 4095. */
+ return clamp(25000000 / speed - 1, 0, 4095);
+ else
+ return clamp(100000000 / speed - 1, 3, 4095);
+}
+
+
+static int ssp_get_speed(struct ssp_drv_context *sspc, int clk_div)
+{
+ if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+ return 25000000 / (clk_div + 1);
+ else
+ return 100000000 / (clk_div + 1);
+}
+
+/**
+ * transfer() - Start a SPI transfer
+ * @spi: Pointer to the spi_device struct
+ * @msg: Pointer to the spi_message struct
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct ssp_drv_context *sspc = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ spin_lock_irqsave(&sspc->lock, flags);
+ list_add_tail(&msg->queue, &sspc->queue);
+ if (!sspc->suspended)
+ queue_work(sspc->workqueue, &sspc->pump_messages);
+ spin_unlock_irqrestore(&sspc->lock, flags);
+
+ return 0;
+}
+
+static int handle_message(struct ssp_drv_context *sspc)
+{
+ struct chip_data *chip = NULL;
+ struct spi_transfer *transfer = NULL;
+ void *reg = sspc->ioaddr;
+ u32 cr0, saved_cr0, cr1, saved_cr1;
+ struct device *dev = &sspc->pdev->dev;
+ struct spi_message *msg = sspc->cur_msg;
+ u32 clk_div, saved_speed_hz, speed_hz;
+ u8 dma_enabled;
+ u32 timeout;
+ u8 chip_select;
+ u32 mask = 0;
+ int bits_per_word, saved_bits_per_word;
+ unsigned long flags;
+
+ chip = spi_get_ctldata(msg->spi);
+
+ /* get every chip data we need to handle atomically the full message */
+ spin_lock_irqsave(&sspc->lock, flags);
+ saved_cr0 = chip->cr0;
+ saved_cr1 = chip->cr1;
+ saved_bits_per_word = msg->spi->bits_per_word;
+ saved_speed_hz = chip->speed_hz;
+ sspc->cs_control = chip->cs_control;
+ timeout = chip->timeout;
+ chip_select = chip->chip_select;
+ dma_enabled = chip->dma_enabled;
+ spin_unlock_irqrestore(&sspc->lock, flags);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+
+ /* Check transfer length */
+ if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
+ (transfer->len == 0))) {
+ dev_warn(dev, "transfer length null or greater than %d\n",
+ MAX_SPI_TRANSFER_SIZE);
+ dev_warn(dev, "length = %d\n", transfer->len);
+ msg->status = -EINVAL;
+
+ if (msg->complete)
+ msg->complete(msg->context);
+ complete(&sspc->msg_done);
+ return 0;
+ }
+
+ /* If the bits_per_word field in non-zero in the spi_transfer provided
+ * by the user-space, consider this value. Otherwise consider the
+ * default bits_per_word field from the spi setting. */
+ if (transfer->bits_per_word) {
+ bits_per_word = transfer->bits_per_word;
+ cr0 = saved_cr0;
+ cr0 &= ~(SSCR0_EDSS | SSCR0_DSS);
+ cr0 |= SSCR0_DataSize(bits_per_word > 16 ?
+ bits_per_word - 16 : bits_per_word)
+ | (bits_per_word > 16 ? SSCR0_EDSS : 0);
+ } else {
+ /* Use default value. */
+ bits_per_word = saved_bits_per_word;
+ cr0 = saved_cr0;
+ }
+
+ if ((bits_per_word < MIN_BITS_PER_WORD
+ || bits_per_word > MAX_BITS_PER_WORD)) {
+ dev_warn(dev, "invalid wordsize\n");
+ msg->status = -EINVAL;
+ if (msg->complete)
+ msg->complete(msg->context);
+ complete(&sspc->msg_done);
+ return 0;
+ }
+
+ /* Check message length and bit per words consistency */
+ if (bits_per_word <= 8)
+ mask = 0;
+ else if (bits_per_word <= 16)
+ mask = 1;
+ else if (bits_per_word <= 32)
+ mask = 3;
+
+ if (transfer->len & mask) {
+ dev_warn(dev,
+ "message rejected : data length %d not multiple of %d "
+ "while in %d bits mode\n",
+ transfer->len,
+ mask + 1,
+ (mask == 1) ? 16 : 32);
+ msg->status = -EINVAL;
+ if (msg->complete)
+ msg->complete(msg->context);
+ complete(&sspc->msg_done);
+ return 0;
+ }
+
+ /* Flush any remaining data (in case of failed previous transfer) */
+ flush(sspc);
+
+ dev_dbg(dev, "%d bits/word, mode %d\n",
+ bits_per_word, msg->spi->mode & 0x3);
+ if (bits_per_word <= 8) {
+ sspc->n_bytes = 1;
+ sspc->read = u8_reader;
+ sspc->write = u8_writer;
+ } else if (bits_per_word <= 16) {
+ sspc->n_bytes = 2;
+ sspc->read = u16_reader;
+ sspc->write = u16_writer;
+ } else if (bits_per_word <= 32) {
+ if (!ssp_timing_wr)
+ cr0 |= SSCR0_EDSS;
+ sspc->n_bytes = 4;
+ sspc->read = u32_reader;
+ sspc->write = u32_writer;
+ }
+ sspc->tx = (void *)transfer->tx_buf;
+ sspc->rx = (void *)transfer->rx_buf;
+ sspc->len = transfer->len;
+ sspc->cs_change = transfer->cs_change;
+
+ if (likely(dma_enabled)) {
+ sspc->dma_mapped = map_dma_buffers(sspc);
+ if (unlikely(!sspc->dma_mapped))
+ return 0;
+ }
+ sspc->tx = (void *)transfer->tx_buf;
+ sspc->rx = (void *)transfer->rx_buf;
+ sspc->len = transfer->len;
+ sspc->cs_control = chip->cs_control;
+ sspc->cs_change = transfer->cs_change;
+
+ if (likely(chip->dma_enabled)) {
+ sspc->dma_mapped = map_dma_buffers(sspc);
+ if (unlikely(!sspc->dma_mapped))
+ return 0;
+ } else {
+ sspc->write = sspc->tx ? sspc->write : null_writer;
+ sspc->read = sspc->rx ? sspc->read : null_reader;
+ }
+ sspc->tx_end = sspc->tx + transfer->len;
+ sspc->rx_end = sspc->rx + transfer->len;
+
+ /* [REVERT ME] Bug in status register clear for Tangier simulation */
+ if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+ (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+ if ((intel_mid_identify_sim() != INTEL_MID_CPU_SIMULATION_VP &&
+ (intel_mid_identify_sim() != INTEL_MID_CPU_SIMULATION_HVP)))
+ write_SSSR(sspc->clear_sr, reg);
+ } else /* Clear status */
+ write_SSSR(sspc->clear_sr, reg);
+
+ /* setup the CR1 control register */
+ cr1 = saved_cr1 | sspc->cr1_sig;
+
+ if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+ /* in case of len smaller than burst size, adjust the RX */
+ /* threshold. All other cases will use the default threshold */
+ /* value. The RX fifo threshold must be aligned with the DMA */
+ /* RX transfer size, which may be limited to a multiple of 4 */
+ /* bytes due to 32bits DDR access. */
+ if (sspc->len / sspc->n_bytes <= sspc->rx_fifo_threshold) {
+ u32 rx_fifo_threshold;
+
+ rx_fifo_threshold = (sspc->len & ~(4 - 1)) /
+ sspc->n_bytes;
+ cr1 &= ~(SSCR1_RFT);
+ cr1 |= SSCR1_RxTresh(rx_fifo_threshold) & SSCR1_RFT;
+ } else
+ write_SSTO(timeout, reg);
+ }
+ dev_dbg(dev, "transfer len:%d n_bytes:%d cr0:%x cr1:%x",
+ sspc->len, sspc->n_bytes, cr0, cr1);
+
+ /* first set CR1 */
+ write_SSCR1(cr1, reg);
+
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+ write_SSFS((1 << chip_select), reg);
+
+ /* recalculate the frequency for each transfer */
+ if (transfer->speed_hz)
+ speed_hz = transfer->speed_hz;
+ else
+ speed_hz = saved_speed_hz;
+
+ clk_div = ssp_get_clk_div(sspc, speed_hz);
+
+ cr0 &= ~SSCR0_SCR;
+ cr0 |= (clk_div & 0xFFF) << 8;
+
+ /* Do bitbanging only if SSP not-enabled or not-synchronized */
+ if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
+ (!(read_SSCR0(reg) & SSCR0_SSE))) &&
+ (sspc->quirks & QUIRKS_BIT_BANGING))) {
+ start_bitbanging(sspc);
+ } else {
+
+ /* if speed is higher than 6.25 MHz, enable clock delay */
+ if (speed_hz > 6250000)
+ write_SSCR2((read_SSCR2(reg) | SSCR2_CLK_DEL_EN), reg);
+ else
+ write_SSCR2((read_SSCR2(reg) & ~SSCR2_CLK_DEL_EN), reg);
+
+ /* (re)start the SSP */
+ if (ssp_timing_wr) {
+ dev_dbg(dev, "original cr0 before reset:%x",
+ cr0);
+ /* we should not disable TUM and RIM interrupts */
+ write_SSCR0(0x0000000F, reg);
+ cr0 &= ~(SSCR0_SSE);
+ dev_dbg(dev, "reset ssp:cr0:%x", cr0);
+ write_SSCR0(cr0, reg);
+ cr0 |= SSCR0_SSE;
+ dev_dbg(dev, "reset ssp:cr0:%x", cr0);
+ write_SSCR0(cr0, reg);
+ } else
+ write_SSCR0(cr0, reg);
+ }
+
+ if (sspc->cs_control)
+ sspc->cs_control(sspc->cs_assert);
+
+ if (likely(dma_enabled)) {
+ if (unlikely(sspc->quirks & QUIRKS_USE_PM_QOS))
+ pm_qos_update_request(&sspc->pm_qos_req,
+ MIN_EXIT_LATENCY);
+ dma_transfer(sspc);
+ } else {
+ /* Do the transfer synchronously */
+ queue_work(sspc->wq_poll_write, &sspc->poll_write);
+ poll_transfer((unsigned long)sspc);
+ }
+
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)
+ || sspc->cs_change) {
+ if (sspc->cs_control)
+ sspc->cs_control(!sspc->cs_assert);
+ }
+
+ } /* end of list_for_each_entry */
+
+ /* Now we are done with this entire message */
+ if (!dma_enabled) {
+ if (likely(msg->complete))
+ msg->complete(msg->context);
+ complete(&sspc->msg_done);
+ }
+
+ return 0;
+}
+
+static void pump_messages(struct work_struct *work)
+{
+ struct ssp_drv_context *sspc =
+ container_of(work, struct ssp_drv_context, pump_messages);
+ struct device *dev = &sspc->pdev->dev;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ pm_runtime_get_sync(dev);
+ spin_lock_irqsave(&sspc->lock, flags);
+ while (!list_empty(&sspc->queue)) {
+ if (sspc->suspended)
+ break;
+ msg = list_entry(sspc->queue.next, struct spi_message, queue);
+ list_del_init(&msg->queue);
+ sspc->cur_msg = msg;
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ INIT_COMPLETION(sspc->msg_done);
+ handle_message(sspc);
+ wait_for_completion(&sspc->msg_done);
+ spin_lock_irqsave(&sspc->lock, flags);
+ sspc->cur_msg = NULL;
+ }
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+/**
+ * setup() - Driver setup procedure
+ * @spi: Pointer to the spi_device struct
+ */
+static int setup(struct spi_device *spi)
+{
+ struct intel_mid_ssp_spi_chip *chip_info = NULL;
+ struct chip_data *chip;
+ struct ssp_drv_context *sspc =
+ spi_master_get_devdata(spi->master);
+ u32 tx_fifo_threshold;
+ u32 burst_size;
+ u32 clk_div;
+ static u32 one_time_setup = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sspc->lock, flags);
+ if (!spi->bits_per_word)
+ spi->bits_per_word = DFLT_BITS_PER_WORD;
+
+ if ((spi->bits_per_word < MIN_BITS_PER_WORD
+ || spi->bits_per_word > MAX_BITS_PER_WORD)) {
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ return -EINVAL;
+ }
+
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spi->dev,
+ "failed setup: can't allocate chip data\n");
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ return -ENOMEM;
+ }
+ }
+ chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
+ spi->bits_per_word - 16 : spi->bits_per_word)
+ | SSCR0_SSE
+ | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+
+ /* protocol drivers may change the chip settings, so... */
+ /* if chip_info exists, use it */
+ chip_info = spi->controller_data;
+
+ /* chip_info isn't always needed */
+ chip->cr1 = 0;
+ if (chip_info) {
+ /* If user requested CS Active High need to verify that there
+ * is no transfer pending. If this is the case, kindly fail. */
+ if ((spi->mode & SPI_CS_HIGH) != sspc->cs_assert) {
+ if (sspc->cur_msg) {
+ dev_err(&spi->dev, "message pending... Failing\n");
+ /* A message is currently in transfer. Do not toggle CS */
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ return -EAGAIN;
+ }
+ if (!chip_info->cs_control) {
+ /* unable to control cs by hand */
+ dev_err(&spi->dev,
+ "This CS does not support SPI_CS_HIGH flag\n");
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ return -EINVAL;
+ }
+ sspc->cs_assert = spi->mode & SPI_CS_HIGH;
+ chip_info->cs_control(!sspc->cs_assert);
+ }
+
+ burst_size = chip_info->burst_size;
+ if (burst_size > IMSS_FIFO_BURST_8)
+ burst_size = DFLT_FIFO_BURST_SIZE;
+
+ chip->timeout = chip_info->timeout;
+
+ if (chip_info->enable_loopback)
+ chip->cr1 |= SSCR1_LBM;
+
+ chip->dma_enabled = chip_info->dma_enabled;
+ chip->cs_control = chip_info->cs_control;
+
+ /* Request platform-specific gpio and pinmux here since
+ * it is not possible to get the intel_mid_ssp_spi_chip
+ * structure in probe */
+ if (one_time_setup && !chip_info->dma_enabled
+ && chip_info->platform_pinmux) {
+ chip_info->platform_pinmux();
+ one_time_setup = 0;
+ }
+
+ } else {
+ /* if no chip_info provided by protocol driver, */
+ /* set default values */
+ dev_info(&spi->dev, "setting default chip values\n");
+
+ burst_size = DFLT_FIFO_BURST_SIZE;
+ chip->dma_enabled = 1;
+ if (sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+ chip->timeout = 0;
+ else
+ chip->timeout = DFLT_TIMEOUT_VAL;
+ }
+ /* Set FIFO thresholds according to burst_size */
+ if (burst_size == IMSS_FIFO_BURST_8)
+ sspc->rx_fifo_threshold = 8;
+ else if (burst_size == IMSS_FIFO_BURST_4)
+ sspc->rx_fifo_threshold = 4;
+ else
+ sspc->rx_fifo_threshold = 1;
+ /* FIXME: this is a workaround.
+ On MRST, in DMA mode, it is very strange that the RX FIFO can't reach
+ burst size. */
+ if (sspc->quirks & QUIRKS_PLATFORM_MRFL && chip->dma_enabled)
+ sspc->rx_fifo_threshold = 1;
+ tx_fifo_threshold = SPI_FIFO_SIZE - sspc->rx_fifo_threshold;
+ chip->cr1 |= (SSCR1_RxTresh(sspc->rx_fifo_threshold) &
+ SSCR1_RFT) | (SSCR1_TxTresh(tx_fifo_threshold) & SSCR1_TFT);
+
+ sspc->dma_mapped = 0;
+
+ /* setting phase and polarity. spi->mode comes from boardinfo */
+ if ((spi->mode & SPI_CPHA) != 0)
+ chip->cr1 |= SSCR1_SPH;
+ if ((spi->mode & SPI_CPOL) != 0)
+ chip->cr1 |= SSCR1_SPO;
+
+ if (sspc->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE)
+ /* set slave mode */
+ chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
+ chip->cr1 |= SSCR1_SCFR; /* clock is not free running */
+
+ if (spi->bits_per_word <= 8) {
+ chip->n_bytes = 1;
+ } else if (spi->bits_per_word <= 16) {
+ chip->n_bytes = 2;
+ } else if (spi->bits_per_word <= 32) {
+ chip->n_bytes = 4;
+ } else {
+ dev_err(&spi->dev, "invalid wordsize\n");
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ return -EINVAL;
+ }
+
+ if ((sspc->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE) == 0) {
+ clk_div = ssp_get_clk_div(sspc, spi->max_speed_hz);
+ chip->cr0 |= (clk_div & 0xFFF) << 8;
+ spi->max_speed_hz = ssp_get_speed(sspc, clk_div);
+ chip->speed_hz = spi->max_speed_hz;
+ dev_dbg(&spi->dev, "spi->max_speed_hz:%d clk_div:%x cr0:%x",
+ spi->max_speed_hz, clk_div, chip->cr0);
+ }
+ chip->bits_per_word = spi->bits_per_word;
+ chip->chip_select = spi->chip_select;
+
+ spi_set_ctldata(spi, chip);
+
+ /* setup of sspc members that will not change across transfers */
+
+ if (chip->dma_enabled) {
+ sspc->n_bytes = chip->n_bytes;
+ intel_mid_ssp_spi_dma_init(sspc);
+ sspc->cr1_sig = SSCR1_TSRE | SSCR1_RSRE;
+ sspc->mask_sr = SSSR_ROR | SSSR_TUR;
+ if (sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+ sspc->cr1_sig |= SSCR1_TRAIL;
+ } else {
+ sspc->cr1_sig = SSCR1_TINTE;
+ sspc->mask_sr = SSSR_ROR | SSSR_TUR | SSSR_TINT;
+ }
+ sspc->clear_sr = SSSR_TUR | SSSR_ROR | SSSR_TINT;
+
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ return 0;
+}
+
+/**
+ * cleanup() - Driver cleanup procedure
+ * @spi: Pointer to the spi_device struct
+ */
+static void cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+ struct ssp_drv_context *sspc =
+ spi_master_get_devdata(spi->master);
+
+ if (sspc->dma_initialized)
+ intel_mid_ssp_spi_dma_exit(sspc);
+
+ /* Remove the PM_QOS request */
+ if (sspc->quirks & QUIRKS_USE_PM_QOS)
+ pm_qos_remove_request(&sspc->pm_qos_req);
+
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+}
+
+/**
+ * intel_mid_ssp_spi_probe() - Driver probe procedure
+ * @pdev: Pointer to the pci_dev struct
+ * @ent: Pointer to the pci_device_id struct
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct ssp_drv_context *sspc = 0;
+ int status;
+ u32 iolen = 0;
+ u8 ssp_cfg;
+ int pos;
+ void __iomem *syscfg_ioaddr;
+ unsigned long syscfg;
+
+ /* Check if the SSP we are probed for has been allocated */
+ /* to operate as SPI. This information is retrieved from */
+ /* the field adid of the Vendor-Specific PCI capability */
+ /* which is used as a configuration register. */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+ if (pos > 0) {
+ pci_read_config_byte(pdev,
+ pos + VNDR_CAPABILITY_ADID_OFFSET,
+ &ssp_cfg);
+ } else {
+ dev_info(dev, "No Vendor Specific PCI capability\n");
+ goto err_abort_probe;
+ }
+
+ if (ssp_cfg_get_mode(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
+ dev_info(dev, "Unsupported SSP mode (%02xh)\n", ssp_cfg);
+ goto err_abort_probe;
+ }
+
+ dev_info(dev, "found PCI SSP controller (ID: %04xh:%04xh cfg: %02xh)\n",
+ pdev->vendor, pdev->device, ssp_cfg);
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return status;
+
+ /* Allocate Slave with space for sspc and null dma buffer */
+ master = spi_alloc_master(dev, sizeof(struct ssp_drv_context));
+
+ if (!master) {
+ dev_err(dev, "cannot alloc spi_slave\n");
+ status = -ENOMEM;
+ goto err_free_0;
+ }
+
+ sspc = spi_master_get_devdata(master);
+ sspc->master = master;
+
+ sspc->pdev = pdev;
+ sspc->quirks = ent->driver_data;
+
+ /* Set platform & configuration quirks */
+ if (sspc->quirks & QUIRKS_PLATFORM_MRST) {
+ /* Apply bit banging workaround on MRST */
+ sspc->quirks |= QUIRKS_BIT_BANGING;
+ /* MRST slave mode workarounds */
+ if (ssp_cfg_is_spi_slave(ssp_cfg))
+ sspc->quirks |= QUIRKS_USE_PM_QOS |
+ QUIRKS_SRAM_ADDITIONAL_CPY;
+ }
+ sspc->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
+ if (ssp_cfg_is_spi_slave(ssp_cfg))
+ sspc->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
+
+ master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+ master->bus_num = ssp_cfg_get_spi_bus_nb(ssp_cfg);
+ master->num_chipselect = 4;
+ master->cleanup = cleanup;
+ master->setup = setup;
+ master->transfer = transfer;
+ sspc->dma_wq = create_workqueue("intel_mid_ssp_spi");
+ INIT_WORK(&sspc->complete_work, int_transfer_complete_work);
+
+ sspc->dma_initialized = 0;
+ sspc->suspended = 0;
+ sspc->cur_msg = NULL;
+
+ /* get basic io resource and map it */
+ sspc->paddr = pci_resource_start(pdev, 0);
+ iolen = pci_resource_len(pdev, 0);
+
+ status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
+ if (status)
+ goto err_free_1;
+
+ sspc->ioaddr = ioremap_nocache(sspc->paddr, iolen);
+ if (!sspc->ioaddr) {
+ status = -ENOMEM;
+ goto err_free_2;
+ }
+ dev_dbg(dev, "paddr = : %08lx", sspc->paddr);
+ dev_dbg(dev, "ioaddr = : %p\n", sspc->ioaddr);
+ dev_dbg(dev, "attaching to IRQ: %04x\n", pdev->irq);
+ dev_dbg(dev, "quirks = : %08lx\n", sspc->quirks);
+
+ if (sspc->quirks & QUIRKS_BIT_BANGING) {
+ /* Bit banging on the clock is done through */
+ /* DFT which is available through I2C. */
+ /* get base address of I2C_Serbus registers */
+ sspc->I2C_paddr = 0xff12b000;
+ sspc->I2C_ioaddr = ioremap_nocache(sspc->I2C_paddr, 0x10);
+ if (!sspc->I2C_ioaddr) {
+ status = -ENOMEM;
+ goto err_free_3;
+ }
+ }
+
+ /* Attach to IRQ */
+ sspc->irq = pdev->irq;
+ status = request_irq(sspc->irq, ssp_int, IRQF_SHARED,
+ "intel_mid_ssp_spi", sspc);
+
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+ if ((intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_SLE) ||
+ (intel_mid_identify_sim() ==
+ INTEL_MID_CPU_SIMULATION_NONE)) {
+ /* [REVERT ME] Tangier SLE not supported.
+ * Requires debug before removal. Assume
+ * also required in Si. */
+ disable_irq_nosync(sspc->irq);
+ }
+ if (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_NONE)
+ ssp_timing_wr = 1;
+ }
+
+ if (status < 0) {
+ dev_err(&pdev->dev, "can not get IRQ\n");
+ goto err_free_4;
+ }
+
+ if (sspc->quirks & QUIRKS_PLATFORM_MDFL) {
+ /* get base address of DMA selector. */
+ syscfg = sspc->paddr - SYSCFG;
+ syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+ if (!syscfg_ioaddr) {
+ status = -ENOMEM;
+ goto err_free_5;
+ }
+ iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+ }
+
+ INIT_LIST_HEAD(&sspc->queue);
+ init_completion(&sspc->msg_done);
+ spin_lock_init(&sspc->lock);
+ INIT_WORK(&sspc->pump_messages, pump_messages);
+ sspc->workqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
+
+ INIT_WORK(&sspc->poll_write, poll_writer);
+ sspc->wq_poll_write = create_singlethread_workqueue("spi_poll_wr");
+
+ /* Register with the SPI framework */
+ dev_info(dev, "register with SPI framework (bus spi%d)\n",
+ master->bus_num);
+
+ status = spi_register_master(master);
+ if (status) {
+ dev_err(dev, "problem registering spi\n");
+ goto err_free_5;
+ }
+
+ pci_set_drvdata(pdev, sspc);
+
+ /* Create the PM_QOS request */
+ if (sspc->quirks & QUIRKS_USE_PM_QOS)
+ pm_qos_add_request(&sspc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 25);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev))
+ dev_err(&pdev->dev, "spi runtime pm not enabled!\n");
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return status;
+
+err_free_5:
+ free_irq(sspc->irq, sspc);
+err_free_4:
+ iounmap(sspc->I2C_ioaddr);
+err_free_3:
+ iounmap(sspc->ioaddr);
+err_free_2:
+ pci_release_region(pdev, 0);
+err_free_1:
+ spi_master_put(master);
+err_free_0:
+ pci_disable_device(pdev);
+
+ return status;
+err_abort_probe:
+ dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+}
+
+/**
+ * intel_mid_ssp_spi_remove() - driver remove procedure
+ * @pdev: Pointer to the pci_dev struct
+ */
+static void intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+ struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+
+ if (!sspc)
+ return;
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ if (sspc->dma_wq)
+ destroy_workqueue(sspc->dma_wq);
+ if (sspc->workqueue)
+ destroy_workqueue(sspc->workqueue);
+
+ /* Release IRQ */
+ free_irq(sspc->irq, sspc);
+
+ if (sspc->ioaddr)
+ iounmap(sspc->ioaddr);
+ if (sspc->quirks & QUIRKS_BIT_BANGING && sspc->I2C_ioaddr)
+ iounmap(sspc->I2C_ioaddr);
+
+ /* disconnect from the SPI framework */
+ if (sspc->master)
+ spi_unregister_master(sspc->master);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+
+ return;
+}
+
+#ifdef CONFIG_PM
+static int intel_mid_ssp_spi_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+ unsigned long flags;
+ int loop = 26;
+
+ dev_dbg(dev, "suspend\n");
+
+ spin_lock_irqsave(&sspc->lock, flags);
+ sspc->suspended = 1;
+ /*
+ * If there is one msg being handled, wait 500ms at most,
+ * if still not done, return busy
+ */
+ while (sspc->cur_msg && --loop) {
+ spin_unlock_irqrestore(&sspc->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&sspc->lock, flags);
+ if (!loop)
+ sspc->suspended = 0;
+ }
+ spin_unlock_irqrestore(&sspc->lock, flags);
+
+ if (loop)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+static int intel_mid_ssp_spi_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+
+ dev_dbg(dev, "resume\n");
+ spin_lock(&sspc->lock);
+ sspc->suspended = 0;
+ if (!list_empty(&sspc->queue))
+ queue_work(sspc->workqueue, &sspc->pump_messages);
+ spin_unlock(&sspc->lock);
+ return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "runtime suspend called\n");
+ return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "runtime resume called\n");
+ return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_idle(struct device *dev)
+{
+ int err;
+
+ dev_dbg(dev, "runtime idle called\n");
+ if (system_state == SYSTEM_BOOTING)
+ /* if SSP SPI UART is set as default console and earlyprintk
+ * is enabled, it cannot shutdown SSP controller during booting.
+ */
+ err = pm_schedule_suspend(dev, 30000);
+ else
+ err = pm_schedule_suspend(dev, 500);
+
+ return err;
+}
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#define intel_mid_ssp_spi_runtime_suspend NULL
+#define intel_mid_ssp_spi_runtime_resume NULL
+#define intel_mid_ssp_spi_runtime_idle NULL
+#endif /* CONFIG_PM */
+
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+ /* MRST SSP0 */
+ { PCI_VDEVICE(INTEL, 0x0815), QUIRKS_PLATFORM_MRST},
+ /* MDFL SSP0 */
+ { PCI_VDEVICE(INTEL, 0x0832), QUIRKS_PLATFORM_MDFL},
+ /* MDFL SSP1 */
+ { PCI_VDEVICE(INTEL, 0x0825), QUIRKS_PLATFORM_MDFL},
+ /* MDFL SSP3 */
+ { PCI_VDEVICE(INTEL, 0x0816), QUIRKS_PLATFORM_MDFL},
+ /* MRFL SSP5 */
+ { PCI_VDEVICE(INTEL, 0x1194), QUIRKS_PLATFORM_MRFL},
+ /* BYT SSP3 */
+ { PCI_VDEVICE(INTEL, 0x0f0e), QUIRKS_PLATFORM_BYT},
+ {},
+};
+
+static const struct dev_pm_ops intel_mid_ssp_spi_pm_ops = {
+ .suspend = intel_mid_ssp_spi_suspend,
+ .resume = intel_mid_ssp_spi_resume,
+ .runtime_suspend = intel_mid_ssp_spi_runtime_suspend,
+ .runtime_resume = intel_mid_ssp_spi_runtime_resume,
+ .runtime_idle = intel_mid_ssp_spi_runtime_idle,
+};
+
+static struct pci_driver intel_mid_ssp_spi_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pci_ids,
+ .probe = intel_mid_ssp_spi_probe,
+ .remove = intel_mid_ssp_spi_remove,
+ .driver = {
+ .pm = &intel_mid_ssp_spi_pm_ops,
+ },
+};
+
+static int __init intel_mid_ssp_spi_init(void)
+{
+ return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+
+late_initcall(intel_mid_ssp_spi_init);
+
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+ pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+
+module_exit(intel_mid_ssp_spi_exit);
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/types.h>
#include "spi-dw.h"
dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
if (!dws->dmac)
dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ if (!dws->dmac)
+ dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x08EF, NULL);
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
{
struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
struct dma_chan *txchan, *rxchan;
- struct dma_slave_config txconf, rxconf;
+ struct dma_slave_config *txconf, *rxconf;
u16 dma_ctrl = 0;
+ enum dma_ctrl_flags flag;
+ struct device *dev = &dws->master->dev;
+ struct intel_mid_dma_slave *rxs, *txs;
/* 1. setup DMA related registers */
if (cs_change) {
txchan = dws->txchan;
rxchan = dws->rxchan;
- /* 2. Prepare the TX dma transfer */
- txconf.direction = DMA_MEM_TO_DEV;
- txconf.dst_addr = dws->dma_addr;
- txconf.dst_maxburst = LNW_DMA_MSIZE_16;
- txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- txconf.device_fc = false;
+ txs = txchan->private;
+ rxs = rxchan->private;
- txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
- (unsigned long) &txconf);
+ txconf = &txs->dma_slave;
+ rxconf = &rxs->dma_slave;
- memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
- dws->tx_sgl.dma_address = dws->tx_dma;
- dws->tx_sgl.length = dws->len;
+ flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
- txdesc = dmaengine_prep_slave_sg(txchan,
- &dws->tx_sgl,
- 1,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
- txdesc->callback = dw_spi_dma_done;
- txdesc->callback_param = dws;
+ /* 2. Prepare the TX dma transfer */
+ txconf->direction = DMA_MEM_TO_DEV;
+ txconf->dst_addr = dws->dma_addr;
+ txconf->src_maxburst = LNW_DMA_MSIZE_16;
+ txconf->dst_maxburst = LNW_DMA_MSIZE_16;
+ txconf->src_addr_width = dws->dma_width;
+ txconf->dst_addr_width = dws->dma_width;
+ txconf->device_fc = false;
+
+ txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+ (unsigned long) txconf);
+
+ txdesc = txchan->device->device_prep_dma_memcpy
+ (txchan, /* DMA Channel */
+ dws->dma_addr, /* DAR */
+ dws->tx_dma, /* SAR */
+ dws->len, /* Data Length */
+ flag);
+ if (txdesc) {
+ txdesc->callback = dw_spi_dma_done;
+ txdesc->callback_param = dws;
+ } else {
+ dev_err(dev, "ERROR: prepare txdesc failed\n");
+ return -EINVAL;
+ }
/* 3. Prepare the RX dma transfer */
- rxconf.direction = DMA_DEV_TO_MEM;
- rxconf.src_addr = dws->dma_addr;
- rxconf.src_maxburst = LNW_DMA_MSIZE_16;
- rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- rxconf.device_fc = false;
+ rxconf->direction = DMA_DEV_TO_MEM;
+ rxconf->src_addr = dws->dma_addr;
+ rxconf->src_maxburst = LNW_DMA_MSIZE_16;
+ rxconf->dst_maxburst = LNW_DMA_MSIZE_16;
+ rxconf->dst_addr_width = dws->dma_width;
+ rxconf->src_addr_width = dws->dma_width;
+ rxconf->device_fc = false;
rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
- (unsigned long) &rxconf);
-
- memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
- dws->rx_sgl.dma_address = dws->rx_dma;
- dws->rx_sgl.length = dws->len;
-
- rxdesc = dmaengine_prep_slave_sg(rxchan,
- &dws->rx_sgl,
- 1,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
- rxdesc->callback = dw_spi_dma_done;
- rxdesc->callback_param = dws;
+ (unsigned long) rxconf);
+
+ rxdesc = rxchan->device->device_prep_dma_memcpy
+ (rxchan, /* DMA Channel */
+ dws->rx_dma, /* DAR */
+ dws->dma_addr, /* SAR */
+ dws->len, /* Data Length */
+ flag);
+ if (rxdesc) {
+ rxdesc->callback = dw_spi_dma_done;
+ rxdesc->callback_param = dws;
+ } else {
+ dev_err(dev, "ERROR: prepare rxdesc failed\n");
+ return -EINVAL;
+ }
/* rx must be started before tx due to spi instinct */
rxdesc->tx_submit(rxdesc);
return 0;
}
+static int mid_spi_dma_suspend(struct dw_spi *dws)
+{
+ struct dma_chan *txchan, *rxchan;
+
+ txchan = dws->txchan;
+ rxchan = dws->rxchan;
+
+ txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+ rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+
+ txchan->device->device_control(txchan, DMA_PAUSE, 0);
+ rxchan->device->device_control(rxchan, DMA_PAUSE, 0);
+
+ return 0;
+}
+
+static int mid_spi_dma_resume(struct dw_spi *dws)
+{
+ struct dma_chan *txchan, *rxchan;
+
+ txchan = dws->txchan;
+ rxchan = dws->rxchan;
+
+ txchan->device->device_control(txchan, DMA_RESUME, 0);
+ rxchan->device->device_control(rxchan, DMA_RESUME, 0);
+
+ return 0;
+}
+
static struct dw_spi_dma_ops mid_dma_ops = {
.dma_init = mid_spi_dma_init,
.dma_exit = mid_spi_dma_exit,
.dma_transfer = mid_spi_dma_transfer,
+ .dma_suspend = mid_spi_dma_suspend,
+ .dma_resume = mid_spi_dma_resume,
};
#endif
#define CLK_SPI_CDIV_MASK 0x00000e00
#define CLK_SPI_DISABLE_OFFSET 8
-int dw_spi_mid_init(struct dw_spi *dws)
+int dw_spi_mid_init(struct dw_spi *dws, int bus_num)
{
void __iomem *clk_reg;
u32 clk_cdiv;
- clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
+ clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG + bus_num * 4, 16);
if (!clk_reg)
return -ENOMEM;
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
}
dws->parent_dev = &pdev->dev;
- dws->bus_num = 0;
+ dws->bus_num = ent->driver_data;
dws->num_cs = 4;
dws->irq = pdev->irq;
 * Specific handling for Intel MID platforms, like dma setup,
* clock rate, FIFO depth.
*/
- if (pdev->device == 0x0800) {
- ret = dw_spi_mid_init(dws);
- if (ret)
- goto err_unmap;
- }
+ ret = dw_spi_mid_init(dws, ent->driver_data);
+ if (ret)
+ goto err_unmap;
ret = dw_spi_add_host(dws);
if (ret)
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dwpci);
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
return 0;
err_unmap:
pci_set_drvdata(pdev, NULL);
dw_spi_remove_host(&dwpci->dws);
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
iounmap(dwpci->dws.regs);
pci_release_region(pdev, 0);
kfree(dwpci);
}
#ifdef CONFIG_PM
-static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int spi_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
int ret;
return ret;
pci_save_state(pdev);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
return ret;
}
-static int spi_resume(struct pci_dev *pdev)
+static int spi_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
int ret;
return ret;
return dw_spi_resume_host(&dwpci->dws);
}
+
+static int spi_dw_pci_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+ dev_dbg(dev, "PCI runtime suspend called\n");
+ return dw_spi_suspend_host(&dwpci->dws);
+}
+
+static int spi_dw_pci_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+ dev_dbg(dev, "pci_runtime_resume called\n");
+ return dw_spi_resume_host(&dwpci->dws);
+}
+
+static int spi_dw_pci_runtime_idle(struct device *dev)
+{
+ int err;
+
+ dev_dbg(dev, "pci_runtime_idle called\n");
+ if (system_state == SYSTEM_BOOTING)
+ /* if SPI UART is set as default console and earlyprintk
+ * is enabled, it cannot shutdown SPI controller during booting.
+ */
+ err = pm_schedule_suspend(dev, 30000);
+ else
+ err = pm_schedule_suspend(dev, 500);
+
+ if (err != 0)
+ return 0;
+
+ return -EBUSY;
+}
+
#else
#define spi_suspend NULL
#define spi_resume NULL
+#define spi_dw_pci_runtime_suspend NULL
+#define spi_dw_pci_runtime_resume NULL
+#define spi_dw_pci_runtime_idle NULL
#endif
static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
- /* Intel MID platform SPI controller 0 */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
+ /* Intel Medfield platform SPI controller 1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800), .driver_data = 0 },
+ /* Intel Cloverview platform SPI controller 1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08E1), .driver_data = 0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08EE), .driver_data = 1 },
+ /* Intel EVx platform SPI controller 1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0812), .driver_data = 2 },
{},
};
+static const struct dev_pm_ops dw_spi_pm_ops = {
+ .suspend = spi_suspend,
+ .resume = spi_resume,
+ .runtime_suspend = spi_dw_pci_runtime_suspend,
+ .runtime_resume = spi_dw_pci_runtime_resume,
+ .runtime_idle = spi_dw_pci_runtime_idle,
+};
+
static struct pci_driver dw_spi_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = spi_pci_probe,
.remove = spi_pci_remove,
- .suspend = spi_suspend,
- .resume = spi_resume,
+ .driver = {
+ .pm = &dw_spi_pm_ops,
+ },
};
-module_pci_driver(dw_spi_driver);
+static int __init mrst_spi_init(void)
+{
+ return pci_register_driver(&dw_spi_driver);
+}
+
+static void __exit mrst_spi_exit(void)
+{
+ pci_unregister_driver(&dw_spi_driver);
+}
+
+module_init(mrst_spi_init);
+module_exit(mrst_spi_exit);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include "spi-dw.h"
};
#ifdef CONFIG_DEBUG_FS
+static int spi_show_regs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
#define SPI_REGS_BUFSIZE 1024
static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
static const struct file_operations mrst_spi_regs_ops = {
.owner = THIS_MODULE,
- .open = simple_open,
+ .open = spi_show_regs_open,
.read = spi_show_regs,
.llseek = default_llseek,
};
u16 txw = 0;
while (max--) {
- /* Set the tx word if the transfer's original "tx" is not null */
+ /* Set the txw if the transfer's original "tx" is not null */
if (dws->tx_end - dws->len) {
if (dws->n_bytes == 1)
txw = *(u8 *)(dws->tx);
if (dws->cur_transfer->rx_dma)
dws->rx_dma = dws->cur_transfer->rx_dma;
+ /* map dma buffer if it's not mapped */
+ if (!dws->tx_dma) {
+ dws->tx_dma = dma_map_single(NULL, dws->tx,
+ dws->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(NULL, dws->tx_dma)) {
+ pr_err("map tx dma buffer failed\n");
+ goto err1;
+ }
+ }
+
+ if (!dws->rx_dma) {
+ dws->rx_dma = dma_map_single(NULL, dws->rx,
+ dws->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(NULL, dws->rx_dma)) {
+ pr_err("map rx dma buffer failed\n");
+ goto err2;
+ }
+ }
+
return 1;
+
+err2:
+ dma_unmap_single(NULL, dws->tx_dma, dws->len, DMA_TO_DEVICE);
+err1:
+ dws->cur_msg->is_dma_mapped = 0;
+ return 0;
+}
+
+static void unmap_dma_buffers(struct dw_spi *dws)
+{
+ dma_unmap_single(NULL, dws->rx_dma,
+ dws->len, DMA_FROM_DEVICE);
+ dma_unmap_single(NULL, dws->tx_dma,
+ dws->len, DMA_TO_DEVICE);
}
/* Caller already set message->status; dma and pio irqs are blocked */
struct spi_message *msg;
spin_lock_irqsave(&dws->lock, flags);
+
+ if (dws->dma_mapped)
+ unmap_dma_buffers(dws);
+
msg = dws->cur_msg;
+ list_del_init(&dws->cur_msg->queue);
dws->cur_msg = NULL;
dws->cur_transfer = NULL;
dws->prev_chip = dws->cur_chip;
giveback(dws);
} else
tasklet_schedule(&dws->pump_transfers);
+
}
EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
dw_readw(dws, DW_SPI_TXOICR);
dw_readw(dws, DW_SPI_RXOICR);
dw_readw(dws, DW_SPI_RXUICR);
- int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
+ int_error_stop(dws, "interrupt_transfer: fifo over/underrun");
return IRQ_HANDLED;
}
txint_level = dws->fifo_len / 2;
txint_level = (templen > txint_level) ? txint_level : templen;
- imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
+ imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI
+ | SPI_INT_RXOI;
dws->transfer_handler = interrupt_transfer;
}
* 2. clk_div is changed
* 3. control value changes
*/
- if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
+ if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change
+ || clk_div || imask) {
spi_enable_chip(dws, 0);
if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
container_of(work, struct dw_spi, pump_messages);
unsigned long flags;
+ pm_runtime_get_sync(dws->parent_dev);
+
/* Lock queue and check for queue work */
spin_lock_irqsave(&dws->lock, flags);
- if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
- dws->busy = 0;
- spin_unlock_irqrestore(&dws->lock, flags);
- return;
- }
+ if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED)
+ goto exit;
/* Make sure we are not already running a message */
- if (dws->cur_msg) {
- spin_unlock_irqrestore(&dws->lock, flags);
- return;
- }
+ if (dws->cur_msg)
+ goto exit;
/* Extract head of queue */
dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
- list_del_init(&dws->cur_msg->queue);
/* Initial message state*/
dws->cur_msg->state = START_STATE;
/* Mark as busy and launch transfers */
tasklet_schedule(&dws->pump_transfers);
- dws->busy = 1;
+exit:
spin_unlock_irqrestore(&dws->lock, flags);
+ pm_runtime_put_sync(dws->parent_dev);
}
/* spi_device use this to queue in their spi_msg */
spin_lock_irqsave(&dws->lock, flags);
- if (dws->run == QUEUE_STOPPED) {
- spin_unlock_irqrestore(&dws->lock, flags);
- return -ESHUTDOWN;
- }
-
msg->actual_length = 0;
msg->status = -EINPROGRESS;
msg->state = START_STATE;
list_add_tail(&msg->queue, &dws->queue);
- if (dws->run == QUEUE_RUNNING && !dws->busy) {
-
- if (dws->cur_transfer || dws->cur_msg)
- queue_work(dws->workqueue,
- &dws->pump_messages);
- else {
- /* If no other data transaction in air, just go */
- spin_unlock_irqrestore(&dws->lock, flags);
- pump_messages(&dws->pump_messages);
- return 0;
- }
- }
+ queue_work(dws->workqueue, &dws->pump_messages);
spin_unlock_irqrestore(&dws->lock, flags);
return 0;
kfree(chip);
}
-static int init_queue(struct dw_spi *dws)
+static int dw_spi_init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
spin_lock_init(&dws->lock);
dws->run = QUEUE_STOPPED;
- dws->busy = 0;
tasklet_init(&dws->pump_transfers,
pump_transfers, (unsigned long)dws);
return 0;
}
-static int start_queue(struct dw_spi *dws)
+static int dw_spi_start_queue(struct dw_spi *dws)
{
unsigned long flags;
spin_lock_irqsave(&dws->lock, flags);
- if (dws->run == QUEUE_RUNNING || dws->busy) {
+ if (dws->run == QUEUE_RUNNING) {
spin_unlock_irqrestore(&dws->lock, flags);
return -EBUSY;
}
return 0;
}
-static int stop_queue(struct dw_spi *dws)
+int dw_spi_stop_queue(struct dw_spi *dws)
{
unsigned long flags;
- unsigned limit = 50;
int status = 0;
spin_lock_irqsave(&dws->lock, flags);
- dws->run = QUEUE_STOPPED;
- while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
- spin_unlock_irqrestore(&dws->lock, flags);
- msleep(10);
- spin_lock_irqsave(&dws->lock, flags);
- }
-
- if (!list_empty(&dws->queue) || dws->busy)
+ if (!list_empty(&dws->queue))
status = -EBUSY;
+ else
+ dws->run = QUEUE_STOPPED;
spin_unlock_irqrestore(&dws->lock, flags);
return status;
}
+EXPORT_SYMBOL_GPL(dw_spi_stop_queue);
-static int destroy_queue(struct dw_spi *dws)
+static int dw_spi_destroy_queue(struct dw_spi *dws)
{
int status;
- status = stop_queue(dws);
+ status = dw_spi_stop_queue(dws);
if (status != 0)
return status;
destroy_workqueue(dws->workqueue);
}
/* Restart the controller, disable all interrupts, clean rx fifo */
-static void spi_hw_init(struct dw_spi *dws)
+static void dw_spi_hw_init(struct dw_spi *dws)
{
spi_enable_chip(dws, 0);
spi_mask_intr(dws, 0xff);
- spi_enable_chip(dws, 1);
/*
* Try to detect the FIFO depth if not set by interface driver,
dws->fifo_len = (fifo == 257) ? 0 : fifo;
dw_writew(dws, DW_SPI_TXFLTR, 0);
}
+
+ spi_enable_chip(dws, 1);
}
int dw_spi_add_host(struct dw_spi *dws)
master->transfer = dw_spi_transfer;
/* Basic HW init */
- spi_hw_init(dws);
+ dw_spi_hw_init(dws);
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
}
/* Initial and start queue */
- ret = init_queue(dws);
+ ret = dw_spi_init_queue(dws);
if (ret) {
dev_err(&master->dev, "problem initializing queue\n");
goto err_diable_hw;
}
- ret = start_queue(dws);
+ ret = dw_spi_start_queue(dws);
if (ret) {
dev_err(&master->dev, "problem starting queue\n");
goto err_diable_hw;
return 0;
err_queue_alloc:
- destroy_queue(dws);
+ dw_spi_destroy_queue(dws);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
err_diable_hw:
mrst_spi_debugfs_remove(dws);
/* Remove the queue */
- status = destroy_queue(dws);
+ status = dw_spi_destroy_queue(dws);
if (status != 0)
dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
"complete, message memory not freed\n");
{
int ret = 0;
- ret = stop_queue(dws);
+ ret = dw_spi_stop_queue(dws);
if (ret)
return ret;
+
spi_enable_chip(dws, 0);
spi_set_clk(dws, 0);
+
+ if (dws->dma_inited)
+ dws->dma_ops->dma_suspend(dws);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
{
int ret;
- spi_hw_init(dws);
- ret = start_queue(dws);
+ if (dws->dma_inited)
+ dws->dma_ops->dma_resume(dws);
+
+ dw_spi_hw_init(dws);
+ ret = dw_spi_start_queue(dws);
if (ret)
dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);
int (*dma_init)(struct dw_spi *dws);
void (*dma_exit)(struct dw_spi *dws);
int (*dma_transfer)(struct dw_spi *dws, int cs_change);
+ int (*dma_suspend)(struct dw_spi *dws);
+ int (*dma_resume)(struct dw_spi *dws);
};
struct dw_spi {
struct work_struct pump_messages;
spinlock_t lock;
struct list_head queue;
- int busy;
int run;
/* Message Transfer pump */
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
extern void dw_spi_xfer_done(struct dw_spi *dws);
+extern int dw_spi_stop_queue(struct dw_spi *dws);
/* platform related setup */
-extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
+/* Intel MID platforms */
+extern int dw_spi_mid_init(struct dw_spi *dws, int bus_num);
#endif /* DW_SPI_HEADER_H */
enforce idle time which results in more package C-state residency. The
user interface is exposed via generic thermal framework.
+config SENSORS_THERMAL_MRFLD
+ tristate "Thermal driver for Intel Merrifield platform"
+ depends on THERMAL && IIO && IIO_BASINCOVE_GPADC
+ help
+ Say Y here to enable thermal driver on Intel Merrifield platform.
+
+ To load this driver as a module, select M here. The module
+ will be called "mrfl_thermal"
+
+config INTEL_BYT_EC_THERMAL
+ tristate "Thermal driver for Intel Baytrail platform"
+ depends on THERMAL && INTEL_BYT_EC
+ help
+ Say Y here to enable thermal driver on Intel Baytrail-M platform.
+
+ To load this driver as a module, select M here. The module
+ will be called "byt_ec_thermal"
+
+config SOC_THERMAL
+ tristate "SoC Thermal driver"
+ depends on THERMAL
+ help
+ SoC Thermal driver registers to Generic Thermal Framework.
+ Exposes SoC DTS and aux trip point values through the framework.
+
+ Say Y here to enable thermal driver on Intel Merrifield
+ platform. To load this driver as a module, select M here.
+
endif
# Makefile for sensor chip drivers.
#
+CFLAGS_intel_mrfl_thermal.o := -Werror
+CFLAGS_intel_soc_thermal.o := -Werror
+CFLAGS_thermal_core.o := -Werror
+
obj-$(CONFIG_THERMAL) += thermal_sys.o
thermal_sys-y += thermal_core.o
+obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
+obj-$(CONFIG_INTEL_BYT_THERMAL) += intel_byt_thermal.o
+obj-$(CONFIG_SENSORS_THERMAL_MRFLD) += intel_mrfl_thermal.o
+obj-$(CONFIG_SOC_THERMAL) += intel_soc_thermal.o
+
# governors
thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
--- /dev/null
+/*
+ * intel_mid_thermal.c - Intel MID platform thermal driver
+ *
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Ananth Krishna <ananth.krishna.r@intel.com>
+ * Author: Durgadoss <durgadoss.r@intel.com>
+ */
+
+#define pr_fmt(fmt) "intel_mid_thermal: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/param.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/thermal.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_gpadc.h>
+#include <asm/intel_mid_thermal.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#define DRIVER_NAME "msic_thermal"
+
+/* Cooling device attributes */
+#define SOC_IPC_COMMAND 0xCF
+
+/*
+ * Thermal throttle levels as seen by the generic cooling-device interface
+ * (the 'state' values passed to soc_set_cur_state()).
+ *
+ * BUGFIX: the enums previously read "} thermal_state;" and
+ * "} soc_skin_state;", which accidentally defined two unused, non-static
+ * file-scope variables named after the enums. Only the enumerator
+ * constants are used, so the stray variables are dropped.
+ */
+enum {
+ NORMAL = 0,
+ WARNING,
+ ALERT,
+ CRITICAL
+};
+
+/*
+ * Skin-state codes actually sent to the SCU in the throttle IPC.
+ * SOC_SKIN_WARM deliberately skips value 1 (see soc_set_cur_state()).
+ */
+enum {
+ SOC_SKIN_NORMAL = 0,
+ SOC_SKIN_WARM = 2,
+ SOC_SKIN_PROCHOT,
+ SOC_MAX_STATES
+};
+
+/* MSIC die attributes */
+#define MSIC_DIE_ADC_MIN 488
+#define MSIC_DIE_ADC_MAX 1004
+
+#define TABLE_LENGTH 24
+/*
+ * ADC code vs Temperature table
+ * This table will be different for different thermistors
+ * Row 0: ADC code
+ * Row 1: Temperature (in degree celsius)
+ */
+static const int adc_code[2][TABLE_LENGTH] = {
+ {977, 961, 941, 917, 887, 853, 813, 769, 720, 669, 615, 561, 508, 456,
+ 407, 357, 315, 277, 243, 212, 186, 162, 140, 107},
+ {-20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60,
+ 65, 70, 75, 80, 85, 90, 100},
+ };
+
+struct ts_cache_info {
+ bool is_cached_data_initialized;
+ struct mutex lock;
+ int *cached_values;
+ unsigned long last_updated;
+};
+
+struct soc_cooling_device_info {
+ unsigned long soc_state;
+ struct mutex lock_cool_state;
+};
+
+static struct soc_cooling_device_info soc_cdev_info;
+
+struct platform_info {
+ struct platform_device *pdev;
+ struct thermal_zone_device **tzd;
+ struct ts_cache_info cacheinfo;
+ /* ADC handle used to read sensor temperature values */
+ void *therm_adc_handle;
+ struct thermal_cooling_device *soc_cdev;
+ int num_sensors;
+ int soc_cooling;
+ struct intel_mid_thermal_sensor *sensors;
+};
+
+static struct platform_info *platforminfo;
+
+struct thermal_device_info {
+ struct intel_mid_thermal_sensor *sensor;
+};
+
+/* SoC cooling device callbacks */
+
+/*
+ * soc_get_max_state - report the highest throttle state index.
+ * States run 0..SOC_MAX_STATES-1; always succeeds.
+ */
+static int soc_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ /* SoC has 4 levels of throttling from 0 to 3 */
+ *state = SOC_MAX_STATES - 1;
+ return 0;
+}
+
+/*
+ * soc_get_cur_state - return the last throttle state successfully
+ * written to the SCU (cached in soc_cdev_info.soc_state, serialized
+ * by lock_cool_state).
+ */
+static int soc_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ mutex_lock(&soc_cdev_info.lock_cool_state);
+ *state = soc_cdev_info.soc_state;
+ mutex_unlock(&soc_cdev_info.lock_cool_state);
+ return 0;
+}
+
+/*
+ * soc_set_cur_state - translate a generic cooling state (NORMAL..CRITICAL)
+ * into a skin-state code and send it to the SCU over rpmsg.
+ *
+ * The cached soc_state is only updated when the IPC succeeds, so
+ * soc_get_cur_state() always reflects what the firmware last accepted.
+ * Returns 0 on success, -EINVAL for an out-of-range state, or the
+ * rpmsg_send_generic_command() error.
+ */
+static int soc_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ int ret;
+ if (state > SOC_MAX_STATES - 1) {
+ pr_err("Invalid SoC throttle state:%ld\n", state);
+ return -EINVAL;
+ }
+
+ switch (state) {
+ /* SoC De-Throttle */
+ case NORMAL:
+ state = SOC_SKIN_NORMAL;
+ break;
+ case WARNING:
+ /*
+ * New state is assigned based on present state.
+ * State 1 can be reached from state 0 or 2.
+ * State 0 to 1 means skin WARM.
+ * state 2 to 1 means skin no longer PROCHOT but WARM
+ */
+ state = SOC_SKIN_WARM;
+ break;
+ /* SoC Throttle, PROCHOT */
+ case ALERT:
+ case CRITICAL:
+ state = SOC_SKIN_PROCHOT;
+ break;
+ }
+ /* Send IPC command to throttle SoC */
+ /*
+ * NOTE(review): this sends the first 4 bytes of an unsigned long
+ * (8 bytes on 64-bit), i.e. it assumes a little-endian CPU and a
+ * 32-bit SCU payload — confirm against the IPC spec.
+ */
+ mutex_lock(&soc_cdev_info.lock_cool_state);
+ ret = rpmsg_send_generic_command(SOC_IPC_COMMAND, 0,
+ (u8 *) &state, 4, NULL, 0);
+ if (ret)
+ pr_err("IPC_COMMAND failed: %d\n", ret);
+ else
+ soc_cdev_info.soc_state = state;
+
+ mutex_unlock(&soc_cdev_info.lock_cool_state);
+ return ret;
+}
+
+static struct thermal_cooling_device_ops soc_cooling_ops = {
+ .get_max_state = soc_get_max_state,
+ .get_cur_state = soc_get_cur_state,
+ .set_cur_state = soc_set_cur_state,
+};
+
+/*
+ * register_soc_as_cdev - expose the SoC as a thermal cooling device "SoC".
+ * On failure soc_cdev is reset to NULL so later unregister is a no-op.
+ * Returns 0 on success or the PTR_ERR from the register call.
+ */
+static int register_soc_as_cdev(void)
+{
+ int ret = 0;
+ platforminfo->soc_cdev = thermal_cooling_device_register("SoC", NULL,
+ &soc_cooling_ops);
+ if (IS_ERR(platforminfo->soc_cdev)) {
+ ret = PTR_ERR(platforminfo->soc_cdev);
+ platforminfo->soc_cdev = NULL;
+ }
+ return ret;
+}
+
+/* Counterpart of register_soc_as_cdev(); safe if soc_cdev is NULL. */
+static void unregister_soc_as_cdev(void)
+{
+ thermal_cooling_device_unregister(platforminfo->soc_cdev);
+}
+
+/**
+ * is_valid_adc - checks whether the adc code is within the defined range
+ * @adc_val: ADC code to validate
+ * @min: minimum value for the sensor
+ * @max: maximum value for the sensor
+ *
+ * Returns non-zero when @adc_val lies in [@min, @max]. Pure comparison,
+ * does not sleep.
+ */
+static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
+{
+ return (adc_val >= min) && (adc_val <= max);
+}
+
+/**
+ * find_adc_code - searches the ADC code using binary search
+ * @val: value to find in the array
+ *
+ * This function does binary search on adc_code[0], which is sorted in
+ * 'descending' order. Returns the index of an exact match, or the index
+ * of the smaller neighbour when @val falls between two table entries,
+ * or -1 when @val is outside the table range.
+ */
+static int find_adc_code(uint16_t val)
+{
+ int left = 0;
+ int right = TABLE_LENGTH - 1;
+ int mid;
+ while (left <= right) {
+ mid = (left + right)/2;
+ /* Exact hit, or val sits between adc_code[0][mid-1] and [mid] */
+ if (val == adc_code[0][mid] ||
+ (mid > 0 &&
+ val > adc_code[0][mid] && val < adc_code[0][mid-1]))
+ return mid;
+ else if (val > adc_code[0][mid])
+ right = mid - 1;
+ else if (val < adc_code[0][mid])
+ left = mid + 1;
+ }
+ return -1;
+}
+
+/**
+ * linear_interpolate - does interpolation to find temperature
+ * Returns the temperature in milli degree celsius
+ * @adc_val: ADC code(x) at which temperature(y) should be found
+ * @indx: index of the minimum(x0) of the two ADC codes
+ *
+ * Caller guarantees indx > 0 (find_adc_code() only returns an
+ * in-between index for mid > 0), so indx-1 is always valid.
+ */
+static int linear_interpolate(int indx, uint16_t adc_val)
+{
+ int x = adc_val;
+ int x0 = adc_code[0][indx];
+ int x1 = adc_code[0][indx - 1];
+ int y0 = adc_code[1][indx];
+ int y1 = adc_code[1][indx - 1];
+
+ /*
+ * Find y:
+ * Of course, we can avoid these variables, but keep them
+ * for readability and maintainability.
+ * The denominator cannot be zero: adc_code[0] is strictly
+ * decreasing, so x1 != x0.
+ */
+ int numerator = (x-x0)*y1 + (x1-x)*y0;
+ int denominator = x1-x0;
+
+ /*
+ * We have to report the temperature in milli degree celsius.
+ * So, to reduce the loss of precision, do (Nr*1000)/Dr, instead
+ * of (Nr/Dr)*1000.
+ */
+ return (numerator * 1000)/denominator;
+}
+
+/**
+ * adc_to_temp - converts the ADC code to temperature in milli degree C
+ * @sensor: sensor descriptor; sensor->direct selects linear conversion
+ * using sensor->slope/intercept instead of the lookup table
+ * @adc_val: the adc_val that needs to be converted
+ * @tp: temperature return value, in milli degree celsius
+ *
+ * Returns 0 on success, -ERANGE when @adc_val is outside the valid
+ * range for the sensor.
+ */
+static int adc_to_temp(struct intel_mid_thermal_sensor *sensor,
+ uint16_t adc_val, long *tp)
+{
+ int indx;
+
+ /* Direct conversion for msic die temperature */
+ if (sensor->direct) {
+ if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
+ *tp = sensor->slope * adc_val - sensor->intercept;
+ return 0;
+ }
+ return -ERANGE;
+ }
+
+ indx = find_adc_code(adc_val);
+ if (indx < 0)
+ return -ERANGE;
+
+ if (adc_code[0][indx] == adc_val) {
+ /* Convert temperature in celsius to milli degree celsius */
+ *tp = adc_code[1][indx] * 1000;
+ return 0;
+ }
+
+ /*
+ * The ADC code is in between two values directly defined in the
+ * table. So, do linear interpolation to calculate the temperature.
+ */
+ *tp = linear_interpolate(indx, adc_val);
+ return 0;
+}
+
+/*
+ * skin0_temp_correlation - estimate skin0 temperature from a sensor
+ * reading using the per-sensor linear fit: (temp * slope)/1000 + intercept.
+ * @info is the intel_mid_thermal_sensor, @temp the raw temperature in mC.
+ * Always returns 0.
+ */
+int skin0_temp_correlation(void *info, long temp, long *res)
+{
+ struct intel_mid_thermal_sensor *sensor = info;
+
+ *res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+ return 0;
+}
+
+/*
+ * bptherm_temp_correlation - identical linear correlation for the
+ * bptherm sensor. Kept as a separate exported symbol because platform
+ * data references the correlation callbacks by name.
+ */
+int bptherm_temp_correlation(void *info, long temp, long *res)
+{
+ struct intel_mid_thermal_sensor *sensor = info;
+
+ *res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+ return 0;
+}
+
+/*
+ * skin1_temp_correlation - estimate skin1 temperature.
+ *
+ * Applies the sensor's own linear fit, then raises the result to the
+ * highest correlated temperature among the dependent sensors listed in
+ * skin1_private_info (i.e. reports the max of all estimates).
+ *
+ * NOTE(review): reads cacheinfo.cached_values without taking
+ * cacheinfo.lock itself; this is safe when invoked from mid_read_temp()
+ * (which holds the lock) — confirm there are no other call sites.
+ */
+int skin1_temp_correlation(void *info, long temp, long *res)
+{
+ struct intel_mid_thermal_sensor *sensor = info;
+ struct intel_mid_thermal_sensor *dsensor; /* dependent sensor */
+ struct skin1_private_info *skin_info;
+ long sensor_temp = 0, curr_temp;
+ int ret, index;
+
+ skin_info = sensor->priv;
+
+ *res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+ /* If we do not have dependent sensors, just return. Not an error */
+ if (!skin_info || !skin_info->dependent || !skin_info->sensors)
+ return 0;
+
+ for (index = 0; index < skin_info->dependent; index++) {
+ if (!skin_info->sensors[index])
+ continue;
+
+ dsensor = skin_info->sensors[index];
+
+ /* Convert the dependent sensor's cached ADC value */
+ ret = adc_to_temp(dsensor,
+ platforminfo->cacheinfo.cached_values[dsensor->index],
+ &curr_temp);
+ if (ret)
+ return ret;
+
+ if (dsensor->temp_correlation)
+ dsensor->temp_correlation(dsensor, curr_temp,
+ &sensor_temp);
+
+ /* Keep the maximum of all correlated estimates */
+ if (sensor_temp > *res)
+ *res = sensor_temp;
+ }
+
+ return 0;
+}
+
+/**
+ * mid_read_temp - read sensors for temperature
+ * @tzd: thermal zone whose devdata holds the sensor descriptor
+ * @temp: holds the current temperature for the sensor after reading
+ *
+ * Reads the adc_code from the channel and converts it to real
+ * temperature (milli degree C). All channels are sampled in one GPADC
+ * call and cached; the cache is refreshed at most once per second (HZ),
+ * serialized by cacheinfo.lock.
+ *
+ * Can sleep
+ */
+static int mid_read_temp(struct thermal_zone_device *tzd, long *temp)
+{
+ struct thermal_device_info *td_info = tzd->devdata;
+ int ret;
+ long curr_temp;
+ int indx = td_info->sensor->index; /* Required Index */
+
+ mutex_lock(&platforminfo->cacheinfo.lock);
+
+ /* Re-sample all channels if the cache is cold or older than 1s */
+ if (!platforminfo->cacheinfo.is_cached_data_initialized ||
+ time_after(jiffies, platforminfo->cacheinfo.last_updated + HZ)) {
+ ret = get_gpadc_sample(platforminfo->therm_adc_handle, 1,
+ platforminfo->cacheinfo.cached_values);
+ if (ret)
+ goto exit;
+ platforminfo->cacheinfo.last_updated = jiffies;
+ platforminfo->cacheinfo.is_cached_data_initialized = true;
+ }
+
+ /* Convert ADC value to temperature */
+ ret = adc_to_temp(td_info->sensor,
+ platforminfo->cacheinfo.cached_values[indx], &curr_temp);
+ if (ret)
+ goto exit;
+
+ /* Optional per-sensor correlation (e.g. skin estimates) */
+ if (td_info->sensor->temp_correlation)
+ ret = td_info->sensor->temp_correlation(td_info->sensor,
+ curr_temp, temp);
+ else
+ *temp = curr_temp;
+
+exit:
+ mutex_unlock(&platforminfo->cacheinfo.lock);
+ return ret;
+}
+
+/**
+ * initialize_sensor - allocate the per-zone devdata wrapping a sensor.
+ * @sensor: sensor descriptor to wrap
+ *
+ * Returns the allocated thermal_device_info, or NULL on allocation
+ * failure.
+ *
+ * NOTE(review): the probe path passes this result straight to
+ * thermal_zone_device_register() without a NULL check — a failed
+ * allocation would register a zone with NULL devdata; worth confirming.
+ *
+ * Context: can sleep
+ */
+static struct thermal_device_info *initialize_sensor(
+ struct intel_mid_thermal_sensor *sensor)
+{
+ struct thermal_device_info *td_info =
+ kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+ if (!td_info)
+ return NULL;
+
+ td_info->sensor = sensor;
+
+ return td_info;
+}
+
+/**
+ * mid_thermal_resume - resume routine
+ * @dev: device structure
+ *
+ * No-op: sensor state is fully re-read on the next get_temp, so nothing
+ * needs restoring here.
+ */
+static int mid_thermal_resume(struct device *dev)
+{
+ return 0;
+}
+
+/**
+ * mid_thermal_suspend - suspend routine
+ * @dev: device structure
+ *
+ * No-op: there is no hardware state to save for these sensors.
+ */
+static int mid_thermal_suspend(struct device *dev)
+{
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_THERMAL
+/* Debug-only accessors letting userspace tune the per-sensor linear fit. */
+
+/* Report the sensor's current slope. */
+static int read_slope(struct thermal_zone_device *tzd, long *slope)
+{
+ struct thermal_device_info *td_info = tzd->devdata;
+
+ *slope = td_info->sensor->slope;
+
+ return 0;
+}
+
+/* Overwrite the sensor's slope (takes effect on the next conversion). */
+static int update_slope(struct thermal_zone_device *tzd, long slope)
+{
+ struct thermal_device_info *td_info = tzd->devdata;
+
+ td_info->sensor->slope = slope;
+
+ return 0;
+}
+
+/* Report the sensor's current intercept. */
+static int read_intercept(struct thermal_zone_device *tzd, long *intercept)
+{
+ struct thermal_device_info *td_info = tzd->devdata;
+
+ *intercept = td_info->sensor->intercept;
+
+ return 0;
+}
+
+/* Overwrite the sensor's intercept (takes effect on the next conversion). */
+static int update_intercept(struct thermal_zone_device *tzd, long intercept)
+{
+ struct thermal_device_info *td_info = tzd->devdata;
+
+ td_info->sensor->intercept = intercept;
+
+ return 0;
+}
+#endif
+
+/**
+ * read_curr_temp - reads the current temperature and stores in temp
+ * @tzd: thermal zone to read; NULL yields -EINVAL
+ * @temp: holds the current temperature value after reading (milli C)
+ *
+ * Thin guard wrapper around mid_read_temp() used as the framework's
+ * get_temp callback.
+ *
+ * Can sleep
+ */
+static int read_curr_temp(struct thermal_zone_device *tzd, long *temp)
+{
+ return (tzd) ? mid_read_temp(tzd, temp) : -EINVAL;
+}
+
+/* Can't be const: the thermal core expects a mutable ops struct here. */
+static struct thermal_zone_device_ops tzd_ops = {
+ .get_temp = read_curr_temp,
+#ifdef CONFIG_DEBUG_THERMAL
+ .get_slope = read_slope,
+ .set_slope = update_slope,
+ .get_intercept = read_intercept,
+ .set_intercept = update_intercept,
+#endif
+};
+
+/**
+ * mid_thermal_probe - mfld thermal initialize
+ * @pdev: platform device structure
+ *
+ * Initializes the hardware and registers all the sensors with the
+ * generic thermal framework. Can sleep.
+ *
+ * Returns 0 on success or a negative errno. BUGFIX: the allocation
+ * failure paths previously jumped to cleanup with ret still 0, so the
+ * probe reported success after freeing everything; and the zone
+ * registration failure path leaked the allocated ADC channels.
+ */
+static int mid_thermal_probe(struct platform_device *pdev)
+{
+ int ret;
+ int i;
+ int *adc_channel_info;
+ struct intel_mid_thermal_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ platforminfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!platforminfo)
+ return -ENOMEM;
+
+ platforminfo->num_sensors = pdata->num_sensors;
+ platforminfo->soc_cooling = pdata->soc_cooling;
+ platforminfo->sensors = pdata->sensors;
+
+ platforminfo->tzd = kzalloc(
+ (sizeof(struct thermal_zone_device *) * platforminfo->num_sensors),
+ GFP_KERNEL);
+ if (!platforminfo->tzd) {
+ ret = -ENOMEM; /* was: returned 0 on this path */
+ goto platforminfo_alloc_fail;
+ }
+
+ platforminfo->cacheinfo.cached_values =
+ kzalloc((sizeof(int) * platforminfo->num_sensors), GFP_KERNEL);
+ if (!platforminfo->cacheinfo.cached_values) {
+ ret = -ENOMEM; /* was: returned 0 on this path */
+ goto tzd_alloc_fail;
+ }
+
+ adc_channel_info = kzalloc((sizeof(int) * platforminfo->num_sensors),
+ GFP_KERNEL);
+ if (!adc_channel_info) {
+ ret = -ENOMEM; /* was: returned 0 on this path */
+ goto cachedinfo_alloc_fail;
+ }
+
+ /* initialize mutex locks */
+ mutex_init(&platforminfo->cacheinfo.lock);
+
+ if (platforminfo->soc_cooling)
+ mutex_init(&soc_cdev_info.lock_cool_state);
+
+ for (i = 0; i < platforminfo->num_sensors; i++)
+ adc_channel_info[i] = platforminfo->sensors[i].adc_channel;
+
+ /* Allocate ADC channels for all sensors */
+ platforminfo->therm_adc_handle = gpadc_alloc_channels(
+ platforminfo->num_sensors, adc_channel_info);
+ if (!platforminfo->therm_adc_handle) {
+ ret = -ENOMEM;
+ goto adc_channel_alloc_fail;
+ }
+
+ /* Register each sensor with the generic thermal framework*/
+ for (i = 0; i < platforminfo->num_sensors; i++) {
+ platforminfo->tzd[i] = thermal_zone_device_register(
+ platforminfo->sensors[i].name, 0, 0,
+ initialize_sensor(&platforminfo->sensors[i]),
+ &tzd_ops, NULL, 0, 0);
+ if (IS_ERR(platforminfo->tzd[i]))
+ goto reg_fail;
+ }
+
+ platforminfo->pdev = pdev;
+ platform_set_drvdata(pdev, platforminfo);
+
+ /* Register SoC as a cooling device */
+ if (platforminfo->soc_cooling) {
+ ret = register_soc_as_cdev();
+ /* Log this, but keep the driver loaded */
+ if (ret) {
+ dev_err(&pdev->dev,
+ "register_soc_as_cdev failed:%d\n", ret);
+ }
+ }
+
+ kfree(adc_channel_info);
+ return 0;
+
+reg_fail:
+ ret = PTR_ERR(platforminfo->tzd[i]);
+ while (--i >= 0)
+ thermal_zone_device_unregister(platforminfo->tzd[i]);
+ /* BUGFIX: the ADC channels were leaked on this path */
+ intel_mid_gpadc_free(platforminfo->therm_adc_handle);
+adc_channel_alloc_fail:
+ kfree(adc_channel_info);
+cachedinfo_alloc_fail:
+ kfree(platforminfo->cacheinfo.cached_values);
+tzd_alloc_fail:
+ kfree(platforminfo->tzd);
+platforminfo_alloc_fail:
+ kfree(platforminfo);
+ return ret;
+}
+
+/**
+ * mid_thermal_remove - mfld thermal finalize
+ * @pdev: platform device structure
+ *
+ * MFLD thermal remove unregisters all the sensors from the generic
+ * thermal framework, releases the ADC channels and frees the driver
+ * state. Can sleep.
+ *
+ * NOTE(review): operates on the file-scope 'platforminfo' rather than
+ * platform_get_drvdata(pdev); fine for a single instance — confirm the
+ * device can never be instantiated twice.
+ */
+static int mid_thermal_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < platforminfo->num_sensors; i++)
+ thermal_zone_device_unregister(platforminfo->tzd[i]);
+
+ /* Unregister SoC as cooling device */
+ if (platforminfo->soc_cooling)
+ unregister_soc_as_cdev();
+
+ /* Free the allocated ADC channels */
+ if (platforminfo->therm_adc_handle)
+ intel_mid_gpadc_free(platforminfo->therm_adc_handle);
+
+ kfree(platforminfo->cacheinfo.cached_values);
+ kfree(platforminfo->tzd);
+ kfree(platforminfo);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/*********************************************************************
+ * Driver initialisation and finalization
+ *********************************************************************/
+
+
+/* Platform device functionality */
+
+static const struct dev_pm_ops msic_thermal_pm_ops = {
+ .suspend = mid_thermal_suspend,
+ .resume = mid_thermal_resume,
+};
+
+/*
+ * Platform device ID table. BUGFIX: platform_match_id() walks this table
+ * until it hits an entry with an empty name, so it must be terminated
+ * with a zeroed sentinel — otherwise the bus match code reads past the
+ * end of the array.
+ */
+static const struct platform_device_id mid_therm_table[] = {
+ { DRIVER_NAME, 1 },
+ { },
+};
+
+/* Platform driver glue: probe/remove plus no-op PM callbacks above. */
+static struct platform_driver mid_therm_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &msic_thermal_pm_ops,
+ },
+ .probe = mid_thermal_probe,
+ .remove = mid_thermal_remove,
+ .id_table = mid_therm_table,
+};
+
+/* Registered from the rpmsg probe rather than module_init — see below. */
+static int mid_therm_module_init(void)
+{
+ return platform_driver_register(&mid_therm_driver);
+}
+
+static void mid_therm_module_exit(void)
+{
+ platform_driver_unregister(&mid_therm_driver);
+}
+
+
+/* RPMSG related functionality */
+
+/*
+ * mid_therm_rpmsg_probe - rpmsg channel probe.
+ *
+ * The thermal platform driver is only registered once the SCU rpmsg
+ * channel exists, since soc_set_cur_state() needs it for throttle IPCs.
+ * Returns -ENODEV for a NULL channel, otherwise the platform driver
+ * registration result.
+ */
+static int mid_therm_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+ int ret = 0;
+ if (rpdev == NULL) {
+ pr_err("rpmsg channel not created\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&rpdev->dev, "Probed mid_therm rpmsg device\n");
+
+ ret = mid_therm_module_init();
+out:
+ return ret;
+}
+
+static void mid_therm_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+ mid_therm_module_exit();
+ dev_info(&rpdev->dev, "Removed mid_therm rpmsg device\n");
+}
+
+/*
+ * mid_therm_rpmsg_cb - rpmsg receive callback.
+ *
+ * No inbound messages are expected on this channel (it is only used to
+ * send throttle commands), so warn and hex-dump any payload that shows
+ * up. BUGFIX: removed the stray comma in the warning text
+ * ("unexpected, message").
+ */
+static void mid_therm_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ dev_warn(&rpdev->dev, "unexpected message\n");
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+}
+
+static struct rpmsg_device_id mid_therm_id_table[] = {
+ { .name = "rpmsg_mid_thermal" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(rpmsg, mid_therm_id_table);
+
+static struct rpmsg_driver mid_therm_rpmsg_driver = {
+ .drv.name = DRIVER_NAME,
+ .drv.owner = THIS_MODULE,
+ .probe = mid_therm_rpmsg_probe,
+ .callback = mid_therm_rpmsg_cb,
+ .remove = mid_therm_rpmsg_remove,
+ .id_table = mid_therm_id_table,
+};
+
+static int __init mid_therm_rpmsg_init(void)
+{
+ return register_rpmsg_driver(&mid_therm_rpmsg_driver);
+}
+
+static void __exit mid_therm_rpmsg_exit(void)
+{
+ return unregister_rpmsg_driver(&mid_therm_rpmsg_driver);
+}
+
+
+/* Changing _init call to make the thermal driver
+ * load _after_ the GPADC driver
+ * module_init(mid_therm_rpmsg_init);
+ */
+late_initcall(mid_therm_rpmsg_init);
+module_exit(mid_therm_rpmsg_exit);
+
+MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
+MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * intel_mrfl_thermal.c - Intel Merrifield Platform Thermal Driver
+ *
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * DEVICE_NAME: Intel Merrifield platform - PMIC: Thermal Monitor
+ */
+
+#define pr_fmt(fmt) "intel_mrfl_thermal: " fmt
+
+#include <linux/pm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_basincove_gpadc.h>
+#include <asm/intel_mid_thermal.h>
+#include <linux/iio/consumer.h>
+
+#define DRIVER_NAME "bcove_thrm"
+
+/* Number of Thermal sensors on the PMIC */
+#define PMIC_THERMAL_SENSORS 4
+
+/* Registers that govern Thermal Monitoring */
+#define THRMMONCFG 0xB3
+#define THRMMONCTL 0xB4
+#define THRMIRQ 0x04
+#define MTHRMIRQ 0x0F
+#define STHRMIRQ 0xB2
+#define IRQLVL1 0x01
+#define MIRQLVL1 0x0C
+#define IRQ_MASK_ALL 0x0F
+
+/* PMIC SRAM base address and offset for Thermal register */
+#define PMIC_SRAM_BASE_ADDR 0xFFFFF610
+#define PMIC_SRAM_THRM_OFFSET 0x03
+#define IOMAP_SIZE 0x04
+
+/* NVM BANK REGISTER */
+#define EEPROM_CTRL 0x1FE
+#define EEPROM_REG15 0x1EE
+#define EEPROM_BANK1_SELECT 0x02
+#define EEPROM_BANK1_UNSELECT 0x00
+
+#define PMICALRT (1 << 3)
+#define SYS2ALRT (1 << 2)
+#define SYS1ALRT (1 << 1)
+#define SYS0ALRT (1 << 0)
+#define THERM_EN (1 << 0)
+#define THERM_ALRT (1 << 2)
+
+/* ADC to Temperature conversion table length */
+#define TABLE_LENGTH 34
+#define TEMP_INTERVAL 5
+
+/* Default _max 85 C */
+#define DEFAULT_MAX_TEMP 85
+
+/* Constants defined in BasinCove PMIC spec */
+#define PMIC_DIE_ADC_MIN 395
+#define PMIC_DIE_ADC_MAX 661
+#define PMIC_DIE_TEMP_MIN -40
+#define PMIC_DIE_TEMP_MAX 125
+#define ADC_VAL_27C 470
+#define ADC_COEFFICIENT 675
+#define TEMP_OFFSET 27000
+
+/* 'enum' of Thermal sensors */
+enum thermal_sensors { SYS0, SYS1, SYS2, PMIC_DIE, _COUNT };
+
+/*
+ * Alert registers store the 'alert' temperature for each sensor,
+ * as 10 bit ADC code. The higher two bits are stored in bits[0:1] of
+ * alert_regs_h. The lower eight bits are stored in alert_regs_l.
+ * The hysteresis value is stored in bits[2:6] of alert_regs_h.
+ * Order: SYS0 SYS1 SYS2 PMIC_DIE
+ *
+ * static const int alert_regs_l[] = { 0xB7, 0xB9, 0xBB, 0xC1 };
+ */
+static const int alert_regs_h[] = { 0xB6, 0xB8, 0xBA, 0xC0 };
+
+/*
+ * ADC code vs Temperature table
+ * This table will be different for different thermistors
+ * Row 0: ADC code
+ * Row 1: Temperature (in degree celsius)
+ */
+static const int adc_code[2][TABLE_LENGTH] = {
+ {952, 932, 906, 877, 843, 804, 761, 714, 665, 614,
+ 563, 512, 462, 415, 370, 329, 291, 257, 226, 199,
+ 174, 153, 135, 119, 104, 92, 81, 72, 64, 56,
+ 50, 45, 40, 36},
+ {-40, -35, -30, -25, -20, -15, -10, -5, 0, 5,
+ 10, 15, 20, 25, 30, 35, 40, 45, 50, 55,
+ 60, 65, 70, 75, 80, 85, 90, 95, 100, 105,
+ 110, 115, 120, 125},
+ };
+
+static DEFINE_MUTEX(thrm_update_lock);
+
+struct thermal_device_info {
+ struct intel_mid_thermal_sensor *sensor;
+};
+
+struct thermal_data {
+ struct platform_device *pdev;
+ struct iio_channel *iio_chan;
+ struct thermal_zone_device **tzd;
+ void *thrm_addr;
+ unsigned int irq;
+ /* Caching information */
+ bool is_initialized;
+ unsigned long last_updated;
+ int cached_vals[PMIC_THERMAL_SENSORS];
+ int num_sensors;
+ struct intel_mid_thermal_sensor *sensors;
+};
+static struct thermal_data *tdata;
+
+/*
+ * adc_to_pmic_die_temp - linear ADC-code -> temperature conversion for
+ * the PMIC die sensor, using the spec constants (ADC_VAL_27C anchor,
+ * ADC_COEFFICIENT slope, TEMP_OFFSET = 27 C in mC).
+ * Returns temperature in milli degree C.
+ */
+static inline int adc_to_pmic_die_temp(unsigned int val)
+{
+ /* return temperature in mC */
+ return (val - ADC_VAL_27C) * ADC_COEFFICIENT + TEMP_OFFSET;
+}
+
+/* Inverse of adc_to_pmic_die_temp(); @temp is in whole degrees C. */
+static inline int pmic_die_temp_to_adc(int temp)
+{
+ /* 'temp' is in C, convert to mC and then do calculations */
+ return ((temp * 1000) - TEMP_OFFSET) / ADC_COEFFICIENT + ADC_VAL_27C;
+}
+
+/**
+ * find_adc_code - searches the ADC code using binary search
+ * @val: value to find in the array
+ *
+ * Binary search over adc_code[0], which is sorted in 'descending'
+ * order. Returns the index of an exact match, the index of the smaller
+ * neighbour when @val falls between two entries, or -EINVAL when @val
+ * is outside the table (note: the MID driver's copy returns -1 here).
+ */
+static int find_adc_code(uint16_t val)
+{
+ int left = 0;
+ int right = TABLE_LENGTH - 1;
+ int mid;
+ while (left <= right) {
+ mid = (left + right)/2;
+ /* Exact hit, or val sits between adc_code[0][mid-1] and [mid] */
+ if (val == adc_code[0][mid] ||
+ (mid > 0 &&
+ val > adc_code[0][mid] && val < adc_code[0][mid-1]))
+ return mid;
+ else if (val > adc_code[0][mid])
+ right = mid - 1;
+ else if (val < adc_code[0][mid])
+ left = mid + 1;
+ }
+ return -EINVAL;
+}
+
+/**
+ * adc_to_temp - converts the ADC code to temperature in mC
+ * @direct: true if the sensor uses direct conversion (PMIC die)
+ * @adc_val: the ADC code to be converted
+ * @tp: temperature return value, in milli degree celsius
+ *
+ * Returns 0 on success, -EINVAL for out-of-range codes or table-lookup
+ * failure.
+ *
+ * NOTE(review): @tp is unsigned long but table temperatures go down to
+ * -40 C, so negative results wrap — confirm callers handle this.
+ *
+ * Can sleep (issues SCU IPCs on the direct path).
+ */
+static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
+{
+ int x0, x1, y0, y1;
+ int nr, dr; /* Numerator & Denominator */
+ int indx;
+ int x = adc_val;
+ int8_t pmic_temp_offset;
+
+ /* Direct conversion for pmic die temperature */
+ if (direct) {
+ if (adc_val < PMIC_DIE_ADC_MIN || adc_val > PMIC_DIE_ADC_MAX)
+ return -EINVAL;
+
+ /* An offset added for pmic temp from NVM in TNG B0 */
+ /*
+ * NOTE(review): selects NVM bank 1, reads the signed offset,
+ * then deselects; ipc_ioread8 is passed an int8_t* — confirm
+ * the prototype takes u8* and the sign handling is intended.
+ */
+ intel_scu_ipc_iowrite8(EEPROM_CTRL, EEPROM_BANK1_SELECT);
+ intel_scu_ipc_ioread8(EEPROM_REG15, &pmic_temp_offset);
+ intel_scu_ipc_iowrite8(EEPROM_CTRL, EEPROM_BANK1_UNSELECT);
+
+ adc_val = adc_val + pmic_temp_offset;
+
+ *tp = adc_to_pmic_die_temp(adc_val);
+ return 0;
+ }
+
+ indx = find_adc_code(adc_val);
+ if (indx < 0)
+ return -EINVAL;
+
+ if (adc_code[0][indx] == adc_val) {
+ /* Exact table hit: convert degrees C to milli degrees C */
+ *tp = adc_code[1][indx] * 1000;
+ return 0;
+ }
+
+ /*
+ * The ADC code is in between two values directly defined in the
+ * table. So, do linear interpolation to calculate the temperature.
+ */
+ x0 = adc_code[0][indx];
+ x1 = adc_code[0][indx - 1];
+ y0 = adc_code[1][indx];
+ y1 = adc_code[1][indx - 1];
+
+ /*
+ * Find y:
+ * Of course, we can avoid these variables, but keep them
+ * for readability and maintainability.
+ */
+ nr = (x-x0)*y1 + (x1-x)*y0;
+ dr = x1-x0;
+
+ if (!dr)
+ return -EINVAL;
+ /*
+ * We have to report the temperature in milli degree celsius.
+ * So, to reduce the loss of precision, do (Nr*1000)/Dr, instead
+ * of (Nr/Dr)*1000.
+ */
+ *tp = (nr * 1000)/dr;
+
+ return 0;
+}
+
+/**
+ * temp_to_adc - converts the temperature(in C) to ADC code
+ * @direct: true if the sensor uses direct conversion
+ * @temp: the temperature to be converted
+ * @adc_val: ADC code return value
+ *
+ * Can sleep
+ *
+ * Returns 0 on success, -EINVAL when @temp is outside the table range.
+ */
+static int temp_to_adc(int direct, int temp, int *adc_val)
+{
+	int indx;
+	int x0, x1, y0, y1;
+	int nr, dr; /* Numerator & Denominator */
+	int x = temp;
+
+	/* Direct conversion for pmic die temperature */
+	if (direct) {
+		if (temp < PMIC_DIE_TEMP_MIN || temp > PMIC_DIE_TEMP_MAX)
+			return -EINVAL;
+
+		*adc_val = pmic_die_temp_to_adc(temp);
+		return 0;
+	}
+
+	/* Row 1 of the table is ascending, so bounds-check against its ends */
+	if (temp < adc_code[1][0] || temp > adc_code[1][TABLE_LENGTH - 1])
+		return -EINVAL;
+
+
+	/* Find the 'indx' of this 'temp' in the table */
+	indx = (temp - adc_code[1][0]) / TEMP_INTERVAL;
+
+	if (temp == adc_code[1][indx]) {
+		*adc_val = adc_code[0][indx];
+		return 0;
+	}
+
+	/*
+	 * Temperature is not a multiple of 'TEMP_INTERVAL'. So,
+	 * do linear interpolation to obtain a better ADC code.
+	 */
+	x0 = adc_code[1][indx];
+	x1 = adc_code[1][indx + 1];
+	y0 = adc_code[0][indx];
+	y1 = adc_code[0][indx + 1];
+
+	nr = (x-x0)*y1 + (x1-x)*y0;
+	dr = x1-x0;
+
+	if (!dr)
+		return -EINVAL;
+
+	*adc_val = nr/dr;
+
+	return 0;
+}
+
+/**
+ * set_tmax - sets the given 'adc_val' to the 'alert_reg'
+ * @alert_reg: register address
+ * @adc_val: ADC value to be programmed
+ *
+ * The 10-bit ADC code is split across two registers: bits[8:9] into the
+ * low bits of @alert_reg, bits[0:7] into @alert_reg + 1.
+ *
+ * Not protected. Calling function should handle synchronization.
+ * Can sleep
+ */
+static int set_tmax(int alert_reg, int adc_val)
+{
+	int ret;
+
+	/* Set bits[0:1] of alert_reg_h to bits[8:9] of 'adc_val' */
+	ret = intel_scu_ipc_update_register(alert_reg, (adc_val >> 8), 0x03);
+	if (ret)
+		return ret;
+
+	/* Extract bits[0:7] of 'adc_val' and write them into alert_reg_l */
+	return intel_scu_ipc_iowrite8(alert_reg + 1, adc_val & 0xFF);
+}
+
+/**
+ * program_tmax - programs a default _max value for each sensor
+ * @dev: device pointer
+ *
+ * The last slot of alert_regs_h belongs to the PMIC die sensor, which
+ * uses the direct conversion; all others use the lookup table.
+ *
+ * Can sleep
+ */
+static int program_tmax(struct device *dev)
+{
+	int i, ret;
+	int pmic_die_val, adc_val;
+
+	ret = temp_to_adc(0, DEFAULT_MAX_TEMP, &adc_val);
+	if (ret)
+		return ret;
+
+	ret = temp_to_adc(1, DEFAULT_MAX_TEMP, &pmic_die_val);
+	if (ret)
+		return ret;
+	/*
+	 * Since this function sets max value, do for all sensors even if
+	 * the sensor does not register as a thermal zone.
+	 */
+	for (i = 0; i < PMIC_THERMAL_SENSORS - 1; i++) {
+		ret = set_tmax(alert_regs_h[i], adc_val);
+		if (ret)
+			goto exit_err;
+	}
+
+	/* Set _max for pmic die sensor */
+	ret = set_tmax(alert_regs_h[i], pmic_die_val);
+	if (ret)
+		goto exit_err;
+
+	return ret;
+
+exit_err:
+	dev_err(dev, "set_tmax for channel %d failed:%d\n", i, ret);
+	return ret;
+}
+
+/**
+ * store_trip_hyst - programs the trip hysteresis field of the alert register
+ * @tzd: thermal zone device
+ * @trip: trip point index (unused; hysteresis is per sensor)
+ * @hyst: hysteresis value to program (raw register units, 0-31)
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range @hyst, or an IPC
+ * error code. Can sleep.
+ */
+static int store_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long hyst)
+{
+	int ret;
+	uint8_t data;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	/*
+	 * Hysteresis value is 5 bits wide; also reject negative values,
+	 * which would otherwise corrupt bits outside [2:6] via the shift.
+	 */
+	if (hyst < 0 || hyst > 31)
+		return -EINVAL;
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &data);
+	if (ret)
+		goto ipc_fail;
+
+	/* Set bits [2:6] to value of hyst */
+	data = (data & 0x83) | (hyst << 2);
+
+	ret = intel_scu_ipc_iowrite8(alert_reg, data);
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+/* Read back the 5-bit hysteresis field from the sensor's alert register */
+static int show_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long *hyst)
+{
+	int ret;
+	uint8_t data;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &data);
+	if (!ret)
+		*hyst = (data >> 2) & 0x1F; /* Extract bits[2:6] of data */
+
+	mutex_unlock(&thrm_update_lock);
+
+	return ret;
+}
+
+/* Program a trip point temperature (mC) into the sensor's alert register */
+static int store_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long trip_temp)
+{
+	int ret, adc_val;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	/* Values below 1000 mC (1 C) are assumed to be caller mistakes */
+	if (trip_temp < 1000) {
+		dev_err(&tzd->device, "Temperature should be in mC\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&thrm_update_lock);
+
+	/* Convert from mC to C */
+	trip_temp /= 1000;
+
+	ret = temp_to_adc(td_info->sensor->direct, (int)trip_temp, &adc_val);
+	if (ret)
+		goto exit;
+
+	ret = set_tmax(alert_reg, adc_val);
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+/* Read the programmed trip point back and convert it to mC */
+static int show_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long *trip_temp)
+{
+	int ret, adc_val;
+	uint8_t l, h;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &h);
+	if (ret)
+		goto exit;
+
+	ret = intel_scu_ipc_ioread8(alert_reg + 1, &l);
+	if (ret)
+		goto exit;
+
+	/* Concatenate 'h' and 'l' to get 10-bit ADC code */
+	adc_val = ((h & 0x03) << 8) | l;
+
+	ret = adc_to_temp(td_info->sensor->direct, adc_val, trip_temp);
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+/* All trip points on this PMIC are passive */
+static int show_trip_type(struct thermal_zone_device *tzd,
+			int trip, enum thermal_trip_type *trip_type)
+{
+	/* All are passive trip points */
+	*trip_type = THERMAL_TRIP_PASSIVE;
+
+	return 0;
+}
+
+/*
+ * Report the current temperature (mC) for the zone's sensor.
+ * ADC samples for all sensors are read in one IIO call and cached for
+ * up to one second (HZ jiffies) to avoid hammering the ADC.
+ */
+static int show_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	int ret;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int indx = td_info->sensor->index;
+
+	if (!tdata->iio_chan)
+		return -EINVAL;
+
+	mutex_lock(&thrm_update_lock);
+
+	if (!tdata->is_initialized ||
+			time_after(jiffies, tdata->last_updated + HZ)) {
+		ret = iio_read_channel_all_raw(tdata->iio_chan,
+						tdata->cached_vals);
+		if (ret) {
+			dev_err(&tzd->device, "ADC sampling failed:%d\n", ret);
+			goto exit;
+		}
+		tdata->last_updated = jiffies;
+		tdata->is_initialized = true;
+	}
+
+	ret = adc_to_temp(td_info->sensor->direct, tdata->cached_vals[indx],
+								temp);
+	if (ret)
+		goto exit;
+
+	/* Optional board-specific correction hook supplied by platform data */
+	if (td_info->sensor->temp_correlation)
+		ret = td_info->sensor->temp_correlation(td_info->sensor,
+							*temp, temp);
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_THERMAL
+/* Debug accessors for the per-sensor linear calibration (slope/intercept) */
+static int read_slope(struct thermal_zone_device *tzd, long *slope)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	*slope = td_info->sensor->slope;
+
+	return 0;
+}
+
+static int update_slope(struct thermal_zone_device *tzd, long slope)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	td_info->sensor->slope = slope;
+
+	return 0;
+}
+
+static int read_intercept(struct thermal_zone_device *tzd, long *intercept)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	*intercept = td_info->sensor->intercept;
+
+	return 0;
+}
+
+static int update_intercept(struct thermal_zone_device *tzd, long intercept)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	td_info->sensor->intercept = intercept;
+
+	return 0;
+}
+#endif
+
+/* Set the THERM_EN bit in THRMMONCTL to start PMIC thermal monitoring */
+static int enable_tm(void)
+{
+	int ret;
+	uint8_t data;
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(THRMMONCTL, &data);
+	if (ret)
+		goto ipc_fail;
+
+	ret = intel_scu_ipc_iowrite8(THRMMONCTL, data | THERM_EN);
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+/*
+ * Allocate the per-zone devdata wrapper for one sensor.
+ * Returns NULL on allocation failure; the caller is responsible for
+ * freeing the returned structure (it is not freed by zone unregister).
+ */
+static struct thermal_device_info *initialize_sensor(
+				struct intel_mid_thermal_sensor *sensor)
+{
+	struct thermal_device_info *td_info =
+		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+	if (!td_info)
+		return NULL;
+
+	td_info->sensor = sensor;
+
+	return td_info;
+}
+
+/**
+ * thermal_intrpt - threaded handler for PMIC thermal alert interrupts
+ * @irq: interrupt number
+ * @dev_data: pointer to our struct thermal_data
+ *
+ * Decodes which sensor fired and in which direction, notifies user space
+ * via a uevent and re-unmasks the thermal interrupt.
+ *
+ * Note: the IPC return code must not be returned directly — irqreturn_t
+ * only admits IRQ_NONE/IRQ_HANDLED/IRQ_WAKE_THREAD, so IPC failures are
+ * reported as IRQ_NONE via a dedicated variable.
+ */
+static irqreturn_t thermal_intrpt(int irq, void *dev_data)
+{
+	int ret, sensor, event_type;
+	irqreturn_t irq_ret = IRQ_NONE;
+	uint8_t irq_status;
+	unsigned int irq_data;
+	struct thermal_data *tdata = (struct thermal_data *)dev_data;
+
+	if (!tdata)
+		return IRQ_NONE;
+
+	mutex_lock(&thrm_update_lock);
+
+	/* Interrupt cause mirrored by firmware into PMIC SRAM */
+	irq_data = ioread8(tdata->thrm_addr + PMIC_SRAM_THRM_OFFSET);
+
+	ret = intel_scu_ipc_ioread8(STHRMIRQ, &irq_status);
+	if (ret)
+		goto unlock;
+
+	dev_dbg(&tdata->pdev->dev, "STHRMIRQ: %.2x\n", irq_status);
+
+	/*
+	 * -1 for invalid interrupt
+	 * 1 for LOW to HIGH temperature alert
+	 * 0 for HIGH to LOW temperature alert
+	 */
+	event_type = -1;
+
+	/* Check which interrupt occured and for what event */
+	if (irq_data & PMICALRT) {
+		event_type = !!(irq_status & PMICALRT);
+		sensor = PMIC_DIE;
+	} else if (irq_data & SYS2ALRT) {
+		event_type = !!(irq_status & SYS2ALRT);
+		sensor = SYS2;
+	} else if (irq_data & SYS1ALRT) {
+		event_type = !!(irq_status & SYS1ALRT);
+		sensor = SYS1;
+	} else if (irq_data & SYS0ALRT) {
+		event_type = !!(irq_status & SYS0ALRT);
+		sensor = SYS0;
+	} else {
+		dev_err(&tdata->pdev->dev, "Invalid Interrupt\n");
+		irq_ret = IRQ_HANDLED;
+		goto unlock;
+	}
+
+	if (event_type != -1) {
+		dev_info(&tdata->pdev->dev,
+			"%s interrupt for thermal sensor %d\n",
+			event_type ? "HIGH" : "LOW", sensor);
+	}
+
+	/* Notify using UEvent */
+	kobject_uevent(&tdata->pdev->dev.kobj, KOBJ_CHANGE);
+
+	/* Unmask Thermal Interrupt in the mask register */
+	ret = intel_scu_ipc_update_register(MIRQLVL1, 0xFF, THERM_ALRT);
+	if (ret)
+		goto unlock;
+
+	irq_ret = IRQ_HANDLED;
+
+unlock:
+	mutex_unlock(&thrm_update_lock);
+	return irq_ret;
+}
+
+/* Callbacks wired into every registered PMIC thermal zone */
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = show_temp,
+	.get_trip_type = show_trip_type,
+	.get_trip_temp = show_trip_temp,
+	.set_trip_temp = store_trip_temp,
+	.get_trip_hyst = show_trip_hyst,
+	.set_trip_hyst = store_trip_hyst,
+#ifdef CONFIG_DEBUG_THERMAL
+	.get_slope = read_slope,
+	.set_slope = update_slope,
+	.get_intercept = read_intercept,
+	.set_intercept = update_intercept,
+#endif
+};
+
+/* Hard-IRQ half: defer all work (sleeping IPC calls) to thermal_intrpt() */
+static irqreturn_t mrfl_thermal_intrpt_handler(int irq, void* dev_data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mrfl_thermal_probe - initializes the Merrifield PMIC thermal driver
+ * @pdev: platform device
+ *
+ * Programs default trip maxima, acquires the IIO ADC channels, registers
+ * one thermal zone per platform sensor, maps the PMIC SRAM status window,
+ * installs the threaded IRQ handler and finally enables monitoring.
+ *
+ * Fixes over the original: platform_get_irq() may return a negative
+ * errno, which must not be stored into the unsigned irq field unchecked;
+ * iio_channel_get_all() is checked with IS_ERR_OR_NULL since it commonly
+ * reports failure via ERR_PTR rather than NULL.
+ */
+static int mrfl_thermal_probe(struct platform_device *pdev)
+{
+	int ret, i, irq;
+	struct intel_mid_thermal_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data not found\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "platform_get_irq failed:%d\n", irq);
+		return irq;
+	}
+
+	tdata = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
+	if (!tdata) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	tdata->pdev = pdev;
+	tdata->num_sensors = pdata->num_sensors;
+	tdata->sensors = pdata->sensors;
+	tdata->irq = irq;
+	platform_set_drvdata(pdev, tdata);
+
+	tdata->tzd = kzalloc(
+		(sizeof(struct thermal_zone_device *) * tdata->num_sensors),
+		GFP_KERNEL);
+	if (!tdata->tzd) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		ret = -ENOMEM;
+		goto exit_free;
+	}
+
+	/* Program a default _max value for each sensor */
+	ret = program_tmax(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Programming _max failed:%d\n", ret);
+		goto exit_tzd;
+	}
+
+	/*
+	 * Register with IIO to sample temperature values
+	 *
+	 * Order of the channels obtained from adc:
+	 * "SYSTHERM0", "SYSTHERM1", "SYSTHERM2", "PMICDIE"
+	 */
+	tdata->iio_chan = iio_channel_get_all(&pdev->dev);
+	if (IS_ERR_OR_NULL(tdata->iio_chan)) {
+		dev_err(&pdev->dev, "tdata->iio_chan is null\n");
+		ret = -EINVAL;
+		goto exit_tzd;
+	}
+
+	/* Check whether we got all the four channels */
+	ret = iio_channel_get_num(tdata->iio_chan);
+	if (ret != PMIC_THERMAL_SENSORS) {
+		dev_err(&pdev->dev, "incorrect number of channels:%d\n", ret);
+		ret = -EFAULT;
+		goto exit_iio;
+	}
+
+	/* Register each sensor with the generic thermal framework */
+	for (i = 0; i < tdata->num_sensors; i++) {
+		tdata->tzd[i] = thermal_zone_device_register(
+				tdata->sensors[i].name, 1, 1,
+				initialize_sensor(&tdata->sensors[i]), &tzd_ops, NULL, 0, 0);
+
+		if (IS_ERR(tdata->tzd[i])) {
+			ret = PTR_ERR(tdata->tzd[i]);
+			dev_err(&pdev->dev,
+				"registering thermal sensor %s failed: %d\n",
+				tdata->sensors[i].name, ret);
+			goto exit_reg;
+		}
+	}
+
+	tdata->thrm_addr = ioremap_nocache(PMIC_SRAM_BASE_ADDR, IOMAP_SIZE);
+	if (!tdata->thrm_addr) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "ioremap_nocache failed\n");
+		goto exit_reg;
+	}
+
+	/* Register for Interrupt Handler */
+	ret = request_threaded_irq(tdata->irq, mrfl_thermal_intrpt_handler, thermal_intrpt,
+						IRQF_TRIGGER_RISING,
+						DRIVER_NAME, tdata);
+	if (ret) {
+		dev_err(&pdev->dev, "request_threaded_irq failed:%d\n", ret);
+		goto exit_ioremap;
+	}
+
+	/* Enable Thermal Monitoring */
+	ret = enable_tm();
+	if (ret) {
+		dev_err(&pdev->dev, "Enabling TM failed:%d\n", ret);
+		goto exit_irq;
+	}
+
+	return 0;
+
+exit_irq:
+	free_irq(tdata->irq, tdata);
+exit_ioremap:
+	iounmap(tdata->thrm_addr);
+exit_reg:
+	while (--i >= 0)
+		thermal_zone_device_unregister(tdata->tzd[i]);
+exit_iio:
+	iio_channel_release_all(tdata->iio_chan);
+exit_tzd:
+	kfree(tdata->tzd);
+exit_free:
+	kfree(tdata);
+	return ret;
+}
+
+/* PM resume hook: nothing to restore yet, just trace the call */
+static int mrfl_thermal_resume(struct device *dev)
+{
+	dev_info(dev, "resume called.\n");
+	return 0;
+}
+
+/* PM suspend hook: nothing to save yet, just trace the call */
+static int mrfl_thermal_suspend(struct device *dev)
+{
+	dev_info(dev, "suspend called.\n");
+	return 0;
+}
+
+/*
+ * Tear down everything set up in probe, in reverse order.
+ * NOTE(review): the per-zone devdata allocated by initialize_sensor() is
+ * not freed here (thermal_zone_device_unregister does not free devdata)
+ * — looks like a small leak; confirm and free tzd[i]->devdata if so.
+ */
+static int mrfl_thermal_remove(struct platform_device *pdev)
+{
+	int i;
+	struct thermal_data *tdata = platform_get_drvdata(pdev);
+
+	if (!tdata)
+		return 0;
+
+	for (i = 0; i < tdata->num_sensors; i++)
+		thermal_zone_device_unregister(tdata->tzd[i]);
+
+	free_irq(tdata->irq, tdata);
+	iounmap(tdata->thrm_addr);
+	iio_channel_release_all(tdata->iio_chan);
+	kfree(tdata->tzd);
+	kfree(tdata);
+	return 0;
+}
+
+/*********************************************************************
+ * Driver initialization and finalization
+ *********************************************************************/
+
+/* System sleep callbacks for the platform driver below */
+static const struct dev_pm_ops thermal_pm_ops = {
+	.suspend = mrfl_thermal_suspend,
+	.resume = mrfl_thermal_resume,
+};
+
+/* Platform driver; registered from the rpmsg probe path, not module init */
+static struct platform_driver mrfl_thermal_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &thermal_pm_ops,
+		},
+	.probe = mrfl_thermal_probe,
+	.remove = mrfl_thermal_remove,
+};
+
+/* Register/unregister the platform driver; called from the rpmsg hooks */
+static int mrfl_thermal_module_init(void)
+{
+	return platform_driver_register(&mrfl_thermal_driver);
+}
+
+static void mrfl_thermal_module_exit(void)
+{
+	platform_driver_unregister(&mrfl_thermal_driver);
+}
+
+/* RPMSG related functionality */
+/*
+ * rpmsg channel probe: the SCU announces the thermal service over rpmsg;
+ * only then is the platform driver registered.
+ */
+static int mrfl_thermal_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	if (!rpdev) {
+		pr_err("rpmsg channel not created\n");
+		return -ENODEV;
+	}
+
+	dev_info(&rpdev->dev, "Probed mrfl_thermal rpmsg device\n");
+
+	return mrfl_thermal_module_init();
+}
+
+/* rpmsg channel teardown: unregister the platform driver again */
+static void mrfl_thermal_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	mrfl_thermal_module_exit();
+	dev_info(&rpdev->dev, "Removed mrfl_thermal rpmsg device\n");
+}
+
+/*
+ * rpmsg receive callback. This driver never expects inbound messages,
+ * so anything arriving here is logged and hex-dumped for diagnosis.
+ * (Fixed the stray comma in the original "unexpected, message" text.)
+ */
+static void mrfl_thermal_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len, true);
+}
+
+/* Channel name the SCU firmware uses to announce the thermal service */
+static struct rpmsg_device_id mrfl_thermal_id_table[] = {
+	{ .name = "rpmsg_mrfl_thermal" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(rpmsg, mrfl_thermal_id_table);
+
+static struct rpmsg_driver mrfl_thermal_rpmsg = {
+	.drv.name	= DRIVER_NAME,
+	.drv.owner	= THIS_MODULE,
+	.probe		= mrfl_thermal_rpmsg_probe,
+	.callback	= mrfl_thermal_rpmsg_cb,
+	.remove		= mrfl_thermal_rpmsg_remove,
+	.id_table	= mrfl_thermal_id_table,
+};
+
+/* Module entry point: everything else hangs off the rpmsg probe */
+static int __init mrfl_thermal_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&mrfl_thermal_rpmsg);
+}
+
+/*
+ * Module exit. unregister_rpmsg_driver() returns void, so its value
+ * must not be 'return'ed from this void function (ISO C violation).
+ */
+static void __exit mrfl_thermal_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&mrfl_thermal_rpmsg);
+}
+
+module_init(mrfl_thermal_rpmsg_init);
+module_exit(mrfl_thermal_rpmsg_exit);
+
+MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield Platform Thermal Driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * intel_soc_thermal.c - Intel SoC Platform Thermal Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Shravan B M <shravan.k.b.m@intel.com>
+ *
+ * This driver registers to Thermal framework as SoC zone. It exposes
+ * two SoC DTS temperature with two writeable trip points.
+ */
+
+#define pr_fmt(fmt) "intel_soc_thermal: " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/thermal.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/msr.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_thermal.h>
+
+#define DRIVER_NAME "soc_thrm"
+
+/* SOC DTS Registers */
+#define SOC_THERMAL_SENSORS 2
+#define SOC_THERMAL_TRIPS 2
+#define SOC_MAX_STATES 4
+#define DTS_ENABLE_REG 0xB0
+#define DTS_ENABLE 0x03
+#define DTS_TRIP_RW 0x03
+
+#define PUNIT_PORT 0x04
+#define PUNIT_TEMP_REG 0xB1
+#define PUNIT_AUX_REG 0xB2
+
+#define TJMAX_TEMP 90
+#define TJMAX_CODE 0x7F
+
+/* Default hysteresis values in C */
+#define DEFAULT_H2C_HYST 3
+#define MAX_HYST 7
+
+/* Power Limit registers */
+#define PKG_TURBO_POWER_LIMIT 0x610
+#define PKG_TURBO_CFG 0x670
+#define MSR_THERM_CFG1 0x673
+#define CPU_PWR_BUDGET_CTL 0x02
+
+/* PKG_TURBO_PL1 holds PL1 in terms of 32mW */
+#define PL_UNIT_MW 32
+
+/* Magic number symbolising Dynamic Turbo OFF */
+#define DISABLE_DYNAMIC_TURBO 0xB0FF
+
+/* IRQ details */
+#define SOC_DTS_CONTROL 0x80
+#define TRIP_STATUS_RO 0xB3
+#define TRIP_STATUS_RW 0xB4
+/* TE stands for THERMAL_EVENT */
+#define TE_AUX0 0xB5
+#define ENABLE_AUX_INTRPT 0x0F
+#define ENABLE_CPU0 (1 << 16)
+#define RTE_ENABLE (1 << 9)
+
+static int tjmax_temp;
+
+/* Serializes the interrupt handler's DTS register accesses */
+static DEFINE_MUTEX(thrm_update_lock);
+
+/* Driver-wide state: one zone per DTS plus the PL1 cooling device */
+struct platform_soc_data {
+	struct thermal_zone_device *tzd[SOC_THERMAL_SENSORS];
+	struct thermal_cooling_device *soc_cdev; /* PL1 control */
+	int irq;
+};
+
+/* devdata for the cooling device */
+struct cooling_device_info {
+	struct soc_throttle_data *soc_data;
+	/* Lock protecting the soc_cur_state variable */
+	struct mutex lock_state;
+	unsigned long soc_cur_state;
+};
+
+/* devdata for each DTS thermal zone */
+struct thermal_device_info {
+	int sensor_index;
+	struct mutex lock_aux;	/* protects AUX trip/hyst register updates */
+};
+
+/* Thin wrappers over the P-unit message bus (port PUNIT_PORT) */
+static inline u32 read_soc_reg(unsigned int addr)
+{
+	return intel_mid_msgbus_read32(PUNIT_PORT, addr);
+}
+
+static inline void write_soc_reg(unsigned int addr, u32 val)
+{
+	intel_mid_msgbus_write32(PUNIT_PORT, addr, val);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Name/address pairs of the DTS registers dumped via debugfs */
+struct dts_regs {
+	char *name;
+	u32 addr;
+} dts_regs[] = {
+	/* Thermal Management Registers */
+	{"PTMC", 0x80},
+	{"TRR0", 0x81},
+	{"TRR1", 0x82},
+	{"TTS", 0x83},
+	{"TELB", 0x84},
+	{"TELT", 0x85},
+	{"GFXT", 0x88},
+	{"VEDT", 0x89},
+	{"VECT", 0x8A},
+	{"VSPT", 0x8B},
+	{"ISPT", 0x8C},
+	{"SWT", 0x8D},
+	/* Trip Event Registers */
+	{"DTSC", 0xB0},
+	{"TRR", 0xB1},
+	{"PTPS", 0xB2},
+	{"PTTS", 0xB3},
+	{"PTTSS", 0xB4},
+	{"TE_AUX0", 0xB5},
+	{"TE_AUX1", 0xB6},
+	{"TE_AUX2", 0xB7},
+	{"TE_AUX3", 0xB8},
+	{"TTE_VRIcc", 0xB9},
+	{"TTE_VRHOT", 0xBA},
+	{"TTE_PROCHOT", 0xBB},
+	{"TTE_SLM0", 0xBC},
+	{"TTE_SLM1", 0xBD},
+	{"BWTE", 0xBE},
+	{"TTE_SWT", 0xBF},
+	/* MSI Message Registers */
+	{"TMA", 0xC0},
+	{"TMD", 0xC1},
+};
+
+/* /sys/kernel/debug/soc_thermal/soc_dts */
+static struct dentry *soc_dts_dent;
+static struct dentry *soc_thermal_dir;
+
+/* Dump every register in dts_regs[] as "NAME[addr] Val: ..." */
+static int soc_dts_debugfs_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 val;
+
+	for (i = 0; i < ARRAY_SIZE(dts_regs); i++) {
+		val = read_soc_reg(dts_regs[i].addr);
+		seq_printf(s,
+			"%s[0x%X] Val: 0x%X\n",
+			dts_regs[i].name, dts_regs[i].addr, val);
+	}
+	return 0;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, soc_dts_debugfs_show, NULL);
+}
+
+static const struct file_operations soc_dts_debugfs_fops = {
+	.open = debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * Best-effort creation of /sys/kernel/debug/soc_thermal/soc_dts;
+ * failures are logged but never propagated to the caller.
+ * NOTE(review): debugfs_create_dir/file report failure as NULL on some
+ * kernel versions rather than ERR_PTR — the IS_ERR() checks here would
+ * then miss the failure; confirm against the target kernel.
+ */
+static void create_soc_dts_debugfs(void)
+{
+	int err;
+
+	/* /sys/kernel/debug/soc_thermal/ */
+	soc_thermal_dir = debugfs_create_dir("soc_thermal", NULL);
+	if (IS_ERR(soc_thermal_dir)) {
+		err = PTR_ERR(soc_thermal_dir);
+		pr_err("debugfs_create_dir failed:%d\n", err);
+		return;
+	}
+
+	/* /sys/kernel/debug/soc_thermal/soc_dts */
+	soc_dts_dent = debugfs_create_file("soc_dts", S_IFREG | S_IRUGO,
+						soc_thermal_dir, NULL,
+						&soc_dts_debugfs_fops);
+	if (IS_ERR(soc_dts_dent)) {
+		err = PTR_ERR(soc_dts_dent);
+		debugfs_remove_recursive(soc_thermal_dir);
+		pr_err("debugfs_create_file failed:%d\n", err);
+	}
+}
+
+static void remove_soc_dts_debugfs(void)
+{
+	debugfs_remove_recursive(soc_thermal_dir);
+}
+#else
+static inline void create_soc_dts_debugfs(void) { }
+static inline void remove_soc_dts_debugfs(void) { }
+#endif
+
+/*
+ * Allocate the cooling-device devdata; returns NULL on allocation
+ * failure. Throttle table comes straight from platform data.
+ */
+static
+struct cooling_device_info *initialize_cdev(struct platform_device *pdev)
+{
+	struct cooling_device_info *cdev_info =
+		kzalloc(sizeof(struct cooling_device_info), GFP_KERNEL);
+	if (!cdev_info)
+		return NULL;
+
+	cdev_info->soc_data = pdev->dev.platform_data;
+	mutex_init(&cdev_info->lock_state);
+	return cdev_info;
+}
+
+/* Allocate per-zone devdata for DTS @index; returns NULL on failure */
+static struct thermal_device_info *initialize_sensor(int index)
+{
+	struct thermal_device_info *td_info =
+		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+	if (!td_info)
+		return NULL;
+	td_info->sensor_index = index;
+	mutex_init(&td_info->lock_aux);
+
+	return td_info;
+}
+
+/*
+ * One-time DTS bring-up: program the default high-to-cold hysteresis,
+ * enable both DTS, route alerts to CPU0 and enable the AUX trip IRQs.
+ */
+static void enable_soc_dts(void)
+{
+	int i;
+	u32 val, eax, edx;
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst */
+	eax = (eax & ~(0x7 << 8)) | (DEFAULT_H2C_HYST << 8);
+
+	/* Set the Hysteresis value */
+	wrmsr_on_cpu(0, MSR_THERM_CFG1, eax, edx);
+
+	/* Enable the DTS */
+	write_soc_reg(DTS_ENABLE_REG, DTS_ENABLE);
+
+	val = read_soc_reg(SOC_DTS_CONTROL);
+	write_soc_reg(SOC_DTS_CONTROL, val | ENABLE_AUX_INTRPT | ENABLE_CPU0);
+
+	/* Enable Interrupts for all the AUX trips for the DTS */
+	for (i = 0; i < SOC_THERMAL_TRIPS; i++) {
+		val = read_soc_reg(TE_AUX0 + i);
+		write_soc_reg(TE_AUX0 + i, (val | RTE_ENABLE));
+	}
+}
+
+/* Report the H2C hysteresis (mC); only trip 0 carries a hysteresis */
+static int show_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long *hyst)
+{
+	u32 eax, edx;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Hysteresis is only supported for trip point 0 */
+	if (trip != 0) {
+		*hyst = 0;
+		return 0;
+	}
+
+	mutex_lock(&td_info->lock_aux);
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst, for trip 0. Report hysteresis in mC */
+	*hyst = ((eax >> 8) & 0x7) * 1000;
+
+	mutex_unlock(&td_info->lock_aux);
+	return 0;
+}
+
+/* Program the H2C hysteresis (input in mC, stored in whole C, 0..MAX_HYST) */
+static int store_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long hyst)
+{
+	u32 eax, edx;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Convert from mC to C */
+	hyst /= 1000;
+
+	if (trip != 0 || hyst < 0 || hyst > MAX_HYST)
+		return -EINVAL;
+
+	mutex_lock(&td_info->lock_aux);
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst */
+	eax = (eax & ~(0x7 << 8)) | (hyst << 8);
+
+	wrmsr_on_cpu(0, MSR_THERM_CFG1, eax, edx);
+
+	mutex_unlock(&td_info->lock_aux);
+	return 0;
+}
+
+/*
+ * Report the DTS temperature in mC. The raw register holds both DTS
+ * readings packed as bytes; the value counts down from TJMAX_CODE.
+ */
+static int show_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+	u32 val = read_soc_reg(PUNIT_TEMP_REG);
+
+	/* Extract bits[0:7] or [8:15] using sensor_index */
+	*temp = (val >> (8 * td_info->sensor_index)) & 0xFF;
+
+	/* raw 0 is reported as-is — presumably "no valid reading"; verify */
+	if (*temp == 0)
+		return 0;
+
+	/* Calibrate the temperature */
+	*temp = TJMAX_CODE - *temp + tjmax_temp;
+
+	/* Convert to mC */
+	*temp *= 1000;
+
+	return 0;
+}
+
+/* All SoC DTS trip points are passive */
+static int show_trip_type(struct thermal_zone_device *tzd,
+			int trip, enum thermal_trip_type *trip_type)
+{
+	/* All are passive trip points */
+	*trip_type = THERMAL_TRIP_PASSIVE;
+
+	return 0;
+}
+
+/* Read back the AUX trip point for @trip and convert it to mC */
+static int show_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long *trip_temp)
+{
+	u32 aux_value = read_soc_reg(PUNIT_AUX_REG);
+
+	/* aux0 b[0:7], aux1 b[8:15], aux2 b[16:23], aux3 b[24:31] */
+	*trip_temp = (aux_value >> (8 * trip)) & 0xFF;
+
+	/* Calibrate the trip point temperature */
+	*trip_temp = tjmax_temp - *trip_temp;
+
+	/* Convert to mC and report */
+	*trip_temp *= 1000;
+
+	return 0;
+}
+
+/**
+ * store_trip_temp - programs an AUX trip point for the SoC DTS
+ * @tzd: thermal zone device
+ * @trip: trip index (only 0 and 1 are wired up)
+ * @trip_temp: trip temperature in mC
+ *
+ * The AUX registers store the trip as an offset below TjMax, one byte
+ * per trip. Fixes over the original: negative temperatures are rejected
+ * (they passed the '> 255' check and underflowed the TjMax subtraction),
+ * the shifted byte is masked so it can never spill into a neighbouring
+ * trip's byte, and an unsupported trip index returns -EINVAL instead of
+ * silently rewriting the register unchanged.
+ */
+static int store_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long trip_temp)
+{
+	u32 aux_trip, aux = 0;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Convert from mC to C */
+	trip_temp /= 1000;
+
+	/* The trip temp is 8 bits wide (unsigned) */
+	if (trip_temp < 0 || trip_temp > 255)
+		return -EINVAL;
+
+	/* Assign last byte to unsigned 32 */
+	aux_trip = trip_temp & 0xFF;
+
+	/* Calibrate w.r.t TJMAX_TEMP */
+	aux_trip = tjmax_temp - aux_trip;
+
+	mutex_lock(&td_info->lock_aux);
+	aux = read_soc_reg(PUNIT_AUX_REG);
+	switch (trip) {
+	case 0:
+		/* aux0 bits 0:7 */
+		aux = (aux & 0xFFFFFF00) | ((aux_trip & 0xFF) << (8 * trip));
+		break;
+	case 1:
+		/* aux1 bits 8:15 */
+		aux = (aux & 0xFFFF00FF) | ((aux_trip & 0xFF) << (8 * trip));
+		break;
+	default:
+		mutex_unlock(&td_info->lock_aux);
+		return -EINVAL;
+	}
+	write_soc_reg(PUNIT_AUX_REG, aux);
+
+	mutex_unlock(&td_info->lock_aux);
+
+	return 0;
+}
+
+/* SoC cooling device callbacks */
+static int soc_get_max_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	/* SoC has 4 levels of throttling from 0 to 3 */
+	*state = SOC_MAX_STATES - 1;
+	return 0;
+}
+
+/* Return the last state programmed via soc_set_cur_state() */
+static int soc_get_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	struct cooling_device_info *cdev_info =
+		(struct cooling_device_info *)cdev->devdata;
+
+	mutex_lock(&cdev_info->lock_state);
+	*state = cdev_info->soc_cur_state;
+	mutex_unlock(&cdev_info->lock_state);
+
+	return 0;
+}
+
+/* Program the CPU floor frequency ratio; @val is assumed to fit 7 bits */
+static void set_floor_freq(int val)
+{
+	u32 eax;
+
+	eax = read_soc_reg(CPU_PWR_BUDGET_CTL);
+
+	/* Set bits[8:14] of eax to val */
+	eax = (eax & ~(0x7F << 8)) | (val << 8);
+
+	write_soc_reg(CPU_PWR_BUDGET_CTL, eax);
+}
+
+/*
+ * Switch the package into TjMax Turbo mode with Dynamic Turbo and its
+ * policy disabled, and drop the floor frequency ratio to 0. The cur_state
+ * is set to the DISABLE_DYNAMIC_TURBO magic so user space can detect it.
+ */
+static int disable_dynamic_turbo(struct cooling_device_info *cdev_info)
+{
+	u32 eax, edx;
+
+	mutex_lock(&cdev_info->lock_state);
+
+	rdmsr_on_cpu(0, PKG_TURBO_CFG, &eax, &edx);
+
+	/* Set bits[0:2] to 0 to enable TjMax Turbo mode */
+	eax = eax & ~0x07;
+
+	/* Set bit[8] to 0 to disable Dynamic Turbo */
+	eax = eax & ~(1 << 8);
+
+	/* Set bits[9:11] to 0 disable Dynamic Turbo Policy */
+	eax = eax & ~(0x07 << 9);
+
+	wrmsr_on_cpu(0, PKG_TURBO_CFG, eax, edx);
+
+	/*
+	 * Now that we disabled Dynamic Turbo, we can
+	 * make the floor frequency ratio also 0.
+	 */
+	set_floor_freq(0);
+
+	cdev_info->soc_cur_state = DISABLE_DYNAMIC_TURBO;
+
+	mutex_unlock(&cdev_info->lock_state);
+	return 0;
+}
+
+/*
+ * Apply throttle level @state: program the PL1 power limit and the floor
+ * frequency from the platform throttle table. The magic value
+ * DISABLE_DYNAMIC_TURBO bypasses the table entirely.
+ */
+static int soc_set_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long state)
+{
+	u32 eax, edx;
+	struct soc_throttle_data *data;
+	struct cooling_device_info *cdev_info =
+		(struct cooling_device_info *)cdev->devdata;
+
+	if (state == DISABLE_DYNAMIC_TURBO)
+		return disable_dynamic_turbo(cdev_info);
+
+	if (state >= SOC_MAX_STATES) {
+		pr_err("Invalid SoC throttle state:%ld\n", state);
+		return -EINVAL;
+	}
+
+	mutex_lock(&cdev_info->lock_state);
+
+	data = &cdev_info->soc_data[state];
+
+	rdmsr_on_cpu(0, PKG_TURBO_POWER_LIMIT, &eax, &edx);
+
+	/* Set bits[0:14] of eax to 'data->power_limit' */
+	eax = (eax & ~0x7FFF) | data->power_limit;
+
+	wrmsr_on_cpu(0, PKG_TURBO_POWER_LIMIT, eax, edx);
+
+	set_floor_freq(data->floor_freq);
+
+	cdev_info->soc_cur_state = state;
+
+	mutex_unlock(&cdev_info->lock_state);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_THERMAL
+/* Print the four per-state PL1 limits in mW (register unit is 32 mW) */
+static int soc_get_force_state_override(struct thermal_cooling_device *cdev,
+					char *buf)
+{
+	int i;
+	int pl1_vals_mw[SOC_MAX_STATES];
+	struct cooling_device_info *cdev_info =
+		(struct cooling_device_info *)cdev->devdata;
+
+	mutex_lock(&cdev_info->lock_state);
+
+	/* PKG_TURBO_PL1 holds PL1 in terms of 32mW. So, multiply by 32 */
+	for (i = 0; i < SOC_MAX_STATES; i++) {
+		pl1_vals_mw[i] =
+			cdev_info->soc_data[i].power_limit * PL_UNIT_MW;
+	}
+
+	mutex_unlock(&cdev_info->lock_state);
+
+	return sprintf(buf, "%d %d %d %d\n", pl1_vals_mw[0], pl1_vals_mw[1],
+			pl1_vals_mw[2], pl1_vals_mw[3]);
+}
+
+/*
+ * Overwrite the per-state PL1 limits from user input and re-apply the
+ * current state so the new limit takes effect immediately.
+ */
+static int soc_set_force_state_override(struct thermal_cooling_device *cdev,
+					char *buf)
+{
+	int i, ret;
+	int pl1_vals_mw[SOC_MAX_STATES];
+	unsigned long cur_state;
+	struct cooling_device_info *cdev_info =
+		(struct cooling_device_info *)cdev->devdata;
+
+	/*
+	 * The four space separated values entered via the sysfs node
+	 * override the default values configured through platform data.
+	 */
+	ret = sscanf(buf, "%d %d %d %d", &pl1_vals_mw[0], &pl1_vals_mw[1],
+			&pl1_vals_mw[2], &pl1_vals_mw[3]);
+	if (ret != SOC_MAX_STATES) {
+		pr_err("Invalid values in soc_set_force_state_override\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&cdev_info->lock_state);
+
+	/* PKG_TURBO_PL1 takes PL1 in terms of 32mW. So, divide by 32 */
+	for (i = 0; i < SOC_MAX_STATES; i++) {
+		cdev_info->soc_data[i].power_limit =
+				pl1_vals_mw[i] / PL_UNIT_MW;
+	}
+
+	/* Update the cur_state value of this cooling device */
+	cur_state = cdev_info->soc_cur_state;
+
+	mutex_unlock(&cdev_info->lock_state);
+
+	return soc_set_cur_state(cdev, cur_state);
+}
+#endif
+
+/**
+ * notify_thermal_event - sends a uevent describing a DTS trip event
+ * @tzd: thermal zone the event belongs to
+ * @temp: current temperature in mC
+ * @event: 1 for LOW->HIGH, 0 for HIGH->LOW
+ * @level: trip index that fired
+ *
+ * Fix over the original: kasprintf() can return NULL under memory
+ * pressure, so the environment strings are validated before being
+ * handed to kobject_uevent_env(); the redundant trailing 'return' in a
+ * void function is dropped.
+ */
+static void notify_thermal_event(struct thermal_zone_device *tzd,
+				long temp, int event, int level)
+{
+	int i;
+	char *thermal_event[5];
+
+	pr_info("Thermal Event: sensor: %s, cur_temp: %ld, event: %d, level: %d\n",
+			tzd->type, temp, event, level);
+
+	thermal_event[0] = kasprintf(GFP_KERNEL, "NAME=%s", tzd->type);
+	thermal_event[1] = kasprintf(GFP_KERNEL, "TEMP=%ld", temp);
+	thermal_event[2] = kasprintf(GFP_KERNEL, "EVENT=%d", event);
+	thermal_event[3] = kasprintf(GFP_KERNEL, "LEVEL=%d", level);
+	thermal_event[4] = NULL;
+
+	/* Skip the uevent if any environment string failed to allocate */
+	if (thermal_event[0] && thermal_event[1] &&
+	    thermal_event[2] && thermal_event[3])
+		kobject_uevent_env(&tzd->device.kobj, KOBJ_CHANGE,
+					thermal_event);
+
+	for (i = 3; i >= 0; i--)
+		kfree(thermal_event[i]);
+}
+
+/*
+ * Return MAX over all DTS readings in *cur_temp (mC). Fails only when
+ * even the first DTS cannot be read; later read failures fall back to
+ * whatever maximum was collected so far.
+ */
+static int get_max_temp(struct platform_soc_data *pdata, long *cur_temp)
+{
+	int i, ret;
+	long temp;
+
+	/*
+	 * The SoC has two or more DTS placed, to determine the
+	 * temperature of the SoC. The hardware actions are taken
+	 * using T(DTS) which is MAX(T(DTS0), T(DTS1), ... T(DTSn))
+	 *
+	 * Do not report error, as long as we can read at least
+	 * one DTS correctly.
+	 */
+	ret = show_temp(pdata->tzd[0], cur_temp);
+	if (ret)
+		return ret;
+
+	for (i = 1; i < SOC_THERMAL_SENSORS; i++) {
+		ret = show_temp(pdata->tzd[i], &temp);
+		if (ret)
+			goto fail_safe;
+
+		if (temp > *cur_temp)
+			*cur_temp = temp;
+	}
+
+fail_safe:
+	/*
+	 * We have one valid DTS temperature; Use that,
+	 * instead of reporting error.
+	 */
+	return 0;
+}
+
+/*
+ * Threaded half of the SoC DTS interrupt.
+ *
+ * Decodes the lowest pending trip bit from TRIP_STATUS_RW, derives the
+ * event direction (1 = trip asserted, 0 = cleared) from TRIP_STATUS_RO,
+ * reads the current max DTS temperature and notifies user space via a
+ * uevent.  The handled status bit is cleared by writing 1 back to
+ * TRIP_STATUS_RW.  Serialized against other updaters by thrm_update_lock.
+ */
+static irqreturn_t soc_dts_intrpt(int irq, void *dev_data)
+{
+	u32 irq_sts, cur_sts;
+	int i, ret, event, level = -1;
+	long cur_temp;
+	struct thermal_zone_device *tzd;
+	struct platform_soc_data *pdata = (struct platform_soc_data *)dev_data;
+
+	if (!pdata || !pdata->tzd[0])
+		return IRQ_NONE;
+
+	mutex_lock(&thrm_update_lock);
+
+	/* Zone 0's device is used for error reporting and the uevent */
+	tzd = pdata->tzd[0];
+
+	irq_sts = read_soc_reg(TRIP_STATUS_RW);
+	cur_sts = read_soc_reg(TRIP_STATUS_RO);
+
+	/* Handle only the lowest pending trip; 'level' records its index */
+	for (i = 0; i < SOC_THERMAL_TRIPS; i++) {
+		if (irq_sts & (1 << i)) {
+			level = i;
+			event = !!(cur_sts & (1 << i));
+			/* Clear the status bit by writing 1 */
+			irq_sts |= (1 << i);
+			break;
+		}
+	}
+
+	/* level == -1, indicates an invalid event */
+	if (level == -1) {
+		dev_err(&tzd->device, "Invalid event from SoC DTS\n");
+		goto exit;
+	}
+
+	ret = get_max_temp(pdata, &cur_temp);
+	if (ret) {
+		dev_err(&tzd->device, "Cannot read SoC DTS temperature\n");
+		goto exit;
+	}
+
+	/* Notify using UEvent */
+	notify_thermal_event(tzd, cur_temp, event, level);
+
+	/* Clear the status bits */
+	write_soc_reg(TRIP_STATUS_RW, irq_sts);
+
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return IRQ_HANDLED;
+}
+
+/* Thermal zone callbacks backing the per-zone sysfs temp/trip attributes */
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = show_temp,
+	.get_trip_type = show_trip_type,
+	.get_trip_temp = show_trip_temp,
+	.set_trip_temp = store_trip_temp,
+	.get_trip_hyst = show_trip_hyst,
+	.set_trip_hyst = store_trip_hyst,
+};
+
+/*
+ * Cooling device callbacks for SoC power-limit (PL1) throttling.  The
+ * force_state_override hooks are debug-only and exposed via sysfs when
+ * CONFIG_DEBUG_THERMAL is set.
+ */
+static struct thermal_cooling_device_ops soc_cooling_ops = {
+	.get_max_state = soc_get_max_state,
+	.get_cur_state = soc_get_cur_state,
+	.set_cur_state = soc_set_cur_state,
+#ifdef CONFIG_DEBUG_THERMAL
+	.get_force_state_override = soc_get_force_state_override,
+	.set_force_state_override = soc_set_force_state_override,
+#endif
+};
+
+/*********************************************************************
+ * Driver initialization and finalization
+ *********************************************************************/
+
+/*
+ * Hard-IRQ half of the DTS interrupt: do nothing in interrupt context,
+ * just wake the threaded handler (soc_dts_intrpt), which does the
+ * register reads under a mutex.
+ */
+static irqreturn_t soc_dts_intrpt_handler(int irq, void *dev_data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Probe: read TjMax, register one thermal zone per DTS sensor, register
+ * the PL1 cooling device, then hook up the threaded DTS interrupt and
+ * enable the sensors.  Tears down in reverse order on failure.
+ *
+ * NOTE(review): when thermal_zone_device_register() fails for index i,
+ * the devdata just returned by initialize_sensor(i) is never freed --
+ * the exit_reg path only cleans up indices < i.  Presumably that devdata
+ * is heap-allocated (the remove path kfree()s it); confirm and free it
+ * on this error path too.
+ */
+static int soc_thermal_probe(struct platform_device *pdev)
+{
+	struct platform_soc_data *pdata;
+	int i, ret;
+	u32 eax, edx;
+	static char *name[SOC_THERMAL_SENSORS] = {"SoC_DTS0", "SoC_DTS1"};
+
+	pdata = kzalloc(sizeof(struct platform_soc_data), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	/* TjMax sits in bits 23:16 of MSR_IA32_TEMPERATURE_TARGET; fall
+	 * back to the compile-time default if the MSR read fails. */
+	ret = rdmsr_safe_on_cpu(0, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+	if (ret) {
+		tjmax_temp = TJMAX_TEMP;
+		dev_err(&pdev->dev, "TjMax read from MSR %x failed, error:%d\n",
+			MSR_IA32_TEMPERATURE_TARGET, ret);
+	} else {
+		tjmax_temp = (eax >> 16) & 0xff;
+		dev_dbg(&pdev->dev, "TjMax is %d degrees C\n", tjmax_temp);
+	}
+
+	/* Register each sensor with the generic thermal framework */
+	for (i = 0; i < SOC_THERMAL_SENSORS; i++) {
+		pdata->tzd[i] = thermal_zone_device_register(name[i],
+				SOC_THERMAL_TRIPS, DTS_TRIP_RW,
+				initialize_sensor(i),
+				&tzd_ops, NULL, 0, 0);
+		if (IS_ERR(pdata->tzd[i])) {
+			ret = PTR_ERR(pdata->tzd[i]);
+			dev_err(&pdev->dev, "tzd register failed: %d\n", ret);
+			goto exit_reg;
+		}
+	}
+
+	/* Register a cooling device for PL1 (power limit) control */
+	pdata->soc_cdev = thermal_cooling_device_register("SoC",
+				initialize_cdev(pdev),
+				&soc_cooling_ops);
+	if (IS_ERR(pdata->soc_cdev)) {
+		ret = PTR_ERR(pdata->soc_cdev);
+		pdata->soc_cdev = NULL;
+		goto exit_reg;
+	}
+
+	platform_set_drvdata(pdev, pdata);
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "platform_get_irq failed:%d\n", ret);
+		goto exit_cdev;
+	}
+
+	pdata->irq = ret;
+
+	/* Register for Interrupt Handler: the primary handler only wakes
+	 * the thread; all real work happens in soc_dts_intrpt(). */
+	ret = request_threaded_irq(pdata->irq, soc_dts_intrpt_handler,
+					soc_dts_intrpt,
+					IRQF_TRIGGER_RISING,
+					DRIVER_NAME, pdata);
+	if (ret) {
+		dev_err(&pdev->dev, "request_threaded_irq failed:%d\n", ret);
+		goto exit_cdev;
+	}
+
+	/* Enable DTS0 and DTS1 */
+	enable_soc_dts();
+
+	create_soc_dts_debugfs();
+
+	return 0;
+
+exit_cdev:
+	thermal_cooling_device_unregister(pdata->soc_cdev);
+exit_reg:
+	/* Unwind only the zones that registered successfully */
+	while (--i >= 0) {
+		struct thermal_device_info *td_info = pdata->tzd[i]->devdata;
+		kfree(td_info);
+		thermal_zone_device_unregister(pdata->tzd[i]);
+	}
+	platform_set_drvdata(pdev, NULL);
+	kfree(pdata);
+	return ret;
+}
+
+/*
+ * Remove: quiesce the DTS interrupt, then unregister the thermal zones
+ * (freeing their devdata) and the cooling device, and drop driver data.
+ *
+ * The IRQ must be freed *before* the zones are torn down: the threaded
+ * handler dereferences pdata->tzd[], so freeing the IRQ last leaves a
+ * window where a late interrupt races with the unregistration below.
+ */
+static int soc_thermal_remove(struct platform_device *pdev)
+{
+	int i;
+	struct platform_soc_data *pdata = platform_get_drvdata(pdev);
+
+	/* Stop interrupt delivery first; free_irq() waits for a running
+	 * threaded handler to finish. */
+	free_irq(pdata->irq, pdata);
+
+	/* Unregister each sensor with the generic thermal framework */
+	for (i = 0; i < SOC_THERMAL_SENSORS; i++) {
+		struct thermal_device_info *td_info = pdata->tzd[i]->devdata;
+		kfree(td_info);
+		thermal_zone_device_unregister(pdata->tzd[i]);
+	}
+	thermal_cooling_device_unregister(pdata->soc_cdev);
+	platform_set_drvdata(pdev, NULL);
+	kfree(pdata);
+
+	remove_soc_dts_debugfs();
+
+	return 0;
+}
+
+/*
+ * Platform device ID table.  The platform bus walks this table until it
+ * reaches an entry with an empty name, so it must be zero-terminated;
+ * without the sentinel, platform_match_id() reads past the array.
+ */
+static const struct platform_device_id therm_id_table[] = {
+	{ DRIVER_NAME, 1},
+	{ }
+};
+
+/* Platform driver glue; bound by name/id_table to the SoC thermal device */
+static struct platform_driver soc_thermal_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+		},
+	.probe = soc_thermal_probe,
+	.remove = soc_thermal_remove,
+	.id_table = therm_id_table,
+};
+
+/* Module entry point: register the platform driver; probe runs on match */
+static int __init soc_thermal_module_init(void)
+{
+	return platform_driver_register(&soc_thermal_driver);
+}
+
+/* Module exit: unregister the driver, which triggers soc_thermal_remove() */
+static void __exit soc_thermal_module_exit(void)
+{
+	platform_driver_unregister(&soc_thermal_driver);
+}
+
+module_init(soc_thermal_module_init);
+module_exit(soc_thermal_module_exit);
+
+MODULE_AUTHOR("Shravan B M <shravan.k.b.m@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Thermal Driver");
+MODULE_LICENSE("GPL");
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int trip, ret;
- unsigned long temperature;
+ long temperature;
if (!tz->ops->set_trip_temp)
return -EPERM;
}
static ssize_t
+slope_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	/* Sysfs write handler: parse a base-10 slope value and pass it to
+	 * the zone's set_slope() callback.  -EPERM if the zone does not
+	 * implement the op, -EINVAL on unparsable input. */
+	int ret;
+	long slope;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->set_slope)
+		return -EPERM;
+
+	if (kstrtol(buf, 10, &slope))
+		return -EINVAL;
+
+	ret = tz->ops->set_slope(tz, slope);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* Sysfs read handler: report the zone's slope via get_slope() */
+static ssize_t
+slope_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret;
+	long slope;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->get_slope)
+		return -EINVAL;
+
+	ret = tz->ops->get_slope(tz, &slope);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%ld\n", slope);
+}
+
+/*
+ * Sysfs write handler: parse a base-10 intercept value and pass it to
+ * the zone's set_intercept() callback.  -EPERM if the zone does not
+ * implement the op, -EINVAL on unparsable input.
+ */
+static ssize_t
+intercept_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	long intercept;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->set_intercept)
+		return -EPERM;
+
+	if (kstrtol(buf, 10, &intercept))
+		return -EINVAL;
+
+	ret = tz->ops->set_intercept(tz, intercept);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* Sysfs read handler: report the zone's intercept via get_intercept() */
+static ssize_t
+intercept_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret;
+	long intercept;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->get_intercept)
+		return -EINVAL;
+
+	ret = tz->ops->get_intercept(tz, &intercept);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%ld\n", intercept);
+}
+
+static ssize_t
policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
+static DEVICE_ATTR(slope, S_IRUGO | S_IWUSR, slope_show, slope_store);
+static DEVICE_ATTR(intercept,
+ S_IRUGO | S_IWUSR, intercept_show, intercept_store);
static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
/* sys I/F for cooling device */
return sprintf(buf, "%ld\n", state);
}
+/*
+ * Sysfs to read the mapped values and to override
+ * the default values mapped to each state during runtime.
+ *
+ * No NULL check on the op here: the force_state_override attribute is
+ * only created when the cooling device supplies these callbacks.
+ */
+static ssize_t
+thermal_cooling_device_force_state_override_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	return cdev->ops->get_force_state_override(cdev, buf);
+}
+
+/*
+ * Sysfs write: forward the raw user buffer to the cooling device's
+ * set_force_state_override() callback, which does its own parsing.
+ * Attribute only exists when the callback is provided, so no NULL check.
+ */
+static ssize_t
+thermal_cooling_device_force_state_override_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int ret;
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	ret = cdev->ops->set_force_state_override(cdev, (char *) buf);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/*
+ * Sysfs read: let the cooling device format its available states.
+ * Attribute only exists when get_available_states is provided.
+ */
+static ssize_t
+thermal_cooling_device_available_states_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	return cdev->ops->get_available_states(cdev, buf);
+}
+
static ssize_t
thermal_cooling_device_cur_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
static DEVICE_ATTR(cur_state, 0644,
thermal_cooling_device_cur_state_show,
thermal_cooling_device_cur_state_store);
+static DEVICE_ATTR(force_state_override, 0644,
+ thermal_cooling_device_force_state_override_show,
+ thermal_cooling_device_force_state_override_store);
+static DEVICE_ATTR(available_states, 0444,
+ thermal_cooling_device_available_states_show, NULL);
static ssize_t
thermal_cooling_device_trip_point_show(struct device *dev,
goto free_temp_mem;
if (tz->ops->get_crit_temp) {
- unsigned long temperature;
+ long temperature;
if (!tz->ops->get_crit_temp(tz, &temperature)) {
snprintf(temp->temp_crit.name,
sizeof(temp->temp_crit.name),
result = device_create_file(&cdev->device, &dev_attr_max_state);
if (result)
- goto unregister;
+ goto remove_type;
result = device_create_file(&cdev->device, &dev_attr_cur_state);
if (result)
- goto unregister;
+ goto remove_max_state;
+
+ if (ops->get_force_state_override) {
+ result = device_create_file(&cdev->device,
+ &dev_attr_force_state_override);
+ if (result)
+ goto remove_cur_state;
+ }
/* Add 'this' new cdev to the global cdev list */
+ if (ops->get_available_states) {
+ result = device_create_file(&cdev->device,
+ &dev_attr_available_states);
+ if (result)
+ goto remove_force_override;
+ }
mutex_lock(&thermal_list_lock);
list_add(&cdev->node, &thermal_cdev_list);
mutex_unlock(&thermal_list_lock);
return cdev;
+remove_force_override:
+ if (cdev->ops->get_force_state_override)
+ device_remove_file(&cdev->device,
+ &dev_attr_force_state_override);
+remove_cur_state:
+ device_remove_file(&cdev->device, &dev_attr_cur_state);
+remove_max_state:
+ device_remove_file(&cdev->device, &dev_attr_max_state);
+remove_type:
+ device_remove_file(&cdev->device, &dev_attr_cdev_type);
unregister:
release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
device_unregister(&cdev->device);
device_remove_file(&cdev->device, &dev_attr_cdev_type);
device_remove_file(&cdev->device, &dev_attr_max_state);
device_remove_file(&cdev->device, &dev_attr_cur_state);
-
+ if (cdev->ops->get_force_state_override)
+ device_remove_file(&cdev->device,
+ &dev_attr_force_state_override);
+ if (cdev->ops->get_available_states)
+ device_remove_file(&cdev->device,
+ &dev_attr_available_states);
release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
device_unregister(&cdev->device);
return;
goto unregister;
}
+ /* Create Sysfs for slope/intercept values */
+ if (tz->ops->get_slope) {
+ result = device_create_file(&tz->device, &dev_attr_slope);
+ if (result)
+ goto unregister;
+ }
+
+ if (tz->ops->get_intercept) {
+ result = device_create_file(&tz->device, &dev_attr_intercept);
+ if (result)
+ goto unregister;
+ }
+
#ifdef CONFIG_THERMAL_EMULATION
result = device_create_file(&tz->device, &dev_attr_emul_temp);
if (result)
device_remove_file(&tz->device, &dev_attr_temp);
if (tz->ops->get_mode)
device_remove_file(&tz->device, &dev_attr_mode);
+ if (tz->ops->get_slope)
+ device_remove_file(&tz->device, &dev_attr_slope);
+ if (tz->ops->get_intercept)
+ device_remove_file(&tz->device, &dev_attr_intercept);
+
device_remove_file(&tz->device, &dev_attr_policy);
remove_trip_attrs(tz);
tz->governor = NULL;
driver.
config SERIAL_MFD_HSU
- tristate "Medfield High Speed UART support"
- depends on PCI
- select SERIAL_CORE
+ tristate "Medfield High Speed UART support"
+ depends on PCI
+ select SERIAL_CORE
config SERIAL_MFD_HSU_CONSOLE
- boolean "Medfile HSU serial console support"
- depends on SERIAL_MFD_HSU=y
- select SERIAL_CORE_CONSOLE
+ boolean "Medfield HSU serial console support"
+ depends on SERIAL_MFD_HSU=y
+ select SERIAL_CORE_CONSOLE
config SERIAL_BFIN
tristate "Blackfin serial port support"
obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
-obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
+obj-$(CONFIG_SERIAL_MFD_HSU) += mfd_core.o mfd_dma.o mfd_pci.o mfd_plat.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
+++ /dev/null
-/*
- * mfd.c: driver for High Speed UART device of Intel Medfield platform
- *
- * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
- *
- * (C) Copyright 2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-/* Notes:
- * 1. DMA channel allocation: 0/1 channel are assigned to port 0,
- * 2/3 chan to port 1, 4/5 chan to port 3. Even number chans
- * are used for RX, odd chans for TX
- *
- * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
- * asserted, only when the HW is reset the DDCD and DDSR will
- * be triggered
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/slab.h>
-#include <linux/serial_reg.h>
-#include <linux/circ_buf.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial_mfd.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/nmi.h>
-#include <linux/io.h>
-#include <linux/debugfs.h>
-#include <linux/pm_runtime.h>
-
-#define HSU_DMA_BUF_SIZE 2048
-
-#define chan_readl(chan, offset) readl(chan->reg + offset)
-#define chan_writel(chan, offset, val) writel(val, chan->reg + offset)
-
-#define mfd_readl(obj, offset) readl(obj->reg + offset)
-#define mfd_writel(obj, offset, val) writel(val, obj->reg + offset)
-
-static int hsu_dma_enable;
-module_param(hsu_dma_enable, int, 0);
-MODULE_PARM_DESC(hsu_dma_enable,
- "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
-
-struct hsu_dma_buffer {
- u8 *buf;
- dma_addr_t dma_addr;
- u32 dma_size;
- u32 ofs;
-};
-
-struct hsu_dma_chan {
- u32 id;
- enum dma_data_direction dirt;
- struct uart_hsu_port *uport;
- void __iomem *reg;
-};
-
-struct uart_hsu_port {
- struct uart_port port;
- unsigned char ier;
- unsigned char lcr;
- unsigned char mcr;
- unsigned int lsr_break_flag;
- char name[12];
- int index;
- struct device *dev;
-
- struct hsu_dma_chan *txc;
- struct hsu_dma_chan *rxc;
- struct hsu_dma_buffer txbuf;
- struct hsu_dma_buffer rxbuf;
- int use_dma; /* flag for DMA/PIO */
- int running;
- int dma_tx_on;
-};
-
-/* Top level data structure of HSU */
-struct hsu_port {
- void __iomem *reg;
- unsigned long paddr;
- unsigned long iolen;
- u32 irq;
-
- struct uart_hsu_port port[3];
- struct hsu_dma_chan chans[10];
-
- struct dentry *debugfs;
-};
-
-static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
-{
- unsigned int val;
-
- if (offset > UART_MSR) {
- offset <<= 2;
- val = readl(up->port.membase + offset);
- } else
- val = (unsigned int)readb(up->port.membase + offset);
-
- return val;
-}
-
-static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
-{
- if (offset > UART_MSR) {
- offset <<= 2;
- writel(value, up->port.membase + offset);
- } else {
- unsigned char val = value & 0xff;
- writeb(val, up->port.membase + offset);
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#define HSU_REGS_BUFSIZE 1024
-
-
-static ssize_t port_show_regs(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct uart_hsu_port *up = file->private_data;
- char *buf;
- u32 len = 0;
- ssize_t ret;
-
- buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MFD HSU port[%d] regs:\n", up->index);
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "=================================\n");
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "IER: \t\t0x%08x\n", serial_in(up, UART_IER));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "PS: \t\t0x%08x\n", serial_in(up, UART_PS));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
-
- if (len > HSU_REGS_BUFSIZE)
- len = HSU_REGS_BUFSIZE;
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return ret;
-}
-
-static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hsu_dma_chan *chan = file->private_data;
- char *buf;
- u32 len = 0;
- ssize_t ret;
-
- buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MFD HSU DMA channel [%d] regs:\n", chan->id);
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "=================================\n");
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
-
- if (len > HSU_REGS_BUFSIZE)
- len = HSU_REGS_BUFSIZE;
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return ret;
-}
-
-static const struct file_operations port_regs_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = port_show_regs,
- .llseek = default_llseek,
-};
-
-static const struct file_operations dma_regs_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = dma_show_regs,
- .llseek = default_llseek,
-};
-
-static int hsu_debugfs_init(struct hsu_port *hsu)
-{
- int i;
- char name[32];
-
- hsu->debugfs = debugfs_create_dir("hsu", NULL);
- if (!hsu->debugfs)
- return -ENOMEM;
-
- for (i = 0; i < 3; i++) {
- snprintf(name, sizeof(name), "port_%d_regs", i);
- debugfs_create_file(name, S_IFREG | S_IRUGO,
- hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
- }
-
- for (i = 0; i < 6; i++) {
- snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
- debugfs_create_file(name, S_IFREG | S_IRUGO,
- hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
- }
-
- return 0;
-}
-
-static void hsu_debugfs_remove(struct hsu_port *hsu)
-{
- if (hsu->debugfs)
- debugfs_remove_recursive(hsu->debugfs);
-}
-
-#else
-static inline int hsu_debugfs_init(struct hsu_port *hsu)
-{
- return 0;
-}
-
-static inline void hsu_debugfs_remove(struct hsu_port *hsu)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
-static void serial_hsu_enable_ms(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
-
- up->ier |= UART_IER_MSI;
- serial_out(up, UART_IER, up->ier);
-}
-
-void hsu_dma_tx(struct uart_hsu_port *up)
-{
- struct circ_buf *xmit = &up->port.state->xmit;
- struct hsu_dma_buffer *dbuf = &up->txbuf;
- int count;
-
- /* test_and_set_bit may be better, but anyway it's in lock protected mode */
- if (up->dma_tx_on)
- return;
-
- /* Update the circ buf info */
- xmit->tail += dbuf->ofs;
- xmit->tail &= UART_XMIT_SIZE - 1;
-
- up->port.icount.tx += dbuf->ofs;
- dbuf->ofs = 0;
-
- /* Disable the channel */
- chan_writel(up->txc, HSU_CH_CR, 0x0);
-
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
- dma_sync_single_for_device(up->port.dev,
- dbuf->dma_addr,
- dbuf->dma_size,
- DMA_TO_DEVICE);
-
- count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
- dbuf->ofs = count;
-
- /* Reprogram the channel */
- chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
- chan_writel(up->txc, HSU_CH_D0TSR, count);
-
- /* Reenable the channel */
- chan_writel(up->txc, HSU_CH_DCR, 0x1
- | (0x1 << 8)
- | (0x1 << 16)
- | (0x1 << 24));
- up->dma_tx_on = 1;
- chan_writel(up->txc, HSU_CH_CR, 0x1);
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&up->port);
-}
-
-/* The buffer is already cache coherent */
-void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
-{
- dbuf->ofs = 0;
-
- chan_writel(rxc, HSU_CH_BSR, 32);
- chan_writel(rxc, HSU_CH_MOTSR, 4);
-
- chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
- chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
- chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
- | (0x1 << 16)
- | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
- );
- chan_writel(rxc, HSU_CH_CR, 0x3);
-}
-
-/* Protected by spin_lock_irqsave(port->lock) */
-static void serial_hsu_start_tx(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
-
- if (up->use_dma) {
- hsu_dma_tx(up);
- } else if (!(up->ier & UART_IER_THRI)) {
- up->ier |= UART_IER_THRI;
- serial_out(up, UART_IER, up->ier);
- }
-}
-
-static void serial_hsu_stop_tx(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- struct hsu_dma_chan *txc = up->txc;
-
- if (up->use_dma)
- chan_writel(txc, HSU_CH_CR, 0x0);
- else if (up->ier & UART_IER_THRI) {
- up->ier &= ~UART_IER_THRI;
- serial_out(up, UART_IER, up->ier);
- }
-}
-
-/* This is always called in spinlock protected mode, so
- * modify timeout timer is safe here */
-void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
-{
- struct hsu_dma_buffer *dbuf = &up->rxbuf;
- struct hsu_dma_chan *chan = up->rxc;
- struct uart_port *port = &up->port;
- struct tty_port *tport = &port->state->port;
- int count;
-
- /*
- * First need to know how many is already transferred,
- * then check if its a timeout DMA irq, and return
- * the trail bytes out, push them up and reenable the
- * channel
- */
-
- /* Timeout IRQ, need wait some time, see Errata 2 */
- if (int_sts & 0xf00)
- udelay(2);
-
- /* Stop the channel */
- chan_writel(chan, HSU_CH_CR, 0x0);
-
- count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
- if (!count) {
- /* Restart the channel before we leave */
- chan_writel(chan, HSU_CH_CR, 0x3);
- return;
- }
-
- dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
- dbuf->dma_size, DMA_FROM_DEVICE);
-
- /*
- * Head will only wrap around when we recycle
- * the DMA buffer, and when that happens, we
- * explicitly set tail to 0. So head will
- * always be greater than tail.
- */
- tty_insert_flip_string(tport, dbuf->buf, count);
- port->icount.rx += count;
-
- dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
- dbuf->dma_size, DMA_FROM_DEVICE);
-
- /* Reprogram the channel */
- chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
- chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
- chan_writel(chan, HSU_CH_DCR, 0x1
- | (0x1 << 8)
- | (0x1 << 16)
- | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
- );
- tty_flip_buffer_push(tport);
-
- chan_writel(chan, HSU_CH_CR, 0x3);
-
-}
-
-static void serial_hsu_stop_rx(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- struct hsu_dma_chan *chan = up->rxc;
-
- if (up->use_dma)
- chan_writel(chan, HSU_CH_CR, 0x2);
- else {
- up->ier &= ~UART_IER_RLSI;
- up->port.read_status_mask &= ~UART_LSR_DR;
- serial_out(up, UART_IER, up->ier);
- }
-}
-
-static inline void receive_chars(struct uart_hsu_port *up, int *status)
-{
- unsigned int ch, flag;
- unsigned int max_count = 256;
-
- do {
- ch = serial_in(up, UART_RX);
- flag = TTY_NORMAL;
- up->port.icount.rx++;
-
- if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
- UART_LSR_FE | UART_LSR_OE))) {
-
- dev_warn(up->dev, "We really rush into ERR/BI case"
- "status = 0x%02x", *status);
- /* For statistics only */
- if (*status & UART_LSR_BI) {
- *status &= ~(UART_LSR_FE | UART_LSR_PE);
- up->port.icount.brk++;
- /*
- * We do the SysRQ and SAK checking
- * here because otherwise the break
- * may get masked by ignore_status_mask
- * or read_status_mask.
- */
- if (uart_handle_break(&up->port))
- goto ignore_char;
- } else if (*status & UART_LSR_PE)
- up->port.icount.parity++;
- else if (*status & UART_LSR_FE)
- up->port.icount.frame++;
- if (*status & UART_LSR_OE)
- up->port.icount.overrun++;
-
- /* Mask off conditions which should be ignored. */
- *status &= up->port.read_status_mask;
-
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
- if (up->port.cons &&
- up->port.cons->index == up->port.line) {
- /* Recover the break flag from console xmit */
- *status |= up->lsr_break_flag;
- up->lsr_break_flag = 0;
- }
-#endif
- if (*status & UART_LSR_BI) {
- flag = TTY_BREAK;
- } else if (*status & UART_LSR_PE)
- flag = TTY_PARITY;
- else if (*status & UART_LSR_FE)
- flag = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char(&up->port, ch))
- goto ignore_char;
-
- uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
- ignore_char:
- *status = serial_in(up, UART_LSR);
- } while ((*status & UART_LSR_DR) && max_count--);
- tty_flip_buffer_push(&up->port.state->port);
-}
-
-static void transmit_chars(struct uart_hsu_port *up)
-{
- struct circ_buf *xmit = &up->port.state->xmit;
- int count;
-
- if (up->port.x_char) {
- serial_out(up, UART_TX, up->port.x_char);
- up->port.icount.tx++;
- up->port.x_char = 0;
- return;
- }
- if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
- serial_hsu_stop_tx(&up->port);
- return;
- }
-
- /* The IRQ is for TX FIFO half-empty */
- count = up->port.fifosize / 2;
-
- do {
- serial_out(up, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-
- up->port.icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (--count > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&up->port);
-
- if (uart_circ_empty(xmit))
- serial_hsu_stop_tx(&up->port);
-}
-
-static inline void check_modem_status(struct uart_hsu_port *up)
-{
- int status;
-
- status = serial_in(up, UART_MSR);
-
- if ((status & UART_MSR_ANY_DELTA) == 0)
- return;
-
- if (status & UART_MSR_TERI)
- up->port.icount.rng++;
- if (status & UART_MSR_DDSR)
- up->port.icount.dsr++;
- /* We may only get DDCD when HW init and reset */
- if (status & UART_MSR_DDCD)
- uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
- /* Will start/stop_tx accordingly */
- if (status & UART_MSR_DCTS)
- uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
-
- wake_up_interruptible(&up->port.state->port.delta_msr_wait);
-}
-
-/*
- * This handles the interrupt from one port.
- */
-static irqreturn_t port_irq(int irq, void *dev_id)
-{
- struct uart_hsu_port *up = dev_id;
- unsigned int iir, lsr;
- unsigned long flags;
-
- if (unlikely(!up->running))
- return IRQ_NONE;
-
- spin_lock_irqsave(&up->port.lock, flags);
- if (up->use_dma) {
- lsr = serial_in(up, UART_LSR);
- if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
- UART_LSR_FE | UART_LSR_OE)))
- dev_warn(up->dev,
- "Got lsr irq while using DMA, lsr = 0x%2x\n",
- lsr);
- check_modem_status(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
- return IRQ_HANDLED;
- }
-
- iir = serial_in(up, UART_IIR);
- if (iir & UART_IIR_NO_INT) {
- spin_unlock_irqrestore(&up->port.lock, flags);
- return IRQ_NONE;
- }
-
- lsr = serial_in(up, UART_LSR);
- if (lsr & UART_LSR_DR)
- receive_chars(up, &lsr);
- check_modem_status(up);
-
- /* lsr will be renewed during the receive_chars */
- if (lsr & UART_LSR_THRE)
- transmit_chars(up);
-
- spin_unlock_irqrestore(&up->port.lock, flags);
- return IRQ_HANDLED;
-}
-
-static inline void dma_chan_irq(struct hsu_dma_chan *chan)
-{
- struct uart_hsu_port *up = chan->uport;
- unsigned long flags;
- u32 int_sts;
-
- spin_lock_irqsave(&up->port.lock, flags);
-
- if (!up->use_dma || !up->running)
- goto exit;
-
- /*
- * No matter what situation, need read clear the IRQ status
- * There is a bug, see Errata 5, HSD 2900918
- */
- int_sts = chan_readl(chan, HSU_CH_SR);
-
- /* Rx channel */
- if (chan->dirt == DMA_FROM_DEVICE)
- hsu_dma_rx(up, int_sts);
-
- /* Tx channel */
- if (chan->dirt == DMA_TO_DEVICE) {
- chan_writel(chan, HSU_CH_CR, 0x0);
- up->dma_tx_on = 0;
- hsu_dma_tx(up);
- }
-
-exit:
- spin_unlock_irqrestore(&up->port.lock, flags);
- return;
-}
-
-static irqreturn_t dma_irq(int irq, void *dev_id)
-{
- struct hsu_port *hsu = dev_id;
- u32 int_sts, i;
-
- int_sts = mfd_readl(hsu, HSU_GBL_DMAISR);
-
- /* Currently we only have 6 channels may be used */
- for (i = 0; i < 6; i++) {
- if (int_sts & 0x1)
- dma_chan_irq(&hsu->chans[i]);
- int_sts >>= 1;
- }
-
- return IRQ_HANDLED;
-}
-
-static unsigned int serial_hsu_tx_empty(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
- unsigned int ret;
-
- spin_lock_irqsave(&up->port.lock, flags);
- ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
-
- return ret;
-}
-
-static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned char status;
- unsigned int ret;
-
- status = serial_in(up, UART_MSR);
-
- ret = 0;
- if (status & UART_MSR_DCD)
- ret |= TIOCM_CAR;
- if (status & UART_MSR_RI)
- ret |= TIOCM_RNG;
- if (status & UART_MSR_DSR)
- ret |= TIOCM_DSR;
- if (status & UART_MSR_CTS)
- ret |= TIOCM_CTS;
- return ret;
-}
-
-static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned char mcr = 0;
-
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- mcr |= up->mcr;
-
- serial_out(up, UART_MCR, mcr);
-}
-
-static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
-
- spin_lock_irqsave(&up->port.lock, flags);
- if (break_state == -1)
- up->lcr |= UART_LCR_SBC;
- else
- up->lcr &= ~UART_LCR_SBC;
- serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-/*
- * What special to do:
- * 1. chose the 64B fifo mode
- * 2. start dma or pio depends on configuration
- * 3. we only allocate dma memory when needed
- */
-static int serial_hsu_startup(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
-
- pm_runtime_get_sync(up->dev);
-
- /*
- * Clear the FIFO buffers and disable them.
- * (they will be reenabled in set_termios())
- */
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_out(up, UART_FCR, 0);
-
- /* Clear the interrupt registers. */
- (void) serial_in(up, UART_LSR);
- (void) serial_in(up, UART_RX);
- (void) serial_in(up, UART_IIR);
- (void) serial_in(up, UART_MSR);
-
- /* Now, initialize the UART, default is 8n1 */
- serial_out(up, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irqsave(&up->port.lock, flags);
-
- up->port.mctrl |= TIOCM_OUT2;
- serial_hsu_set_mctrl(&up->port, up->port.mctrl);
-
- /*
- * Finally, enable interrupts. Note: Modem status interrupts
- * are set via set_termios(), which will be occurring imminently
- * anyway, so we don't enable them here.
- */
- if (!up->use_dma)
- up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
- else
- up->ier = 0;
- serial_out(up, UART_IER, up->ier);
-
- spin_unlock_irqrestore(&up->port.lock, flags);
-
- /* DMA init */
- if (up->use_dma) {
- struct hsu_dma_buffer *dbuf;
- struct circ_buf *xmit = &port->state->xmit;
-
- up->dma_tx_on = 0;
-
- /* First allocate the RX buffer */
- dbuf = &up->rxbuf;
- dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
- if (!dbuf->buf) {
- up->use_dma = 0;
- goto exit;
- }
- dbuf->dma_addr = dma_map_single(port->dev,
- dbuf->buf,
- HSU_DMA_BUF_SIZE,
- DMA_FROM_DEVICE);
- dbuf->dma_size = HSU_DMA_BUF_SIZE;
-
- /* Start the RX channel right now */
- hsu_dma_start_rx_chan(up->rxc, dbuf);
-
- /* Next init the TX DMA */
- dbuf = &up->txbuf;
- dbuf->buf = xmit->buf;
- dbuf->dma_addr = dma_map_single(port->dev,
- dbuf->buf,
- UART_XMIT_SIZE,
- DMA_TO_DEVICE);
- dbuf->dma_size = UART_XMIT_SIZE;
-
- /* This should not be changed all around */
- chan_writel(up->txc, HSU_CH_BSR, 32);
- chan_writel(up->txc, HSU_CH_MOTSR, 4);
- dbuf->ofs = 0;
- }
-
-exit:
- /* And clear the interrupt registers again for luck. */
- (void) serial_in(up, UART_LSR);
- (void) serial_in(up, UART_RX);
- (void) serial_in(up, UART_IIR);
- (void) serial_in(up, UART_MSR);
-
- up->running = 1;
- return 0;
-}
-
-static void serial_hsu_shutdown(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
-
- /* Disable interrupts from this port */
- up->ier = 0;
- serial_out(up, UART_IER, 0);
- up->running = 0;
-
- spin_lock_irqsave(&up->port.lock, flags);
- up->port.mctrl &= ~TIOCM_OUT2;
- serial_hsu_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
-
- /* Disable break condition and FIFOs */
- serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- serial_out(up, UART_FCR, 0);
-
- pm_runtime_put(up->dev);
-}
-
-static void
-serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned char cval, fcr = 0;
- unsigned long flags;
- unsigned int baud, quot;
- u32 ps, mul;
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- cval = UART_LCR_WLEN5;
- break;
- case CS6:
- cval = UART_LCR_WLEN6;
- break;
- case CS7:
- cval = UART_LCR_WLEN7;
- break;
- default:
- case CS8:
- cval = UART_LCR_WLEN8;
- break;
- }
-
- /* CMSPAR isn't supported by this driver */
- termios->c_cflag &= ~CMSPAR;
-
- if (termios->c_cflag & CSTOPB)
- cval |= UART_LCR_STOP;
- if (termios->c_cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(termios->c_cflag & PARODD))
- cval |= UART_LCR_EPAR;
-
- /*
- * The base clk is 50Mhz, and the baud rate come from:
- * baud = 50M * MUL / (DIV * PS * DLAB)
- *
- * For those basic low baud rate we can get the direct
- * scalar from 2746800, like 115200 = 2746800/24. For those
- * higher baud rate, we handle them case by case, mainly by
- * adjusting the MUL/PS registers, and DIV register is kept
- * as default value 0x3d09 to make things simple
- */
- baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
-
- quot = 1;
- ps = 0x10;
- mul = 0x3600;
- switch (baud) {
- case 3500000:
- mul = 0x3345;
- ps = 0xC;
- break;
- case 1843200:
- mul = 0x2400;
- break;
- case 3000000:
- case 2500000:
- case 2000000:
- case 1500000:
- case 1000000:
- case 500000:
- /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
- mul = baud / 500000 * 0x9C4;
- break;
- default:
- /* Use uart_get_divisor to get quot for other baud rates */
- quot = 0;
- }
-
- if (!quot)
- quot = uart_get_divisor(port, baud);
-
- if ((up->port.uartclk / quot) < (2400 * 16))
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
- else if ((up->port.uartclk / quot) < (230400 * 16))
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
- else
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
-
- fcr |= UART_FCR_HSU_64B_FIFO;
-
- /*
- * Ok, we're now changing the port state. Do it with
- * interrupts disabled.
- */
- spin_lock_irqsave(&up->port.lock, flags);
-
- /* Update the per-port timeout */
- uart_update_timeout(port, termios->c_cflag, baud);
-
- up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (termios->c_iflag & INPCK)
- up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
- up->port.read_status_mask |= UART_LSR_BI;
-
- /* Characters to ignore */
- up->port.ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
- if (termios->c_iflag & IGNBRK) {
- up->port.ignore_status_mask |= UART_LSR_BI;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- up->port.ignore_status_mask |= UART_LSR_OE;
- }
-
- /* Ignore all characters if CREAD is not set */
- if ((termios->c_cflag & CREAD) == 0)
- up->port.ignore_status_mask |= UART_LSR_DR;
-
- /*
- * CTS flow control flag and modem status interrupts, disable
- * MSI by default
- */
- up->ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(&up->port, termios->c_cflag))
- up->ier |= UART_IER_MSI;
-
- serial_out(up, UART_IER, up->ier);
-
- if (termios->c_cflag & CRTSCTS)
- up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
- else
- up->mcr &= ~UART_MCR_AFE;
-
- serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
- serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
- serial_out(up, UART_LCR, cval); /* reset DLAB */
- serial_out(up, UART_MUL, mul); /* set MUL */
- serial_out(up, UART_PS, ps); /* set PS */
- up->lcr = cval; /* Save LCR */
- serial_hsu_set_mctrl(&up->port, up->port.mctrl);
- serial_out(up, UART_FCR, fcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-static void
-serial_hsu_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
-{
-}
-
-static void serial_hsu_release_port(struct uart_port *port)
-{
-}
-
-static int serial_hsu_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-static void serial_hsu_config_port(struct uart_port *port, int flags)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- up->port.type = PORT_MFD;
-}
-
-static int
-serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- /* We don't want the core code to modify any port params */
- return -EINVAL;
-}
-
-static const char *
-serial_hsu_type(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- return up->name;
-}
-
-/* Mainly for uart console use */
-static struct uart_hsu_port *serial_hsu_ports[3];
-static struct uart_driver serial_hsu_reg;
-
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-/* Wait for transmitter & holding register to empty */
-static inline void wait_for_xmitr(struct uart_hsu_port *up)
-{
- unsigned int status, tmout = 1000;
-
- /* Wait up to 1ms for the character to be sent. */
- do {
- status = serial_in(up, UART_LSR);
-
- if (status & UART_LSR_BI)
- up->lsr_break_flag = UART_LSR_BI;
-
- if (--tmout == 0)
- break;
- udelay(1);
- } while (!(status & BOTH_EMPTY));
-
- /* Wait up to 1s for flow control if necessary */
- if (up->port.flags & UPF_CONS_FLOW) {
- tmout = 1000000;
- while (--tmout &&
- ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
- udelay(1);
- }
-}
-
-static void serial_hsu_console_putchar(struct uart_port *port, int ch)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
-
- wait_for_xmitr(up);
- serial_out(up, UART_TX, ch);
-}
-
-/*
- * Print a string to the serial port trying not to disturb
- * any possible real use of the port...
- *
- * The console_lock must be held when we get here.
- */
-static void
-serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_hsu_port *up = serial_hsu_ports[co->index];
- unsigned long flags;
- unsigned int ier;
- int locked = 1;
-
- touch_nmi_watchdog();
-
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
-
- /* First save the IER then disable the interrupts */
- ier = serial_in(up, UART_IER);
- serial_out(up, UART_IER, 0);
-
- uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
-
- /*
- * Finally, wait for transmitter to become empty
- * and restore the IER
- */
- wait_for_xmitr(up);
- serial_out(up, UART_IER, ier);
-
- if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
-}
-
-static struct console serial_hsu_console;
-
-static int __init
-serial_hsu_console_setup(struct console *co, char *options)
-{
- struct uart_hsu_port *up;
- int baud = 115200;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (co->index == -1 || co->index >= serial_hsu_reg.nr)
- co->index = 0;
- up = serial_hsu_ports[co->index];
- if (!up)
- return -ENODEV;
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(&up->port, co, baud, parity, bits, flow);
-}
-
-static struct console serial_hsu_console = {
- .name = "ttyMFD",
- .write = serial_hsu_console_write,
- .device = uart_console_device,
- .setup = serial_hsu_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &serial_hsu_reg,
-};
-
-#define SERIAL_HSU_CONSOLE (&serial_hsu_console)
-#else
-#define SERIAL_HSU_CONSOLE NULL
-#endif
-
-struct uart_ops serial_hsu_pops = {
- .tx_empty = serial_hsu_tx_empty,
- .set_mctrl = serial_hsu_set_mctrl,
- .get_mctrl = serial_hsu_get_mctrl,
- .stop_tx = serial_hsu_stop_tx,
- .start_tx = serial_hsu_start_tx,
- .stop_rx = serial_hsu_stop_rx,
- .enable_ms = serial_hsu_enable_ms,
- .break_ctl = serial_hsu_break_ctl,
- .startup = serial_hsu_startup,
- .shutdown = serial_hsu_shutdown,
- .set_termios = serial_hsu_set_termios,
- .pm = serial_hsu_pm,
- .type = serial_hsu_type,
- .release_port = serial_hsu_release_port,
- .request_port = serial_hsu_request_port,
- .config_port = serial_hsu_config_port,
- .verify_port = serial_hsu_verify_port,
-};
-
-static struct uart_driver serial_hsu_reg = {
- .owner = THIS_MODULE,
- .driver_name = "MFD serial",
- .dev_name = "ttyMFD",
- .major = TTY_MAJOR,
- .minor = 128,
- .nr = 3,
- .cons = SERIAL_HSU_CONSOLE,
-};
-
-#ifdef CONFIG_PM
-static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- void *priv = pci_get_drvdata(pdev);
- struct uart_hsu_port *up;
-
- /* Make sure this is not the internal dma controller */
- if (priv && (pdev->device != 0x081E)) {
- up = priv;
- uart_suspend_port(&serial_hsu_reg, &up->port);
- }
-
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int serial_hsu_resume(struct pci_dev *pdev)
-{
- void *priv = pci_get_drvdata(pdev);
- struct uart_hsu_port *up;
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- ret = pci_enable_device(pdev);
- if (ret)
- dev_warn(&pdev->dev,
- "HSU: can't re-enable device, try to continue\n");
-
- if (priv && (pdev->device != 0x081E)) {
- up = priv;
- uart_resume_port(&serial_hsu_reg, &up->port);
- }
- return 0;
-}
-#else
-#define serial_hsu_suspend NULL
-#define serial_hsu_resume NULL
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int serial_hsu_runtime_idle(struct device *dev)
-{
- int err;
-
- err = pm_schedule_suspend(dev, 500);
- if (err)
- return -EBUSY;
-
- return 0;
-}
-
-static int serial_hsu_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int serial_hsu_runtime_resume(struct device *dev)
-{
- return 0;
-}
-#else
-#define serial_hsu_runtime_idle NULL
-#define serial_hsu_runtime_suspend NULL
-#define serial_hsu_runtime_resume NULL
-#endif
-
-static const struct dev_pm_ops serial_hsu_pm_ops = {
- .runtime_suspend = serial_hsu_runtime_suspend,
- .runtime_resume = serial_hsu_runtime_resume,
- .runtime_idle = serial_hsu_runtime_idle,
-};
-
-/* temp global pointer before we settle down on using one or four PCI dev */
-static struct hsu_port *phsu;
-
-static int serial_hsu_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct uart_hsu_port *uport;
- int index, ret;
-
- printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n",
- pdev->vendor, pdev->device);
-
- switch (pdev->device) {
- case 0x081B:
- index = 0;
- break;
- case 0x081C:
- index = 1;
- break;
- case 0x081D:
- index = 2;
- break;
- case 0x081E:
- /* internal DMA controller */
- index = 3;
- break;
- default:
- dev_err(&pdev->dev, "HSU: out of index!");
- return -ENODEV;
- }
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- if (index == 3) {
- /* DMA controller */
- ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu);
- if (ret) {
- dev_err(&pdev->dev, "can not get IRQ\n");
- goto err_disable;
- }
- pci_set_drvdata(pdev, phsu);
- } else {
- /* UART port 0~2 */
- uport = &phsu->port[index];
- uport->port.irq = pdev->irq;
- uport->port.dev = &pdev->dev;
- uport->dev = &pdev->dev;
-
- ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport);
- if (ret) {
- dev_err(&pdev->dev, "can not get IRQ\n");
- goto err_disable;
- }
- uart_add_one_port(&serial_hsu_reg, &uport->port);
-
- pci_set_drvdata(pdev, uport);
- }
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-
-err_disable:
- pci_disable_device(pdev);
- return ret;
-}
-
-static void hsu_global_init(void)
-{
- struct hsu_port *hsu;
- struct uart_hsu_port *uport;
- struct hsu_dma_chan *dchan;
- int i, ret;
-
- hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
- if (!hsu)
- return;
-
- /* Get basic io resource and map it */
- hsu->paddr = 0xffa28000;
- hsu->iolen = 0x1000;
-
- if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
- pr_warning("HSU: error in request mem region\n");
-
- hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
- if (!hsu->reg) {
- pr_err("HSU: error in ioremap\n");
- ret = -ENOMEM;
- goto err_free_region;
- }
-
- /* Initialise the 3 UART ports */
- uport = hsu->port;
- for (i = 0; i < 3; i++) {
- uport->port.type = PORT_MFD;
- uport->port.iotype = UPIO_MEM;
- uport->port.mapbase = (resource_size_t)hsu->paddr
- + HSU_PORT_REG_OFFSET
- + i * HSU_PORT_REG_LENGTH;
- uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
- + i * HSU_PORT_REG_LENGTH;
-
- sprintf(uport->name, "hsu_port%d", i);
- uport->port.fifosize = 64;
- uport->port.ops = &serial_hsu_pops;
- uport->port.line = i;
- uport->port.flags = UPF_IOREMAP;
- /* set the scalable maxim support rate to 2746800 bps */
- uport->port.uartclk = 115200 * 24 * 16;
-
- uport->running = 0;
- uport->txc = &hsu->chans[i * 2];
- uport->rxc = &hsu->chans[i * 2 + 1];
-
- serial_hsu_ports[i] = uport;
- uport->index = i;
-
- if (hsu_dma_enable & (1<<i))
- uport->use_dma = 1;
- else
- uport->use_dma = 0;
-
- uport++;
- }
-
- /* Initialise 6 dma channels */
- dchan = hsu->chans;
- for (i = 0; i < 6; i++) {
- dchan->id = i;
- dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- dchan->uport = &hsu->port[i/2];
- dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
- i * HSU_DMA_CHANS_REG_LENGTH;
-
- dchan++;
- }
-
- phsu = hsu;
- hsu_debugfs_init(hsu);
- return;
-
-err_free_region:
- release_mem_region(hsu->paddr, hsu->iolen);
- kfree(hsu);
- return;
-}
-
-static void serial_hsu_remove(struct pci_dev *pdev)
-{
- void *priv = pci_get_drvdata(pdev);
- struct uart_hsu_port *up;
-
- if (!priv)
- return;
-
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
-
- /* For port 0/1/2, priv is the address of uart_hsu_port */
- if (pdev->device != 0x081E) {
- up = priv;
- uart_remove_one_port(&serial_hsu_reg, &up->port);
- }
-
- pci_set_drvdata(pdev, NULL);
- free_irq(pdev->irq, priv);
- pci_disable_device(pdev);
-}
-
-/* First 3 are UART ports, and the 4th is the DMA */
-static const struct pci_device_id pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) },
- {},
-};
-
-static struct pci_driver hsu_pci_driver = {
- .name = "HSU serial",
- .id_table = pci_ids,
- .probe = serial_hsu_probe,
- .remove = serial_hsu_remove,
- .suspend = serial_hsu_suspend,
- .resume = serial_hsu_resume,
- .driver = {
- .pm = &serial_hsu_pm_ops,
- },
-};
-
-static int __init hsu_pci_init(void)
-{
- int ret;
-
- hsu_global_init();
-
- ret = uart_register_driver(&serial_hsu_reg);
- if (ret)
- return ret;
-
- return pci_register_driver(&hsu_pci_driver);
-}
-
-static void __exit hsu_pci_exit(void)
-{
- pci_unregister_driver(&hsu_pci_driver);
- uart_unregister_driver(&serial_hsu_reg);
-
- hsu_debugfs_remove(phsu);
-
- kfree(phsu);
-}
-
-module_init(hsu_pci_init);
-module_exit(hsu_pci_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:medfield-hsu");
--- /dev/null
+#ifndef _MFD_H
+#define _MFD_H
+
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_mfd.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/dma-direction.h>
+#include <asm/intel_mid_hsu.h>
+
+#define HSU_PORT_MAX 8
+#define HSU_DMA_BUF_SIZE 2048
+#define HSU_Q_MAX 4096
+#define HSU_CL_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#define HSU_DMA_BSR 32
+#define HSU_DMA_MOTSR 4
+#define HSU_PIO_RX_ERR 0x06
+#define HSU_PIO_RX_AVB 0x04
+#define HSU_PIO_RX_TMO 0x0C
+#define HSU_PIO_TX_REQ 0x02
+
+enum {
+ flag_console = 0,
+ flag_reopen,
+ flag_suspend,
+ flag_active,
+ flag_set_alt,
+ flag_tx_on,
+ flag_rx_on,
+ flag_rx_pending,
+ flag_startup,
+ flag_cmd_on,
+ flag_cmd_off,
+};
+
+enum {
+ qcmd_overflow = 0,
+ qcmd_get_msr,
+ qcmd_set_mcr,
+ qcmd_set_ier,
+ qcmd_start_rx,
+ qcmd_stop_rx,
+ qcmd_start_tx,
+ qcmd_stop_tx,
+ qcmd_cl,
+ qcmd_port_irq,
+ qcmd_dma_irq,
+ qcmd_enable_irq,
+ qcmd_cmd_off,
+ qcmd_max,
+};
+
+enum {
+ context_save,
+ context_load,
+};
+
+struct hsu_dma_buffer {
+ u8 *buf;
+ dma_addr_t dma_addr;
+ u32 dma_size;
+ u32 ofs;
+};
+
+struct hsu_dma_chan {
+ u32 id;
+ enum dma_data_direction dirt;
+ struct uart_hsu_port *uport;
+ void __iomem *reg;
+ u32 cr;
+ u32 dcr;
+ u32 sar;
+ u32 tsr;
+};
+
+struct dw_dma_priv {
+ struct intel_mid_dma_slave txs;
+ struct intel_mid_dma_slave rxs;
+
+ struct uart_hsu_port *up;
+
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+
+ /* phy address of the Data register */
+ dma_addr_t dma_addr;
+ struct pci_dev *dmac;
+};
+
+struct intel_dma_priv {
+ unsigned int tx_addr;
+ struct hsu_dma_chan *txc;
+ struct hsu_dma_chan *rxc;
+};
+
+struct hsu_dma_ops {
+ int (*init)(struct uart_hsu_port *up);
+ int (*exit)(struct uart_hsu_port *up);
+ int (*suspend)(struct uart_hsu_port *up);
+ int (*resume)(struct uart_hsu_port *up);
+ void (*start_tx)(struct uart_hsu_port *up);
+ void (*stop_tx)(struct uart_hsu_port *up);
+ void (*start_rx)(struct uart_hsu_port *up);
+ void (*stop_rx)(struct uart_hsu_port *up);
+ /* op will be context_save or context_load */
+ void (*context_op)(struct uart_hsu_port *up, int op);
+};
+
+struct uart_hsu_port {
+ struct uart_port port;
+ struct mutex q_mutex;
+ int q_start;
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ struct tasklet_struct tasklet;
+ struct circ_buf qcirc;
+ int qbuf[HSU_Q_MAX];
+ struct circ_buf cl_circ;
+ spinlock_t cl_lock;
+
+ /* Intel HSU or Designware */
+ int hw_type;
+
+ unsigned char msr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char lsr;
+ unsigned char dll;
+ unsigned char dlm;
+ unsigned char fcr;
+ /* intel_hsu's clk param */
+ unsigned int mul;
+ unsigned int div;
+ unsigned int ps;
+
+ /* Buffered value due to runtime PM and sharing IRQ */
+ unsigned char iir;
+
+ /* intel_dw's clk param */
+ unsigned int m;
+ unsigned int n;
+
+ unsigned int lsr_break_flag;
+ char name[24];
+ int index;
+ struct device *dev;
+
+ unsigned int tx_addr;
+ struct hsu_dma_chan *txc;
+ struct hsu_dma_chan *rxc;
+ struct hsu_dma_buffer txbuf;
+ struct hsu_dma_buffer rxbuf;
+
+ unsigned char rxc_chcr_save;
+
+ unsigned long flags;
+
+ unsigned int qcmd_num;
+ unsigned int qcmd_done;
+ unsigned int port_irq_num;
+ unsigned int port_irq_cmddone;
+ unsigned int port_irq_no_alt;
+ unsigned int port_irq_no_startup;
+ unsigned int port_irq_pio_no_irq_pend;
+ unsigned int port_irq_pio_tx_req;
+ unsigned int port_irq_pio_rx_avb;
+ unsigned int port_irq_pio_rx_err;
+ unsigned int port_irq_pio_rx_timeout;
+ unsigned int cts_status;
+ unsigned int dma_irq_num;
+ unsigned int dma_invalid_irq_num;
+ unsigned int dma_irq_cmddone;
+ unsigned int dma_tx_irq_cmddone;
+ unsigned int dma_rx_irq_cmddone;
+ unsigned int dma_rx_tmt_irq_cmddone;
+ unsigned int tasklet_done;
+ unsigned int workq_done;
+ unsigned int in_workq;
+ unsigned int in_tasklet;
+
+ unsigned int byte_delay;
+
+ int use_dma; /* flag for DMA/PIO */
+ unsigned int dma_irq;
+ unsigned int port_dma_sts;
+
+ void *dma_priv;
+ struct hsu_dma_ops *dma_ops;
+ struct pm_qos_request qos;
+ int dma_inited;
+};
+
+struct hsu_port {
+ int dma_irq;
+ int port_num;
+ int irq_port_and_dma;
+ struct hsu_port_cfg *configs[HSU_PORT_MAX];
+ void __iomem *reg;
+ struct uart_hsu_port port[HSU_PORT_MAX];
+ struct hsu_dma_chan chans[HSU_PORT_MAX * 2];
+ spinlock_t dma_lock;
+ struct dentry *debugfs;
+};
+
+#define chan_readl(chan, offset) readl(chan->reg + offset)
+#define chan_writel(chan, offset, val) writel(val, chan->reg + offset)
+
+#define mfd_readl(obj, offset) readl(obj->reg + offset)
+#define mfd_writel(obj, offset, val) writel(val, obj->reg + offset)
+
+static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
+{
+ unsigned int val;
+
+ if (offset > UART_MSR || up->hw_type == hsu_dw) {
+ offset <<= 2;
+ val = readl(up->port.membase + offset);
+ } else
+ val = (unsigned int)readb(up->port.membase + offset);
+
+ return val;
+}
+
+static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
+{
+ if (offset > UART_MSR || up->hw_type == hsu_dw) {
+ offset <<= 2;
+ writel(value, up->port.membase + offset);
+ } else {
+ unsigned char val = value & 0xff;
+ writeb(val, up->port.membase + offset);
+ }
+}
+void serial_sched_cmd(struct uart_hsu_port *up, char cmd);
+extern struct hsu_dma_ops *pdw_dma_ops;
+extern struct hsu_dma_ops intel_dma_ops;
+
+struct uart_hsu_port *serial_hsu_port_setup(struct device *pdev, int port,
+ resource_size_t start, resource_size_t len, int irq);
+void serial_hsu_port_free(struct uart_hsu_port *up);
+void serial_hsu_port_shutdown(struct uart_hsu_port *up);
+int serial_hsu_dma_setup(struct device *pdev,
+ resource_size_t start, resource_size_t len, unsigned int irq, int share);
+void serial_hsu_dma_free(void);
+int serial_hsu_do_suspend(struct uart_hsu_port *up);
+int serial_hsu_do_resume(struct uart_hsu_port *up);
+int serial_hsu_do_runtime_idle(struct uart_hsu_port *up);
+
+#include "mfd_trace.h"
+#endif
--- /dev/null
+/*
+ * mfd_core.c: driver core for High Speed UART device of Intel Medfield platform
+ *
+ * Refer to pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: channels 0/1 are assigned to port 0,
+ *    2/3 to port 1, and 4/5 to port 2. Even-numbered channels
+ *    are used for TX, odd-numbered channels for RX
+ *
+ * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
+ * asserted, only when the HW is reset the DDCD and DDSR will
+ * be triggered
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/irq.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#define CREATE_TRACE_POINTS
+#include "mfd.h"
+
+static int hsu_dma_enable = 0xff;
+module_param(hsu_dma_enable, int, 0);
+MODULE_PARM_DESC(hsu_dma_enable,
+ "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
+
+static struct hsu_port hsu;
+static struct hsu_port *phsu = &hsu;
+static struct uart_driver serial_hsu_reg;
+static struct hsu_port_cfg *hsu_port_func_cfg;
+
+static void serial_hsu_command(struct uart_hsu_port *up);
+
+int hsu_register_board_info(void *inf)
+{
+ hsu_port_func_cfg = inf;
+ return 0;
+}
+
+static inline int check_qcmd(struct uart_hsu_port *up, char *cmd)
+{
+ struct circ_buf *circ = &up->qcirc;
+ char *buf;
+
+ buf = circ->buf + circ->tail;
+ *cmd = *buf;
+ return CIRC_CNT(circ->head, circ->tail, HSU_Q_MAX);
+}
+
+static inline void insert_qcmd(struct uart_hsu_port *up, char cmd)
+{
+ struct circ_buf *circ = &up->qcirc;
+ char *buf;
+ char last_cmd;
+
+ trace_hsu_cmd_insert(up->index, cmd);
+ if (check_qcmd(up, &last_cmd) && last_cmd == cmd &&
+ cmd != qcmd_enable_irq && cmd != qcmd_port_irq &&
+ cmd != qcmd_dma_irq)
+ return;
+ trace_hsu_cmd_add(up->index, cmd);
+ up->qcmd_num++;
+ buf = circ->buf + circ->head;
+ if (CIRC_SPACE(circ->head, circ->tail, HSU_Q_MAX) < 1)
+ *buf = qcmd_overflow;
+ else {
+ *buf = cmd;
+ circ->head++;
+ if (circ->head == HSU_Q_MAX)
+ circ->head = 0;
+ }
+}
+
+static inline int get_qcmd(struct uart_hsu_port *up, char *cmd)
+{
+ struct circ_buf *circ = &up->qcirc;
+ char *buf;
+
+ if (!CIRC_CNT(circ->head, circ->tail, HSU_Q_MAX))
+ return 0;
+ buf = circ->buf + circ->tail;
+ *cmd = *buf;
+ circ->tail++;
+ if (circ->tail == HSU_Q_MAX)
+ circ->tail = 0;
+ up->qcmd_done++;
+ return 1;
+}
+
+static inline void cl_put_char(struct uart_hsu_port *up, char c)
+{
+ struct circ_buf *circ = &up->cl_circ;
+ char *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->cl_lock, flags);
+ buf = circ->buf + circ->head;
+ if (CIRC_SPACE(circ->head, circ->tail, HSU_CL_BUF_LEN) > 1) {
+ *buf = c;
+ circ->head++;
+ if (circ->head == HSU_CL_BUF_LEN)
+ circ->head = 0;
+ }
+ spin_unlock_irqrestore(&up->cl_lock, flags);
+}
+
+static inline int cl_get_char(struct uart_hsu_port *up, char *c)
+{
+ struct circ_buf *circ = &up->cl_circ;
+ char *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->cl_lock, flags);
+ if (!CIRC_CNT(circ->head, circ->tail, HSU_CL_BUF_LEN)) {
+ spin_unlock_irqrestore(&up->cl_lock, flags);
+ return 0;
+ }
+ buf = circ->buf + circ->tail;
+ *c = *buf;
+ circ->tail++;
+ if (circ->tail == HSU_CL_BUF_LEN)
+ circ->tail = 0;
+ spin_unlock_irqrestore(&up->cl_lock, flags);
+ return 1;
+}
+
+
+
+void serial_sched_cmd(struct uart_hsu_port *up, char cmd)
+{
+ pm_runtime_get(up->dev);
+ insert_qcmd(up, cmd);
+ if (test_bit(flag_cmd_on, &up->flags)) {
+ if (up->use_dma)
+ tasklet_schedule(&up->tasklet);
+ else
+ queue_work(up->workqueue, &up->work);
+ }
+ pm_runtime_put(up->dev);
+}
+
+static inline void serial_sched_sync(struct uart_hsu_port *up)
+{
+ mutex_lock(&up->q_mutex);
+ if (up->q_start > 0) {
+ if (up->use_dma) {
+ tasklet_disable(&up->tasklet);
+ serial_hsu_command(up);
+ tasklet_enable(&up->tasklet);
+ } else {
+ flush_workqueue(up->workqueue);
+ }
+ }
+ mutex_unlock(&up->q_mutex);
+}
+
+static inline void serial_sched_start(struct uart_hsu_port *up)
+{
+ unsigned long flags;
+
+ mutex_lock(&up->q_mutex);
+ up->q_start++;
+ if (up->q_start == 1) {
+ clear_bit(flag_cmd_off, &up->flags);
+ spin_lock_irqsave(&up->port.lock, flags);
+ set_bit(flag_cmd_on, &up->flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ if (up->use_dma)
+ tasklet_schedule(&up->tasklet);
+ else
+ queue_work(up->workqueue, &up->work);
+ }
+ mutex_unlock(&up->q_mutex);
+}
+
+static inline void serial_sched_stop(struct uart_hsu_port *up)
+{
+ unsigned long flags;
+
+ mutex_lock(&up->q_mutex);
+ up->q_start--;
+ if (up->q_start == 0) {
+ spin_lock_irqsave(&up->port.lock, flags);
+ clear_bit(flag_cmd_on, &up->flags);
+ insert_qcmd(up, qcmd_cmd_off);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ if (up->use_dma) {
+ tasklet_schedule(&up->tasklet);
+ while (!test_bit(flag_cmd_off, &up->flags))
+ cpu_relax();
+ } else {
+ queue_work(up->workqueue, &up->work);
+ flush_workqueue(up->workqueue);
+ }
+ }
+ mutex_unlock(&up->q_mutex);
+}
+
+static void serial_set_alt(int index)
+{
+ struct uart_hsu_port *up = phsu->port + index;
+ struct hsu_dma_chan *txc = up->txc;
+ struct hsu_dma_chan *rxc = up->rxc;
+ struct hsu_port_cfg *cfg = phsu->configs[index];
+
+ if (test_bit(flag_set_alt, &up->flags))
+ return;
+
+ trace_hsu_func_start(up->index, __func__);
+ pm_runtime_get_sync(up->dev);
+ disable_irq(up->port.irq);
+ disable_irq(up->dma_irq);
+ serial_sched_stop(up);
+ if (up->use_dma && up->hw_type == hsu_intel) {
+ txc->uport = up;
+ rxc->uport = up;
+ }
+ dev_set_drvdata(up->dev, up);
+ if (cfg->hw_set_alt)
+ cfg->hw_set_alt(index);
+ if (cfg->hw_set_rts)
+ cfg->hw_set_rts(up->index, 0);
+ set_bit(flag_set_alt, &up->flags);
+ serial_sched_start(up);
+ enable_irq(up->dma_irq);
+ enable_irq(up->port.irq);
+ pm_runtime_put(up->dev);
+ trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_clear_alt(int index)
+{
+ struct uart_hsu_port *up = phsu->port + index;
+ struct hsu_port_cfg *cfg = phsu->configs[index];
+
+ if (!test_bit(flag_set_alt, &up->flags))
+ return;
+
+ pm_runtime_get_sync(up->dev);
+ disable_irq(up->port.irq);
+ disable_irq(up->dma_irq);
+ serial_sched_stop(up);
+ if (cfg->hw_set_rts)
+ cfg->hw_set_rts(up->index, 1);
+ clear_bit(flag_set_alt, &up->flags);
+ serial_sched_start(up);
+ enable_irq(up->dma_irq);
+ enable_irq(up->port.irq);
+ pm_runtime_put(up->dev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define HSU_DBGFS_BUFSIZE 8192
+
+/* debugfs open: stash the inode's private pointer for the read handlers */
+static int hsu_show_regs_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/*
+ * debugfs read handler: format the UART register set of one HSU port
+ * into a temporary buffer and copy it to userspace.
+ *
+ * Holds a runtime-PM reference while touching the registers.
+ * Returns the number of bytes copied, or -ENOMEM if the scratch buffer
+ * cannot be allocated (previously returned 0, which userspace reads as
+ * EOF and silently hides the failure).
+ */
+static ssize_t port_show_regs(struct file *file, char __user *user_buf,
+			size_t count, loff_t *ppos)
+{
+	struct uart_hsu_port *up = file->private_data;
+	char *buf;
+	u32 len = 0;
+	ssize_t ret;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pm_runtime_get_sync(up->dev);
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MFD HSU port[%d] regs:\n", up->index);
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"=================================\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"IER: \t\t0x%08x\n", serial_in(up, UART_IER));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"PS: \t\t0x%08x\n", serial_in(up, UART_PS));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
+	pm_runtime_put(up->dev);
+
+	/* snprintf returns the would-be length; clamp before copying out */
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler: dump the register set of one HSU DMA channel.
+ *
+ * Fixes: the D1/D2/D3 descriptor registers were all mislabeled
+ * "D0SAR"/"D0TSR" in the output (copy-paste), making the dump ambiguous;
+ * also return -ENOMEM instead of 0 (EOF) when allocation fails.
+ */
+static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
+			size_t count, loff_t *ppos)
+{
+	struct hsu_dma_chan *chan = file->private_data;
+	char *buf;
+	u32 len = 0;
+	ssize_t ret;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pm_runtime_get_sync(chan->uport->dev);
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MFD HSU DMA channel [%d] regs:\n", chan->id);
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"=================================\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D1SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D1TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D2SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D2TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D3SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D3TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
+	pm_runtime_put(chan->uport->dev);
+
+	/* snprintf returns the would-be length; clamp before copying out */
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler: dump the software state (flags, counters, queue
+ * depths) of every HSU port into one buffer for userspace.
+ *
+ * Unlike the register dumps this touches only driver state, so no
+ * runtime-PM reference is taken here.
+ */
+static ssize_t hsu_dump_show(struct file *file, char __user *user_buf,
+			size_t count, loff_t *ppos)
+{
+	struct uart_hsu_port *up;
+	struct hsu_port_cfg *cfg;
+	char *buf;
+	char cmd;
+	int i;
+	u32 len = 0;
+	ssize_t ret;
+	struct irq_desc *dma_irqdesc = irq_to_desc(phsu->dma_irq);
+	struct irq_desc *port_irqdesc;
+	struct circ_buf *xmit;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"HSU status dump:\n");
+	/* irq_desc->depth > 0 means the IRQ is currently disabled */
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma irq (>0: disable): %d\n",
+			dma_irqdesc ? dma_irqdesc->depth : 0);
+	for (i = 0; i < phsu->port_num; i++) {
+		up = phsu->port + i;
+		/*
+		 * NOTE(review): indexes the static hsu_port_func_cfg table
+		 * while other functions use phsu->configs[i] — confirm the
+		 * two always agree.
+		 */
+		cfg = hsu_port_func_cfg + i;
+		port_irqdesc = irq_to_desc(up->port.irq);
+		xmit = &up->port.state->xmit;
+
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"HSU port[%d] %s:\n", up->index, cfg->name);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"xmit empty[%d] xmit pending[%d]\n",
+				uart_circ_empty(xmit),
+				(int)uart_circ_chars_pending(xmit));
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tsuspend idle: %d\n", cfg->idle);
+		if (cfg->has_alt)
+			len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+					"\talt port: %d\n", cfg->alt);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tforce_suspend: %d\n", cfg->force_suspend);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tcts status: %d\n", up->cts_status);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tuse_dma: %s\n",
+				up->use_dma ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tflag_console: %s\n",
+				test_bit(flag_console, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tflag_suspend: %s\n",
+				test_bit(flag_suspend, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tflag_active: %s\n",
+				test_bit(flag_active, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tflag_set_alt: %s\n",
+				test_bit(flag_set_alt, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tflag_startup: %s\n",
+				test_bit(flag_startup, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tqcmd q_start: %d\n", up->q_start);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tqcmd total count: %d\n", up->qcmd_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tqcmd done count: %d\n", up->qcmd_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq count: %d\n", up->port_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq cmddone: %d\n", up->port_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq cts: %d\n", up->port.icount.cts);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq rng: %d\n", up->port.icount.rng);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq dsr: %d\n", up->port.icount.dsr);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq no irq pending: %d\n",
+				up->port_irq_pio_no_irq_pend);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq no alt: %d\n",
+				up->port_irq_no_alt);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq no startup: %d\n",
+				up->port_irq_no_startup);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq pio rx error: %d\n",
+				up->port_irq_pio_rx_err);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq pio rx available: %d\n",
+				up->port_irq_pio_rx_avb);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq pio rx fifo timeout: %d\n",
+				up->port_irq_pio_rx_timeout);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq pio tx request: %d\n",
+				up->port_irq_pio_tx_req);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tdma invalid irq count: %d\n",
+				up->dma_invalid_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tdma irq count: %d\n", up->dma_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tdma irq cmddone: %d\n", up->dma_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tdma tx irq cmddone: %d\n",
+				up->dma_tx_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport&dma rx irq cmddone: %d\n",
+				up->dma_rx_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport&dma rx timeout irq cmddone: %d\n",
+				up->dma_rx_tmt_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\ttasklet done: %d\n", up->tasklet_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tworkq done: %d\n", up->workq_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tqcmd pending count: %d\n", check_qcmd(up, &cmd));
+		if (check_qcmd(up, &cmd))
+			len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+					"\tqcmd pending next: %d\n", cmd);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tin tasklet: %d\n", up->in_tasklet);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tin workq: %d\n", up->in_workq);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tport irq (>0: disable): %d\n",
+				port_irqdesc ? port_irqdesc->depth : 0);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tbyte delay: %d\n", up->byte_delay);
+	}
+	/* snprintf returns the would-be length; clamp before copying out */
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+
+/* debugfs ops: per-port UART register dump (port_N_regs) */
+static const struct file_operations port_regs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= hsu_show_regs_open,
+	.read		= port_show_regs,
+	.llseek		= default_llseek,
+};
+
+/* debugfs ops: per-channel DMA register dump (dma_chan_N_regs) */
+static const struct file_operations dma_regs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= hsu_show_regs_open,
+	.read		= dma_show_regs,
+	.llseek		= default_llseek,
+};
+
+/* debugfs ops: whole-controller software state dump (dump_status) */
+static const struct file_operations hsu_dump_ops = {
+	.owner		= THIS_MODULE,
+	.read		= hsu_dump_show,
+	.llseek		= default_llseek,
+};
+
+/*
+ * Create the "hsu" debugfs directory with one register-dump file per
+ * UART port, one per DMA channel, and a global status file.
+ * Returns 0 on success, -ENOMEM if the directory cannot be created.
+ */
+static int hsu_debugfs_init(struct hsu_port *hsu)
+{
+	char name[32];
+	int idx;
+
+	hsu->debugfs = debugfs_create_dir("hsu", NULL);
+	if (!hsu->debugfs)
+		return -ENOMEM;
+
+	for (idx = 0; idx < 3; idx++) {
+		snprintf(name, sizeof(name), "port_%d_regs", idx);
+		debugfs_create_file(name, S_IFREG | S_IRUGO, hsu->debugfs,
+				    &hsu->port[idx], &port_regs_ops);
+	}
+
+	for (idx = 0; idx < 6; idx++) {
+		snprintf(name, sizeof(name), "dma_chan_%d_regs", idx);
+		debugfs_create_file(name, S_IFREG | S_IRUGO, hsu->debugfs,
+				    &hsu->chans[idx], &dma_regs_ops);
+	}
+
+	debugfs_create_file("dump_status", S_IFREG | S_IRUGO, hsu->debugfs,
+			    NULL, &hsu_dump_ops);
+
+	return 0;
+}
+
+/* Tear down the debugfs tree created by hsu_debugfs_init(). */
+static void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+	/* debugfs_remove_recursive() is a no-op on NULL; no guard needed */
+	debugfs_remove_recursive(hsu->debugfs);
+}
+
+#else
+/* CONFIG_DEBUG_FS disabled: provide no-op stubs so callers need no #ifdef */
+static inline int hsu_debugfs_init(struct hsu_port *hsu)
+{
+	return 0;
+}
+
+static inline void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * uart_ops::enable_ms - enable modem-status interrupts.
+ * Sets UART_IER_MSI in the cached IER; the register itself is written
+ * asynchronously by the qcmd_set_ier command.
+ */
+static void serial_hsu_enable_ms(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	up->ier |= UART_IER_MSI;
+	serial_sched_cmd(up, qcmd_set_ier);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* Protected by spin_lock_irqsave(port->lock) */
+/*
+ * uart_ops::start_tx - kick transmission by queuing qcmd_start_tx.
+ * Caller (serial core) holds port->lock with IRQs disabled.
+ */
+static void serial_hsu_start_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_start_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * uart_ops::stop_tx - stop transmission by queuing qcmd_stop_tx.
+ * Caller (serial core) holds port->lock with IRQs disabled.
+ */
+static void serial_hsu_stop_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * Internal stop-tx used from the PIO transmit path.
+ * NOTE(review): body is identical to serial_hsu_stop_tx() — presumably
+ * kept separate only for naming/layering; a candidate for consolidation.
+ */
+static void hsu_stop_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* This is always called in spinlock protected mode, so
+ * modify timeout timer is safe here */
+void intel_dma_do_rx(struct uart_hsu_port *up, u32 int_sts)
+{
+ struct hsu_dma_buffer *dbuf = &up->rxbuf;
+ struct hsu_dma_chan *chan = up->rxc;
+ struct uart_port *port = &up->port;
+ struct tty_struct *tty;
+ struct tty_port *tport = &port->state->port;
+ int count;
+
+ trace_hsu_func_start(up->index, __func__);
+ tty = tty_port_tty_get(&up->port.state->port);
+ if (!tty) {
+ trace_hsu_func_end(up->index, __func__, "notty");
+ return;
+ }
+
+ /*
+ * First need to know how many is already transferred,
+ * then check if its a timeout DMA irq, and return
+ * the trail bytes out, push them up and reenable the
+ * channel
+ */
+
+ /* Timeout IRQ, need wait some time, see Errata 2 */
+ if (int_sts & 0xf00) {
+ up->dma_rx_tmt_irq_cmddone++;
+ udelay(2);
+ } else
+ up->dma_rx_irq_cmddone++;
+
+ /* Stop the channel */
+ chan_writel(chan, HSU_CH_CR, 0x0);
+
+ count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
+ if (!count) {
+ /* Restart the channel before we leave */
+ chan_writel(chan, HSU_CH_CR, 0x3);
+ tty_kref_put(tty);
+ trace_hsu_func_end(up->index, __func__, "nodata");
+ return;
+ }
+
+ dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+ dbuf->dma_size, DMA_FROM_DEVICE);
+
+ /*
+ * Head will only wrap around when we recycle
+ * the DMA buffer, and when that happens, we
+ * explicitly set tail to 0. So head will
+ * always be greater than tail.
+ */
+ tty_insert_flip_string(tport, dbuf->buf, count);
+ port->icount.rx += count;
+
+ dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+ dbuf->dma_size, DMA_FROM_DEVICE);
+
+ /* Reprogram the channel */
+ chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
+ chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
+ chan_writel(chan, HSU_CH_DCR, 0x1
+ | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
+ );
+ tty_flip_buffer_push(tport);
+
+ chan_writel(chan, HSU_CH_CR, 0x3);
+ tty_kref_put(tty);
+ trace_hsu_func_end(up->index, __func__, "");
+
+}
+
+/*
+ * uart_ops::stop_rx - stop reception by queuing qcmd_stop_rx.
+ * Caller (serial core) holds port->lock with IRQs disabled.
+ */
+static void serial_hsu_stop_rx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_rx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * PIO receive path: drain up to 256 characters from the RX FIFO into
+ * the tty flip buffer, classifying line errors (break/parity/frame/
+ * overrun) per character.  @status holds the latest LSR value and is
+ * updated in place for the caller.
+ *
+ * Fixes the dev_warn() format string: the two concatenated literals had
+ * no separator, logging "...ERR/BI casestatus = 0x..".
+ */
+static inline void receive_chars(struct uart_hsu_port *up, int *status)
+{
+	struct tty_struct *tty = up->port.state->port.tty;
+	struct tty_port *tport = &up->port.state->port;
+	unsigned int ch, flag;
+	unsigned int max_count = 256;
+
+	if (!tty)
+		return;
+
+	trace_hsu_func_start(up->index, __func__);
+	do {
+		ch = serial_in(up, UART_RX);
+		flag = TTY_NORMAL;
+		up->port.icount.rx++;
+
+		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+				UART_LSR_FE | UART_LSR_OE))) {
+
+			dev_warn(up->dev,
+				"We really rush into ERR/BI case, "
+				"status = 0x%02x\n", *status);
+			/* For statistics only */
+			if (*status & UART_LSR_BI) {
+				*status &= ~(UART_LSR_FE | UART_LSR_PE);
+				up->port.icount.brk++;
+				/*
+				 * We do the SysRQ and SAK checking
+				 * here because otherwise the break
+				 * may get masked by ignore_status_mask
+				 * or read_status_mask.
+				 */
+				if (uart_handle_break(&up->port))
+					goto ignore_char;
+			} else if (*status & UART_LSR_PE)
+				up->port.icount.parity++;
+			else if (*status & UART_LSR_FE)
+				up->port.icount.frame++;
+			if (*status & UART_LSR_OE)
+				up->port.icount.overrun++;
+
+			/* Mask off conditions which should be ignored. */
+			*status &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+			if (up->port.cons &&
+				up->port.cons->index == up->port.line) {
+				/* Recover the break flag from console xmit */
+				*status |= up->lsr_break_flag;
+				up->lsr_break_flag = 0;
+			}
+#endif
+			if (*status & UART_LSR_BI) {
+				flag = TTY_BREAK;
+			} else if (*status & UART_LSR_PE)
+				flag = TTY_PARITY;
+			else if (*status & UART_LSR_FE)
+				flag = TTY_FRAME;
+		}
+
+		if (uart_handle_sysrq_char(&up->port, ch))
+			goto ignore_char;
+
+		uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
+	ignore_char:
+		*status = serial_in(up, UART_LSR);
+	} while ((*status & UART_LSR_DR) && max_count--);
+
+	tty_flip_buffer_push(tport);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * PIO transmit path: push the software x_char, then up to half a FIFO's
+ * worth of bytes from the circular xmit buffer into the TX FIFO.
+ * Wakes up writers when the buffer drops below WAKEUP_CHARS and stops
+ * the transmitter when the buffer empties or tx is stopped.
+ */
+static void transmit_chars(struct uart_hsu_port *up)
+{
+	struct circ_buf *xmit = &up->port.state->xmit;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	trace_hsu_func_start(up->index, __func__);
+	/* high-priority software flow-control character first */
+	if (up->port.x_char) {
+		serial_out(up, UART_TX, up->port.x_char);
+		up->port.icount.tx++;
+		up->port.x_char = 0;
+		trace_hsu_func_end(up->index, __func__, "x_char");
+		goto out;
+	}
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+		hsu_stop_tx(&up->port);
+		if (uart_circ_empty(xmit))
+			trace_hsu_func_end(up->index, __func__, "empty");
+		else
+			trace_hsu_func_end(up->index, __func__, "stop");
+		goto out;
+	}
+
+	/* The IRQ is for TX FIFO half-empty */
+	count = up->port.fifosize / 2;
+
+	do {
+		if (uart_tx_stopped(&up->port)) {
+			hsu_stop_tx(&up->port);
+			break;
+		}
+		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+
+		up->port.icount.tx++;
+		if (uart_circ_empty(xmit))
+			break;
+	} while (--count > 0);
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+
+	if (uart_circ_empty(xmit)) {
+		hsu_stop_tx(&up->port);
+		trace_hsu_func_end(up->index, __func__, "tx_complete");
+	}
+	else
+		trace_hsu_func_end(up->index, __func__, "");
+
+out:
+	spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * Read MSR and react to modem-line changes: software CTS flow control
+ * (start/stop the transmitter when CTS toggles, unless the HW handles
+ * CTS itself), bump the RI/DSR/DCD counters, and wake any waiter on
+ * delta_msr_wait when anything changed.
+ *
+ * NOTE(review): tty is dereferenced without a NULL check when
+ * ASYNC_CTS_FLOW is set — presumably callers guarantee an open tty here;
+ * confirm against the IRQ path.
+ */
+static void check_modem_status(struct uart_hsu_port *up)
+{
+	struct uart_port *uport = &up->port;
+	struct tty_port *port = &uport->state->port;
+	struct tty_struct *tty = port->tty;
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	int status;
+	int delta_msr = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	status = serial_in(up, UART_MSR);
+	trace_hsu_mctrl(up->index, status);
+	if (port->flags & ASYNC_CTS_FLOW && !cfg->hw_ctrl_cts) {
+		if (tty->hw_stopped) {
+			/* CTS re-asserted: resume transmission */
+			if (status & UART_MSR_CTS) {
+				serial_sched_cmd(up, qcmd_start_tx);
+				tty->hw_stopped = 0;
+				up->cts_status = 0;
+				uport->icount.cts++;
+				delta_msr = 1;
+				uart_write_wakeup(uport);
+			}
+		} else {
+			if (!(status & UART_MSR_CTS)) {
+				/* Is this automatically controlled */
+				if (up->use_dma)
+					up->dma_ops->stop_tx(up);
+				clear_bit(flag_tx_on, &up->flags);
+				tty->hw_stopped = 1;
+				up->cts_status = 1;
+				delta_msr = 1;
+				uport->icount.cts++;
+			}
+		}
+	}
+
+	if ((status & UART_MSR_ANY_DELTA)) {
+		if (status & UART_MSR_TERI)
+			up->port.icount.rng++;
+		if (status & UART_MSR_DDSR)
+			up->port.icount.dsr++;
+		/* We may only get DDCD when HW init and reset */
+		if (status & UART_MSR_DDCD)
+			uart_handle_dcd_change(&up->port,
+					status & UART_MSR_DCD);
+		delta_msr = 1;
+	}
+
+	if (delta_msr)
+		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * Per-port DMA interrupt dispatch: acknowledge spurious IRQs by reading
+ * both channel status registers, otherwise mask the DMA IRQ and defer
+ * the real work to the command queue (qcmd_dma_irq).
+ * Channels are paired: index*2 = TX, index*2+1 = RX.
+ */
+static void hsu_dma_chan_handler(struct hsu_port *hsu, int index)
+{
+	unsigned long flags;
+	struct uart_hsu_port *up = hsu->chans[index * 2].uport;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+
+	up->dma_irq_num++;
+	if (unlikely(!up->use_dma
+		|| !test_bit(flag_startup, &up->flags))) {
+		up->dma_invalid_irq_num++;
+		/* reading HSU_CH_SR clears the pending channel status */
+		chan_readl(txc, HSU_CH_SR);
+		chan_readl(rxc, HSU_CH_SR);
+		return;
+	}
+	/* re-enabled once the qcmd_dma_irq work has run */
+	disable_irq_nosync(up->dma_irq);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_dma_irq);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+/*
+ * This handles the interrupt from one port.
+ *
+ * Intel-type HSU: ignore IRQs while not in alt mode.  DesignWare-type
+ * (e.g. BYT): the line may be shared, so IRQs during suspend or with no
+ * interrupt pending (IIR bit 0 set) are passed on as IRQ_NONE after the
+ * VLV2 B0 overrun-clear workaround.  Real work is deferred to the
+ * command queue (qcmd_port_irq) with the port IRQ masked meanwhile.
+ */
+static irqreturn_t hsu_port_irq(int irq, void *dev_id)
+{
+	struct uart_hsu_port *up = dev_id;
+	unsigned long flags;
+	u8 lsr;
+
+	trace_hsu_func_start(up->index, __func__);
+	up->port_irq_num++;
+
+	if (up->hw_type == hsu_intel) {
+		if (unlikely(!test_bit(flag_set_alt, &up->flags))) {
+			up->port_irq_no_alt++;
+			trace_hsu_func_end(up->index, __func__, "noalt");
+			return IRQ_NONE;
+		}
+	} else {
+		if (unlikely(test_bit(flag_suspend, &up->flags))) {
+			trace_hsu_func_end(up->index, __func__, "suspend");
+			return IRQ_NONE;
+		}
+
+		/* On BYT, this IRQ may be shared with other HW */
+		up->iir = serial_in(up, UART_IIR);
+		if (unlikely(up->iir & 0x1)) {
+			/*
+			 * Read UART_BYTE_COUNT and UART_OVERFLOW
+			 * registers to clear the overrun error on
+			 * Tx. This is a HW issue on VLV2 B0.
+			 * more information on HSD 4683358.
+			 */
+			serial_in(up, 0x818 / 4);
+			serial_in(up, 0x820 / 4);
+			trace_hsu_func_end(up->index, __func__, "workaround");
+			return IRQ_NONE;
+		}
+	}
+
+	if (unlikely(!test_bit(flag_startup, &up->flags))) {
+		pr_err("recv IRQ when we are not startup yet\n");
+		/*SCU might forward it too late when it is closed already*/
+		serial_in(up, UART_LSR);
+		up->port_irq_no_startup++;
+		trace_hsu_func_end(up->index, __func__, "nostart");
+		return IRQ_HANDLED;
+	}
+
+	/* DesignWare HW's DMA mode still needs the port irq */
+	if (up->use_dma && up->hw_type == hsu_intel) {
+		lsr = serial_in(up, UART_LSR);
+		spin_lock_irqsave(&up->port.lock, flags);
+		check_modem_status(up);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
+				UART_LSR_FE | UART_LSR_OE)))
+			dev_warn(up->dev,
+				"Got LSR irq(0x%02x) while using DMA", lsr);
+		trace_hsu_func_end(up->index, __func__, "lsr");
+		return IRQ_HANDLED;
+	}
+
+	/* PIO mode: mask the IRQ and let the command queue handle it */
+	disable_irq_nosync(up->port.irq);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_port_irq);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+
+	trace_hsu_func_end(up->index, __func__, "");
+	return IRQ_HANDLED;
+}
+
+/*
+ * Top-level DMA interrupt: read the global DMA interrupt status and
+ * dispatch to the per-port channel handler.  Each port owns two status
+ * bits (3 << index*2: TX + RX channel).  When the platform routes port
+ * and DMA interrupts on one line (irq_port_and_dma) only the owning
+ * port is checked; otherwise all three ports are scanned.
+ */
+static irqreturn_t hsu_dma_irq(int irq, void *dev_id)
+{
+	struct uart_hsu_port *up;
+	unsigned long flags;
+	unsigned int dmairq;
+	int i;
+
+	spin_lock_irqsave(&phsu->dma_lock, flags);
+	dmairq = mfd_readl(phsu, HSU_GBL_DMAISR);
+	if (phsu->irq_port_and_dma) {
+		up = dev_id;
+		up->port_dma_sts = dmairq;
+		if (up->port_dma_sts & (3 << (up->index * 2)))
+			hsu_dma_chan_handler(phsu, up->index);
+	} else {
+		for (i = 0; i < 3; i++)
+			if (dmairq & (3 << (i * 2))) {
+				up = phsu->chans[i * 2].uport;
+				up->port_dma_sts = dmairq;
+				hsu_dma_chan_handler(phsu, i);
+			}
+	}
+	spin_unlock_irqrestore(&phsu->dma_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * uart_ops::tx_empty - report whether the transmitter is idle.
+ * Considers both the DMA engine (flag_tx_on) and the HW shifter
+ * (LSR TEMT).  The && combination works because TIOCSER_TEMT == 1.
+ * The scheduler is stopped while sampling to keep the answer stable.
+ */
+static unsigned int serial_hsu_tx_empty(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	int ret = 1;
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+
+	if (up->use_dma && test_bit(flag_tx_on, &up->flags))
+		ret = 0;
+	ret = ret &&
+		(serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0);
+	serial_sched_start(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+/*
+ * uart_ops::get_mctrl - report modem-status lines from the cached MSR
+ * (sampled asynchronously via the command queue, not read from HW here).
+ */
+static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	unsigned char msr = up->msr;
+	unsigned int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret |= (msr & UART_MSR_DCD) ? TIOCM_CAR : 0;
+	ret |= (msr & UART_MSR_RI) ? TIOCM_RNG : 0;
+	ret |= (msr & UART_MSR_DSR) ? TIOCM_DSR : 0;
+	ret |= (msr & UART_MSR_CTS) ? TIOCM_CTS : 0;
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+/*
+ * Program the MCR shadow from a TIOCM_* mask and write it to HW.
+ *
+ * Fixes sticky modem-control bits: the old code only OR-ed bits in, so
+ * once RTS/DTR/OUT1/OUT2/LOOP were set they could never be dropped.
+ * The mapped bits are now cleared first; unrelated MCR bits possibly
+ * set elsewhere are preserved.
+ */
+static void set_mctrl(struct uart_hsu_port *up, unsigned int mctrl)
+{
+	trace_hsu_func_start(up->index, __func__);
+	up->mcr &= ~(UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT1 |
+		     UART_MCR_OUT2 | UART_MCR_LOOP);
+	if (mctrl & TIOCM_RTS)
+		up->mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		up->mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		up->mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		up->mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		up->mcr |= UART_MCR_LOOP;
+	trace_hsu_mctrl(up->index, mctrl);
+	serial_out(up, UART_MCR, up->mcr);
+	/* allow the line transition to settle */
+	udelay(100);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * uart_ops::set_mctrl - update the cached MCR from @mctrl and schedule
+ * the register write asynchronously (qcmd_set_mcr).
+ *
+ * Fixes sticky modem-control bits (same defect as set_mctrl()): bits
+ * absent from @mctrl are now cleared so RTS/DTR can be de-asserted;
+ * unrelated MCR bits are preserved.
+ */
+static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	up->mcr &= ~(UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT1 |
+		     UART_MCR_OUT2 | UART_MCR_LOOP);
+	if (mctrl & TIOCM_RTS)
+		up->mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		up->mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		up->mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		up->mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		up->mcr |= UART_MCR_LOOP;
+	serial_sched_cmd(up, qcmd_set_mcr);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * uart_ops::break_ctl - assert (break_state == -1) or clear a break
+ * condition via LCR_SBC.  Written to HW directly (synchronously) with
+ * the scheduler stopped, under a runtime-PM reference.
+ */
+static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	if (break_state == -1)
+		up->lcr |= UART_LCR_SBC;
+	else
+		up->lcr &= ~UART_LCR_SBC;
+	serial_out(up, UART_LCR, up->lcr);
+	serial_sched_start(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * What special to do:
+ * 1. chose the 64B fifo mode
+ * 2. start dma or pio depends on configuration
+ * 3. we only allocate dma memory when needed
+ */
+/*
+ * uart_ops::startup - open one HSU port.
+ *
+ * Serialized by a local mutex because two ports may share one physical
+ * interface ("alt" ports): if the sibling is open it is either
+ * force-suspended (and flagged for reopen in shutdown) or we wait up to
+ * ~1s for it to close.  Then the FIFOs are reset, the UART programmed
+ * to 8n1, DMA initialized (falling back to PIO on failure) and RX
+ * interrupts enabled.
+ */
+static int serial_hsu_startup(struct uart_port *port)
+{
+	static int console_first_init = 1;
+	int ret = 0;
+	unsigned long flags;
+	static DEFINE_MUTEX(lock);
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	mutex_lock(&lock);
+
+	pm_runtime_get_sync(up->dev);
+
+	/* HW start it */
+	if (cfg->hw_reset)
+		cfg->hw_reset(up->port.membase);
+
+	/* console port scheduler already runs; stop it once on first open */
+	if (console_first_init && test_bit(flag_console, &up->flags)) {
+		serial_sched_stop(up);
+		console_first_init = 0;
+	}
+	clear_bit(flag_reopen, &up->flags);
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up = phsu->port + alt_cfg->index;
+
+		if (test_bit(flag_startup, &alt_up->flags) &&
+			alt_up->port.state->port.tty) {
+			if (alt_cfg->force_suspend) {
+				uart_suspend_port(&serial_hsu_reg,
+						&alt_up->port);
+				serial_clear_alt(alt_up->index);
+				/* remember to reopen it in shutdown */
+				set_bit(flag_reopen, &alt_up->flags);
+			} else {
+				int loop = 50;
+
+				/* poll up to 50 * 20ms for sibling close */
+				while (test_bit(flag_startup,
+					&alt_up->flags) && --loop)
+					msleep(20);
+				if (test_bit(flag_startup, &alt_up->flags)) {
+					WARN(1, "Share port open timeout\n");
+					ret = -EBUSY;
+					goto out;
+				}
+			}
+		}
+	}
+	serial_set_alt(up->index);
+	serial_sched_start(up);
+	serial_sched_stop(up);
+
+	/*
+	 * Clear the FIFO buffers and disable them.
+	 * (they will be reenabled in set_termios())
+	 */
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+			UART_FCR_CLEAR_RCVR |
+			UART_FCR_CLEAR_XMIT);
+	serial_out(up, UART_FCR, 0);
+
+	/* Clear the interrupt registers. */
+	(void) serial_in(up, UART_LSR);
+	(void) serial_in(up, UART_RX);
+	(void) serial_in(up, UART_IIR);
+	(void) serial_in(up, UART_MSR);
+
+	/* Now, initialize the UART, default is 8n1 */
+	serial_out(up, UART_LCR, UART_LCR_WLEN8);
+	up->port.mctrl |= TIOCM_OUT2;
+	set_mctrl(up, up->port.mctrl);
+
+	/* DMA init */
+	if (up->use_dma) {
+		ret = up->dma_ops->init ? up->dma_ops->init(up) : -ENODEV;
+		if (ret) {
+			dev_warn(up->dev, "Fail to init DMA, will use PIO\n");
+			up->use_dma = 0;
+		}
+	}
+
+	/*
+	 * Finally, enable interrupts. Note: Modem status
+	 * interrupts are set via set_termios(), which will
+	 * be occurring imminently
+	 * anyway, so we don't enable them here.
+	 */
+	/* bit 4 for DW is reserved, but SEG need it to be set */
+	if (!up->use_dma || up->hw_type == hsu_dw)
+		up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
+	else
+		up->ier = 0;
+	serial_out(up, UART_IER, up->ier);
+
+	/* And clear the interrupt registers again for luck. */
+	(void) serial_in(up, UART_LSR);
+	(void) serial_in(up, UART_RX);
+	(void) serial_in(up, UART_IIR);
+	(void) serial_in(up, UART_MSR);
+
+	set_bit(flag_startup, &up->flags);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	/* wait for the queued MSR read to complete before returning */
+	serial_sched_sync(up);
+
+out:
+	pm_runtime_put(up->dev);
+	mutex_unlock(&lock);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+/*
+ * uart_ops::shutdown - close one HSU port: mask its interrupts, drop
+ * OUT2, clear break and FIFOs, free DMA resources, and resume a shared
+ * "alt" sibling if startup had force-suspended it (flag_reopen).
+ * Serialized by a local mutex, mirroring serial_hsu_startup().
+ */
+static void serial_hsu_shutdown(struct uart_port *port)
+{
+	static DEFINE_MUTEX(lock);
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	mutex_lock(&lock);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	clear_bit(flag_startup, &up->flags);
+
+	/* Disable interrupts from this port */
+	up->ier = 0;
+	serial_out(up, UART_IER, 0);
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	up->port.mctrl &= ~TIOCM_OUT2;
+	set_mctrl(up, up->port.mctrl);
+
+	/* Disable break condition and FIFOs */
+	serial_out(up, UART_LCR,
+			serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+			UART_FCR_CLEAR_RCVR |
+			UART_FCR_CLEAR_XMIT);
+	serial_out(up, UART_FCR, 0);
+
+	/* Free allocated dma buffer */
+	if (up->use_dma)
+		up->dma_ops->exit(up);
+
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up = phsu->port + alt_cfg->index;
+
+		/* sibling was force-suspended when we opened; bring it back */
+		if (test_bit(flag_reopen, &alt_up->flags)) {
+			serial_clear_alt(up->index);
+			uart_resume_port(&serial_hsu_reg, &alt_up->port);
+		}
+	}
+
+	pm_runtime_put_sync(up->dev);
+	mutex_unlock(&lock);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* calculate mul,div for low fref e.g. TNG B0 38.4M
+ * finally the fref will swith to high fref e.g. 100M
+*/
+/*
+ * Calculate MUL/DIV for a low reference clock (e.g. TNG B0 38.4 MHz);
+ * eventually the fref will switch to a high fref (e.g. 100 MHz).
+ *
+ * Returns true with *mul/*div filled in when @clock is the 38.4 MHz
+ * fref (in kHz) and @baud is one of the supported high-speed rates.
+ * Returns false otherwise so the caller falls back to the generic
+ * divisor calculation.  (Previously an unsupported baud on the low
+ * fref returned true with *mul/*div untouched, leaving them zero.)
+ */
+static bool calc_for_low_fref(u32 clock, u32 baud, u32 *mul, u32 *div)
+{
+	if (clock != 38400)
+		return false;
+
+	switch (baud) {
+	case 3500000:
+		/* ps: 10 */
+		*mul = 350;
+		*div = 384;
+		break;
+	case 3000000:
+		/* ps: 12 */
+		*mul = 360;
+		*div = 384;
+		break;
+	case 2500000:
+		/* ps: 12 */
+		*mul = 300;
+		*div = 384;
+		break;
+	case 2000000:
+		/* ps: 16 */
+		*mul = 320;
+		*div = 384;
+		break;
+	case 1843200:
+		/* ps: 16 */
+		*mul = 294912;
+		*div = 384000;
+		break;
+	case 1500000:
+		/* ps: 16 */
+		*mul = 240;
+		*div = 384;
+		break;
+	case 1000000:
+		/* ps: 16 */
+		*mul = 160;
+		*div = 384;
+		break;
+	case 500000:
+		/* ps: 16 */
+		*mul = 80;
+		*div = 384;
+		break;
+	default:
+		/* unsupported rate on low fref: use the generic calc */
+		return false;
+	}
+	return true;
+}
+
+/*
+ * serial_hsu_set_termios - apply termios settings to an HSU port.
+ *
+ * Translates c_cflag/c_iflag into LCR/FCR/IER/MCR values and programs
+ * the baud dividers.  For the Intel HSU IP the baud rate is
+ * clock * MUL / (DIV * PS * DLAB); for the Designware IP an m/n
+ * fractional pre-divider is set through cfg->set_clk on top of the
+ * classic DLL/DLM divisor.  Register writes are done with the command
+ * scheduler stopped and the port lock held.
+ */
+static void
+serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+		       struct ktermios *old)
+{
+	struct uart_hsu_port *up =
+			container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	unsigned char cval, fcr = 0;
+	unsigned long flags;
+	unsigned int baud, quot, clock, bits;
+	/* 0x3d09 is the default divisor value; for details please
+	 * refer to the high speed UART HAS documents.
+	 */
+	u32 ps = 0, mul = 0, div = 0x3D09, m = 0, n = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	/* bits counts start + data bits (parity/stop added below); it is
+	 * used to compute the per-byte transfer time for byte_delay. */
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		cval = UART_LCR_WLEN5;
+		bits = 7;
+		break;
+	case CS6:
+		cval = UART_LCR_WLEN6;
+		bits = 8;
+		break;
+	case CS7:
+		cval = UART_LCR_WLEN7;
+		bits = 9;
+		break;
+	default:
+	case CS8:
+		cval = UART_LCR_WLEN8;
+		bits = 10;
+		break;
+	}
+
+	/* CMSPAR isn't supported by this driver */
+	termios->c_cflag &= ~CMSPAR;
+
+	if (termios->c_cflag & CSTOPB) {
+		cval |= UART_LCR_STOP;
+		bits++;
+	}
+	if (termios->c_cflag & PARENB) {
+		cval |= UART_LCR_PARITY;
+		bits++;
+	}
+	if (!(termios->c_cflag & PARODD))
+		cval |= UART_LCR_EPAR;
+
+	baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+	trace_hsu_set_termios(up->index, baud, termios->c_cflag & CRTSCTS ? 1 : 0);
+
+	if (up->hw_type == hsu_intel) {
+		/*
+		 * If base clk is 50Mhz, and the baud rate come from:
+		 *	baud = 50M * MUL / (DIV * PS * DLAB)
+		 *
+		 * For those basic low baud rate we can get the direct
+		 * scalar from 2746800, like 115200 = 2746800/24. For those
+		 * higher baud rate, we handle them case by case, mainly by
+		 * adjusting the MUL/PS registers, and DIV register is kept
+		 * as default value 0x3d09 to make things simple
+		 */
+
+		if (cfg->hw_get_clk)
+			clock = cfg->hw_get_clk();
+		else
+			clock = 50000;
+		/* ps = 16 is preferred; if not possible fall back to 12, 10 */
+		if (baud * 16 <= clock * 1000)
+			ps = 16;
+		else if (baud * 12 <= clock * 1000)
+			ps = 12;
+		else if (baud * 10 <= clock * 1000)
+			ps = 10;
+		else
+			pr_err("port:%d baud:%d is too high for clock:%u M\n",
+				up->index, baud, clock / 1000);
+
+		switch (baud) {
+		case 3500000:
+		case 3000000:
+		case 2500000:
+		case 2000000:
+		case 1843200:
+		case 1500000:
+		case 1000000:
+		case 500000:
+			/* quot = 1 means DLL/DLM stay at divisor 1 and the
+			 * rate is realized entirely through MUL/DIV/PS */
+			quot = 1;
+			if (!calc_for_low_fref(clock, baud, &mul, &div))
+				/*
+				 * mul = baud * 0x3d09 * ps / 1000 / clock
+				 * change the formula order to avoid overflow
+				 */
+				mul = (0x3d09 * ps / 100) * (baud / 100)
+					* 10 / clock;
+			break;
+		default:
+			/* Use uart_get_divisor to get quot for other baud rates
+			 * avoid overflow: mul = uartclk * 0x3d09 / clock / 1000
+			 * uartclk is multiply of 115200 * n * 16 */
+			mul = (up->port.uartclk / 1600) * 0x3d09 /
+				clock * 16 / 10;
+			quot = 0;
+		}
+
+		if (!quot)
+			quot = uart_get_divisor(port, baud);
+
+		/* Scale the 64-byte FIFO RX trigger level with line speed */
+		if ((up->port.uartclk / quot) < (2400 * 16))
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
+		else if ((up->port.uartclk / quot) < (230400 * 16))
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
+		else
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
+
+		fcr |= UART_FCR_HSU_64B_FIFO;
+	} else {
+		/* need calc quot here */
+		switch (baud) {
+		case 3000000:
+		case 1500000:
+		case 1000000:
+		case 500000:
+			m = 48;
+			n = 100;
+			quot = 3000000 / baud;
+			break;
+		default:
+			m = 9216;
+			n = 15625;
+			quot = 0;
+		}
+		if (!quot)
+			quot = uart_get_divisor(port, baud);
+
+		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+			UART_FCR_T_TRIG_11;
+		if (baud < 2400) {
+			fcr &= ~UART_FCR_TRIGGER_MASK;
+			fcr |= UART_FCR_TRIGGER_1;
+		}
+	}
+
+	/* one byte transfer duration unit microsecond (rounded up) */
+	up->byte_delay = (bits * 1000000 + baud - 1) / baud;
+
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	/*
+	 * Ok, we're now changing the port state. Do it with
+	 * interrupts disabled.
+	 */
+	spin_lock_irqsave(&up->port.lock, flags);
+
+	/* Update the per-port timeout */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+	if (termios->c_iflag & INPCK)
+		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		up->port.read_status_mask |= UART_LSR_BI;
+
+	/* Characters to ignore */
+	up->port.ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+	if (termios->c_iflag & IGNBRK) {
+		up->port.ignore_status_mask |= UART_LSR_BI;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			up->port.ignore_status_mask |= UART_LSR_OE;
+	}
+
+	/* Ignore all characters if CREAD is not set */
+	if ((termios->c_cflag & CREAD) == 0)
+		up->port.ignore_status_mask |= UART_LSR_DR;
+
+	/*
+	 * CTS flow control flag and modem status interrupts, disable
+	 * MSI by default
+	 */
+	up->ier &= ~UART_IER_MSI;
+	if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+		up->ier |= UART_IER_MSI;
+
+	serial_out(up, UART_IER, up->ier);
+
+	if (termios->c_cflag & CRTSCTS)
+		up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
+	else
+		up->mcr &= ~UART_MCR_AFE;
+
+	/* Shadow the divider/line registers for PM context restore */
+	up->dll	= quot & 0xff;
+	up->dlm	= quot >> 8;
+	up->fcr	= fcr;
+	up->lcr = cval;					/* Save LCR */
+
+	serial_out(up, UART_LCR, cval | UART_LCR_DLAB);	/* set DLAB */
+	serial_out(up, UART_DLL, up->dll);		/* LS of divisor */
+	serial_out(up, UART_DLM, up->dlm);		/* MS of divisor */
+	serial_out(up, UART_LCR, cval);			/* reset DLAB */
+
+	if (up->hw_type == hsu_intel) {
+		up->mul	= mul;
+		up->div = div;
+		up->ps = ps;
+		serial_out(up, UART_MUL, up->mul);	/* set MUL */
+		serial_out(up, UART_DIV, up->div);	/* set DIV */
+		serial_out(up, UART_PS, up->ps);	/* set PS */
+	} else {
+		/* only reprogram the fractional divider when it changed */
+		if (m != up->m || n != up->n) {
+			if (cfg->set_clk)
+				cfg->set_clk(m, n, up->port.membase);
+			up->m = m;
+			up->n = n;
+		}
+	}
+
+	serial_out(up, UART_FCR, fcr);
+	set_mctrl(up, up->port.mctrl);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_start(up);
+	serial_sched_sync(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * serial_hsu_pm - uart_ops .pm hook.
+ * Intentionally empty: power management for this driver is handled
+ * through runtime PM (see serial_hsu_do_suspend/do_resume), not the
+ * serial-core pm callback.
+ */
+static void
+serial_hsu_pm(struct uart_port *port, unsigned int state,
+	      unsigned int oldstate)
+{
+}
+
+/* uart_ops .release_port hook - nothing to release; the MMIO region is
+ * owned by the platform/PCI probe code, not the serial core. */
+static void serial_hsu_release_port(struct uart_port *port)
+{
+}
+
+/* uart_ops .request_port hook - resources are claimed at probe time,
+ * so always report success to the serial core. */
+static int serial_hsu_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+/* uart_ops .config_port hook: the port type is fixed for this HW. */
+static void serial_hsu_config_port(struct uart_port *port, int flags)
+{
+	/* port is embedded in struct uart_hsu_port; &up->port == port,
+	 * so the type can be set on the argument directly. */
+	port->type = PORT_MFD;
+}
+
+/* uart_ops .verify_port hook: reject every user-requested parameter
+ * change - the port configuration is fixed by the platform. */
+static int
+serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	/* We don't want the core code to modify any port params */
+	return -EINVAL;
+}
+
+/* uart_ops .type hook: report the per-port name built at setup time. */
+static const char *
+serial_hsu_type(struct uart_port *port)
+{
+	return container_of(port, struct uart_hsu_port, port)->name;
+}
+
+/*
+ * intel_mid_hsu_set_wake_peer - register a callback that is invoked
+ * before each transmit (see serial_hsu_wake_peer) so a paired device
+ * (e.g. a BT chip) can be woken first.
+ *
+ * @port:      HSU port index into phsu->configs
+ * @wake_peer: callback, or NULL to remove it
+ *
+ * Returns the struct device of the port config, which will be passed
+ * back to @wake_peer.
+ * NOTE(review): no locking or bounds check on @port here - callers
+ * must pass a valid index; confirm against the platform code.
+ */
+struct device *intel_mid_hsu_set_wake_peer(int port,
+			void (*wake_peer)(struct device *))
+{
+	struct hsu_port_cfg *cfg = phsu->configs[port];
+
+	cfg->wake_peer = wake_peer;
+	return cfg->dev;
+}
+EXPORT_SYMBOL(intel_mid_hsu_set_wake_peer);
+
+/* uart_ops .wake_peer hook: give the registered peer-wakeup callback
+ * (set via intel_mid_hsu_set_wake_peer) a chance to wake the remote
+ * side before data is transmitted. */
+static void serial_hsu_wake_peer(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+			container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (cfg->wake_peer)
+		cfg->wake_peer(cfg->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+/*
+ * Wait for transmitter & holding register to empty.
+ * Busy-waits up to ~10 ms (10000 x udelay(1)); when the port uses
+ * hardware flow control (UPF_CONS_FLOW) it additionally waits up to
+ * ~10 ms for CTS.  Returns 1 on success, 0 on timeout.  A break
+ * condition seen while polling is latched into lsr_break_flag.
+ */
+static inline int wait_for_xmitr(struct uart_hsu_port *up)
+{
+	unsigned int status, tmout = 10000;
+
+	while (--tmout) {
+		status = serial_in(up, UART_LSR);
+		if (status & UART_LSR_BI)
+			up->lsr_break_flag = UART_LSR_BI;
+		udelay(1);
+		if (status & BOTH_EMPTY)
+			break;
+	}
+	if (tmout == 0)
+		return 0;
+
+	if (up->port.flags & UPF_CONS_FLOW) {
+		tmout = 10000;
+		while (--tmout &&
+		       ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
+			udelay(1);
+		if (tmout == 0)
+			return 0;
+	}
+	return 1;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/* kgdb/console-poll read: return one received byte, or NO_POLL_CHAR
+ * when the RX data-ready bit is clear.  Non-blocking by design. */
+static int serial_hsu_get_poll_char(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	u8 lsr;
+
+	lsr = serial_in(up, UART_LSR);
+	if (!(lsr & UART_LSR_DR))
+		return NO_POLL_CHAR;
+	return serial_in(up, UART_RX);
+}
+
+/* kgdb/console-poll write: mask interrupts, transmit one byte with
+ * busy-wait flushes before and after, then restore the saved IER. */
+static void serial_hsu_put_poll_char(struct uart_port *port,
+			unsigned char c)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	/* silence the port so the polled byte cannot race an ISR */
+	serial_out(up, UART_IER, 0);
+	while (!wait_for_xmitr(up))
+		cpu_relax();
+	serial_out(up, UART_TX, c);
+	while (!wait_for_xmitr(up))
+		cpu_relax();
+	serial_out(up, UART_IER, up->ier);
+}
+#endif
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+/* Console putchar: queue the byte into the console circular buffer;
+ * it is drained later by the qcmd_cl command in serial_hsu_command. */
+static void serial_hsu_console_putchar(struct uart_port *port, int ch)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	cl_put_char(up, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ *
+ * The characters are staged into the console circ buffer and a
+ * qcmd_cl command is scheduled to actually push them to the HW.
+ */
+static void
+serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct uart_hsu_port *up = phsu->port + co->index;
+	unsigned long flags;
+
+	uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
+	spin_lock_irqsave(&up->cl_lock, flags);
+	serial_sched_cmd(up, qcmd_cl);
+	spin_unlock_irqrestore(&up->cl_lock, flags);
+}
+
+static struct console serial_hsu_console;
+
+/*
+ * Console setup: parse options, claim alt-function pins, start the
+ * command scheduler and hand the line settings to the serial core.
+ *
+ * Fixes vs. original: the console staging buffer is allocated (and
+ * checked) up front, before any port/PM state is touched, and is not
+ * re-allocated if setup runs more than once - the original leaked the
+ * previous buffer on a second call and only failed after the port had
+ * already been reconfigured.
+ */
+static int __init
+serial_hsu_console_setup(struct console *co, char *options)
+{
+	struct uart_hsu_port *up = phsu->port + co->index;
+	int baud = 115200;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	unsigned long flags;
+
+	if (co->index < 0 || co->index >= hsu_port_max)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	if (!up->cl_circ.buf) {
+		up->cl_circ.buf = kzalloc(HSU_CL_BUF_LEN, GFP_KERNEL);
+		if (up->cl_circ.buf == NULL)
+			return -ENOMEM;
+	}
+
+	pm_runtime_get_sync(up->dev);
+	set_bit(flag_console, &up->flags);
+	set_bit(flag_startup, &up->flags);
+	serial_set_alt(up->index);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+	pm_runtime_put(up->dev);
+	return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+/* Console descriptor for /dev/ttyMFD*; .index = -1 lets the port index
+ * be filled in at setup time (see serial_port_setup). */
+static struct console serial_hsu_console = {
+	.name		= "ttyMFD",
+	.write		= serial_hsu_console_write,
+	.device		= uart_console_device,
+	.setup		= serial_hsu_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &serial_hsu_reg,
+};
+
+#define SERIAL_HSU_CONSOLE (&serial_hsu_console)
+#else
+#define SERIAL_HSU_CONSOLE NULL
+#endif
+
+/* uart_ops vtable wiring the HSU implementation into serial core. */
+struct uart_ops serial_hsu_pops = {
+	.tx_empty	= serial_hsu_tx_empty,
+	.set_mctrl	= serial_hsu_set_mctrl,
+	.get_mctrl	= serial_hsu_get_mctrl,
+	.stop_tx	= serial_hsu_stop_tx,
+	.start_tx	= serial_hsu_start_tx,
+	.stop_rx	= serial_hsu_stop_rx,
+	.enable_ms	= serial_hsu_enable_ms,
+	.break_ctl	= serial_hsu_break_ctl,
+	.startup	= serial_hsu_startup,
+	.shutdown	= serial_hsu_shutdown,
+	.set_termios	= serial_hsu_set_termios,
+	.pm		= serial_hsu_pm,
+	.type		= serial_hsu_type,
+	.release_port	= serial_hsu_release_port,
+	.request_port	= serial_hsu_request_port,
+	.config_port	= serial_hsu_config_port,
+	.verify_port	= serial_hsu_verify_port,
+	.wake_peer	= serial_hsu_wake_peer,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = serial_hsu_get_poll_char,
+	.poll_put_char = serial_hsu_put_poll_char,
+#endif
+};
+
+/* uart_driver registration record: ttyMFD devices start at minor 128. */
+static struct uart_driver serial_hsu_reg = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "MFD serial",
+	.dev_name	= "ttyMFD",
+	.major		= TTY_MAJOR,
+	.minor		= 128,
+	.nr		= HSU_PORT_MAX,
+};
+
+/*
+ * Wake IRQ handler installed by cfg->hw_suspend while the port is
+ * runtime-suspended.  Marks the port active, optionally re-asserts RTS
+ * for preamble handling, and pokes runtime PM (get+put) so the resume
+ * path runs.  @dev is the struct device registered with the wake IRQ.
+ */
+static irqreturn_t wakeup_irq(int irq, void *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	set_bit(flag_active, &up->flags);
+	if (cfg->preamble && cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+	pm_runtime_get(dev);
+	pm_runtime_put(dev);
+	trace_hsu_func_end(up->index, __func__, "");
+	return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
+/*
+ * Drain and discard any bytes that arrived in the RX FIFO while the
+ * port was suspended (they are stale/partial after RTS was dropped).
+ * Only the Intel IP exposes the FIFO occupancy register (UART_FOR).
+ */
+static void hsu_flush_rxfifo(struct uart_hsu_port *up)
+{
+	unsigned int lsr, cnt;
+
+	if (up->hw_type == hsu_intel) {
+		/* low 7 bits of UART_FOR = current RX FIFO byte count */
+		cnt = serial_in(up, UART_FOR) & 0x7F;
+		if (cnt)
+			dev_dbg(up->dev,
+				"Warning: %d bytes are received"
+				" in RX fifo after RTS active for %d us\n",
+				cnt, up->byte_delay);
+		lsr = serial_in(up, UART_LSR);
+		if (lsr & UART_LSR_DR && cnt)
+			dev_dbg(up->dev,
+				"flush abnormal data in rx fifo\n");
+		while (cnt) {
+			serial_in(up, UART_RX);
+			cnt--;
+		}
+	}
+}
+
+/*
+ * Save/restore the UART register context across power gating.
+ * @op: context_load restores the shadowed register values into HW;
+ *      the save direction only forwards to the DMA context_op since
+ *      all register values are already shadowed in struct uart_hsu_port.
+ */
+static void hsu_regs_context(struct uart_hsu_port *up, int op)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	if (op == context_load) {
+		/*
+		 * Delay a while before HW get stable. Without this the
+		 * resume will just fail, as the value you write to the
+		 * HW register will not be really written.
+		 *
+		 * This is only needed for Tangier, which really powers gate
+		 * the HSU HW in runtime suspend. While in Penwell/CLV it is
+		 * only clock gated.
+		 */
+		usleep_range(500, 510);
+
+		if (cfg->hw_reset)
+			cfg->hw_reset(up->port.membase);
+
+		/* restore LCR, then divisor via DLAB, then LCR again */
+		serial_out(up, UART_LCR, up->lcr);
+		serial_out(up, UART_LCR, up->lcr | UART_LCR_DLAB);
+		serial_out(up, UART_DLL, up->dll);
+		serial_out(up, UART_DLM, up->dlm);
+		serial_out(up, UART_LCR, up->lcr);
+
+		if (up->hw_type == hsu_intel) {
+			serial_out(up, UART_MUL, up->mul);
+			serial_out(up, UART_DIV, up->div);
+			serial_out(up, UART_PS, up->ps);
+		} else {
+			if (cfg->set_clk)
+				cfg->set_clk(up->m, up->n, up->port.membase);
+		}
+
+		serial_out(up, UART_MCR, up->mcr);
+		serial_out(up, UART_FCR, up->fcr);
+		serial_out(up, UART_IER, up->ier);
+	}
+
+	if (up->use_dma && up->dma_ops->context_op)
+		up->dma_ops->context_op(up, op);
+}
+
+/*
+ * Runtime/system suspend worker for one HSU port.
+ *
+ * Refuses to suspend (returns -EBUSY and re-arms a delayed suspend)
+ * when RX/TX traffic is in flight: RX FIFO non-empty, DW busy bit set,
+ * RX DMA in progress, pending scheduler commands, or unsent TX data.
+ * On success the wake IRQ is installed via cfg->hw_suspend and the
+ * register context is saved.  RTS is deasserted (hw_set_rts(..., 1))
+ * before the final checks so the peer stops sending, and restored on
+ * the error path.
+ */
+int serial_hsu_do_suspend(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	struct uart_port *uport = &up->port;
+	struct tty_port *tport = &uport->state->port;
+	struct tty_struct *tty = tport->tty;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	char cmd;
+	unsigned long flags;
+
+	trace_hsu_func_start(up->index, __func__);
+
+	/* cheap early-out: any RX bytes pending in HW? */
+	if (test_bit(flag_startup, &up->flags)) {
+		if (up->hw_type == hsu_intel &&
+			serial_in(up, UART_FOR) & 0x7F)
+			goto busy;
+		else if (up->hw_type == hsu_dw &&
+			serial_in(up, 0x7c / 4) & BIT(3))
+			goto busy;
+	}
+
+	if (up->use_dma) {
+		if (up->hw_type == hsu_intel) {
+			/* DMA source ptr advanced => RX transfer active */
+			if (chan_readl(up->rxc, HSU_CH_D0SAR) >
+					up->rxbuf.dma_addr)
+				goto busy;
+		}
+	}
+
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+
+	disable_irq(up->port.irq);
+	disable_irq(up->dma_irq);
+
+	/* allow one in-flight byte to drain after RTS went inactive */
+	if (cfg->hw_set_rts)
+		usleep_range(up->byte_delay, up->byte_delay + 1);
+
+	serial_sched_stop(up);
+	set_bit(flag_suspend, &up->flags);
+
+	if (test_bit(flag_startup, &up->flags) && check_qcmd(up, &cmd)) {
+		dev_info(up->dev, "ignore suspend cmd: %d\n", cmd);
+		goto err;
+	}
+
+	if (test_bit(flag_tx_on, &up->flags)) {
+		dev_info(up->dev, "ignore suspend for tx on\n");
+		dev_info(up->dev,
+			"xmit pending:%d, stopped:%d, hw_stopped:%d, MSR:%x\n",
+			(int)uart_circ_chars_pending(xmit), tty->stopped,
+			tty->hw_stopped, serial_in(up, UART_MSR));
+		goto err;
+	}
+
+	if (test_bit(flag_startup, &up->flags) && !uart_circ_empty(xmit) &&
+		!uart_tx_stopped(&up->port)) {
+		dev_info(up->dev, "ignore suspend for xmit\n");
+		dev_info(up->dev,
+			"xmit pending:%d, stopped:%d, hw_stopped:%d, MSR:%x\n",
+			(int)uart_circ_chars_pending(xmit),
+			tty->stopped,
+			tty->hw_stopped,
+			serial_in(up, UART_MSR));
+		goto err;
+	}
+
+	if (up->use_dma) {
+		if (up->dma_ops->suspend(up))
+			goto err;
+	} else if (test_bit(flag_startup, &up->flags)) {
+		/* re-check HW RX state with IRQs off and sched stopped */
+		if (up->hw_type == hsu_intel &&
+			serial_in(up, UART_FOR) & 0x7F)
+			goto err;
+		else if (up->hw_type == hsu_dw &&
+			serial_in(up, 0x7c / 4) & BIT(3))
+			goto err;
+	}
+
+	if (cfg->hw_suspend)
+		cfg->hw_suspend(up->index, up->dev, wakeup_irq);
+	if (cfg->hw_context_save)
+		hsu_regs_context(up, context_save);
+	if (cfg->preamble && cfg->hw_suspend_post)
+		cfg->hw_suspend_post(up->index);
+	enable_irq(up->dma_irq);
+	/* DW has no separate wake IRQ; keep the port IRQ armed */
+	if (up->hw_type == hsu_dw)
+		enable_irq(up->port.irq);
+
+	trace_hsu_func_end(up->index, __func__, "");
+	return 0;
+err:
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	clear_bit(flag_suspend, &up->flags);
+	enable_irq(up->port.irq);
+	if (up->use_dma && up->hw_type == hsu_intel)
+		intel_dma_do_rx(up, 0);
+	enable_irq(up->dma_irq);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+busy:
+	pm_schedule_suspend(up->dev, cfg->idle);
+	trace_hsu_func_end(up->index, __func__, "busy");
+	return -EBUSY;
+}
+EXPORT_SYMBOL(serial_hsu_do_suspend);
+
+/*
+ * Runtime/system resume worker: restore register context, undo the
+ * hw_suspend wiring, flush stale RX bytes, resume DMA and re-assert
+ * RTS.  A port that was never suspended is a no-op.  Always returns 0.
+ */
+int serial_hsu_do_resume(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	unsigned long flags;
+
+	trace_hsu_func_start(up->index, __func__);
+	if (!test_and_clear_bit(flag_suspend, &up->flags)) {
+		trace_hsu_func_end(up->index, __func__, "ignore");
+		return 0;
+	}
+	/* DW kept its port IRQ enabled across suspend; quiesce it while
+	 * the context is reloaded */
+	if (up->hw_type == hsu_dw)
+		disable_irq(up->port.irq);
+	if (cfg->hw_context_save)
+		hsu_regs_context(up, context_load);
+	if (cfg->hw_resume)
+		cfg->hw_resume(up->index, up->dev);
+	if (test_bit(flag_startup, &up->flags))
+		hsu_flush_rxfifo(up);
+	if (up->use_dma)
+		up->dma_ops->resume(up);
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	enable_irq(up->port.irq);
+
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return 0;
+}
+EXPORT_SYMBOL(serial_hsu_do_resume);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Runtime-PM idle callback: never suspend immediately; schedule a
+ * delayed suspend instead and report -EBUSY to the PM core.
+ */
+int serial_hsu_do_runtime_idle(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (cfg->type == debug_port
+		&& system_state == SYSTEM_BOOTING)
+		/* if HSU is set as default console, but earlyprintk is not hsu,
+		 * then it will enter suspend and can not get back since system
+		 * is on boot up, no context switch to let it resume, here just
+		 * postpone the suspend retry 30 seconds, then system should
+		 * have finished booting
+		 */
+		pm_schedule_suspend(up->dev, 30000);
+	else
+		pm_schedule_suspend(up->dev, cfg->idle);
+	trace_hsu_func_end(up->index, __func__, "");
+	return -EBUSY;
+}
+EXPORT_SYMBOL(serial_hsu_do_runtime_idle);
+#endif
+
+/*
+ * Central command dispatcher, run from tasklet or workqueue context.
+ * Drains the per-port command queue (qcmd_*) with the port lock held
+ * around queue access but released around each command body, since
+ * commands may sleep (qcmd_cl) or take the lock themselves
+ * (qcmd_stop_tx).  After the queue empties, MSR is sampled and modem
+ * status changes are propagated.
+ */
+static void serial_hsu_command(struct uart_hsu_port *up)
+{
+	char cmd, c;
+	unsigned long flags;
+	unsigned int iir, lsr;
+	int status;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (unlikely(test_bit(flag_cmd_off, &up->flags))) {
+		trace_hsu_func_end(up->index, __func__, "cmd_off");
+		return;
+	}
+	if (unlikely(test_bit(flag_suspend, &up->flags))) {
+		dev_err(up->dev,
+			"Error to handle cmd while port is suspended\n");
+		if (check_qcmd(up, &cmd))
+			dev_err(up->dev, "Command pending: %d\n", cmd);
+		trace_hsu_func_end(up->index, __func__, "suspend");
+		return;
+	}
+	set_bit(flag_active, &up->flags);
+	spin_lock_irqsave(&up->port.lock, flags);
+	while (get_qcmd(up, &cmd)) {
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		trace_hsu_cmd_start(up->index, cmd);
+		switch (cmd) {
+		case qcmd_overflow:
+			dev_err(up->dev, "queue overflow!!\n");
+			break;
+		case qcmd_set_mcr:
+			serial_out(up, UART_MCR, up->mcr);
+			break;
+		case qcmd_set_ier:
+			serial_out(up, UART_IER, up->ier);
+			break;
+		case qcmd_start_rx:
+			/* use for DW DMA RX only */
+			if (test_and_clear_bit(flag_rx_pending, &up->flags)) {
+				if (up->use_dma)
+					up->dma_ops->start_rx(up);
+			}
+			break;
+		case qcmd_stop_rx:
+			if (!up->use_dma || up->hw_type == hsu_dw) {
+				up->ier &= ~UART_IER_RLSI;
+				up->port.read_status_mask &= ~UART_LSR_DR;
+				serial_out(up, UART_IER, up->ier);
+			}
+
+			if (up->use_dma)
+				up->dma_ops->stop_rx(up);
+			break;
+		case qcmd_start_tx:
+			if (up->use_dma) {
+				if (!test_bit(flag_tx_on, &up->flags))
+					up->dma_ops->start_tx(up);
+			} else if (!(up->ier & UART_IER_THRI)) {
+				up->ier |= UART_IER_THRI;
+				serial_out(up, UART_IER, up->ier);
+			}
+			break;
+		case qcmd_stop_tx:
+			if (up->use_dma) {
+				spin_lock_irqsave(&up->port.lock, flags);
+				up->dma_ops->stop_tx(up);
+				clear_bit(flag_tx_on, &up->flags);
+				spin_unlock_irqrestore(&up->port.lock, flags);
+			} else if (up->ier & UART_IER_THRI) {
+				up->ier &= ~UART_IER_THRI;
+				serial_out(up, UART_IER, up->ier);
+			}
+			break;
+		case qcmd_cl:
+			/* drain the console staging buffer; may sleep */
+			serial_out(up, UART_IER, 0);
+			while (cl_get_char(up, &c)) {
+				while (!wait_for_xmitr(up))
+					schedule();
+				serial_out(up, UART_TX, c);
+			}
+			serial_out(up, UART_IER, up->ier);
+			break;
+		case qcmd_port_irq:
+			up->port_irq_cmddone++;
+
+			/* Baytrail platform use shared IRQ and need more care */
+			if (up->hw_type == hsu_intel) {
+				iir = serial_in(up, UART_IIR);
+			} else {
+				if (up->iir & 0x1)
+					up->iir = serial_in(up, UART_IIR);
+				iir = up->iir;
+				up->iir = 1;
+			}
+
+			if (iir & UART_IIR_NO_INT) {
+				enable_irq(up->port.irq);
+				up->port_irq_pio_no_irq_pend++;
+				break;
+			}
+
+			if (iir & HSU_PIO_RX_ERR)
+				up->port_irq_pio_rx_err++;
+			if (iir & HSU_PIO_RX_AVB)
+				up->port_irq_pio_rx_avb++;
+			if (iir & HSU_PIO_RX_TMO)
+				up->port_irq_pio_rx_timeout++;
+			if (iir & HSU_PIO_TX_REQ)
+				up->port_irq_pio_tx_req++;
+
+			lsr = serial_in(up, UART_LSR);
+
+			/* We need to judge it's timeout or data available */
+			if (lsr & UART_LSR_DR) {
+				if (!up->use_dma) {
+					receive_chars(up, &lsr);
+				} else if (up->hw_type == hsu_dw) {
+					if ((iir & 0xf) == 0xc) {
+						/*
+						 * RX timeout IRQ, the DMA
+						 * channel may be stalled
+						 */
+						up->dma_ops->stop_rx(up);
+						receive_chars(up, &lsr);
+					} else
+						up->dma_ops->start_rx(up);
+				}
+			}
+
+			/* lsr will be renewed during the receive_chars */
+			if (!up->use_dma && (lsr & UART_LSR_THRE))
+				transmit_chars(up);
+
+			/* re-enable the port IRQ via a follow-up command */
+			spin_lock_irqsave(&up->port.lock, flags);
+			serial_sched_cmd(up, qcmd_enable_irq);
+			spin_unlock_irqrestore(&up->port.lock, flags);
+			break;
+		case qcmd_enable_irq:
+			enable_irq(up->port.irq);
+			break;
+		case qcmd_dma_irq:
+			/* Only hsu_intel has this irq */
+			up->dma_irq_cmddone++;
+			if (up->port_dma_sts & (1 << txc->id)) {
+				up->dma_tx_irq_cmddone++;
+				status = chan_readl(txc, HSU_CH_SR);
+				up->dma_ops->start_tx(up);
+			}
+
+			if (up->port_dma_sts & (1 << rxc->id)) {
+				status = chan_readl(rxc, HSU_CH_SR);
+				intel_dma_do_rx(up, status);
+			}
+			enable_irq(up->dma_irq);
+			break;
+		case qcmd_cmd_off:
+			set_bit(flag_cmd_off, &up->flags);
+			break;
+		case qcmd_get_msr:
+			/* handled after the loop: MSR is always re-read */
+			break;
+		default:
+			dev_err(up->dev, "invalid command!!\n");
+			break;
+		}
+		trace_hsu_cmd_end(up->index, cmd);
+		spin_lock_irqsave(&up->port.lock, flags);
+		if (unlikely(test_bit(flag_cmd_off, &up->flags)))
+			break;
+	}
+	up->msr = serial_in(up, UART_MSR);
+	if (cfg->hw_ctrl_cts)
+		up->msr |= UART_MSR_CTS;
+	check_modem_status(up);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* Tasklet entry: run the command dispatcher in softirq context; the
+ * in_tasklet/tasklet_done fields are debug/statistics markers. */
+static void serial_hsu_tasklet(unsigned long data)
+{
+	struct uart_hsu_port *up = (struct uart_hsu_port *)data;
+
+	up->in_tasklet = 1;
+	serial_hsu_command(up);
+	up->tasklet_done++;
+	up->in_tasklet = 0;
+}
+
+/* Workqueue entry: run the command dispatcher in process context (used
+ * when commands may sleep); in_workq/workq_done are debug markers. */
+static void serial_hsu_work(struct work_struct *work)
+{
+	struct uart_hsu_port *uport =
+		container_of(work, struct uart_hsu_port, work);
+
+	uport->in_workq = 1;
+	serial_hsu_command(uport);
+	uport->workq_done++;
+	uport->in_workq = 0;
+}
+
+/*
+ * Per-port initialization: bind the config, create the tasklet/
+ * workqueue machinery, register the uart port and request IRQs.
+ *
+ * Fixes vs. original: the workqueue allocation is checked, and in the
+ * shared-IRQ path the already-requested DMA IRQ is released when the
+ * port IRQ request fails (the original leaked it).
+ */
+static int serial_port_setup(struct uart_hsu_port *up,
+		struct hsu_port_cfg *cfg)
+{
+	int ret;
+	int index = cfg->index;
+
+	phsu->configs[index] = cfg;
+	up->port.line = index;
+	snprintf(up->name, sizeof(up->name) - 1, "%s_p", cfg->name);
+	up->index = index;
+
+	/* DMA is opted in per-port through the hsu_dma_enable bitmask */
+	if ((hsu_dma_enable & (1 << index)) && up->dma_ops)
+		up->use_dma = 1;
+	else
+		up->use_dma = 0;
+
+	if (cfg->hw_init)
+		cfg->hw_init(up->dev, index);
+	mutex_init(&up->q_mutex);
+	tasklet_init(&up->tasklet, serial_hsu_tasklet,
+				(unsigned long)up);
+	up->workqueue =
+		create_singlethread_workqueue(up->name);
+	if (!up->workqueue)
+		return -ENOMEM;
+	INIT_WORK(&up->work, serial_hsu_work);
+	up->qcirc.buf = (char *)up->qbuf;
+	spin_lock_init(&up->cl_lock);
+	set_bit(flag_cmd_off, &up->flags);
+
+	/* only the designated debug port gets the console attached */
+	if (cfg->type == debug_port) {
+		serial_hsu_reg.cons = SERIAL_HSU_CONSOLE;
+		if (serial_hsu_reg.cons)
+			serial_hsu_reg.cons->index = index;
+	} else
+		serial_hsu_reg.cons = NULL;
+
+	uart_add_one_port(&serial_hsu_reg, &up->port);
+
+	if (phsu->irq_port_and_dma) {
+		up->dma_irq = up->port.irq;
+		ret = request_irq(up->dma_irq, hsu_dma_irq, IRQF_SHARED,
+				"hsu dma", up);
+		if (ret) {
+			dev_err(up->dev, "can not get dma IRQ\n");
+			return ret;
+		}
+		ret = request_irq(up->port.irq, hsu_port_irq, IRQF_SHARED,
+				up->name, up);
+		if (ret) {
+			dev_err(up->dev, "can not get port IRQ\n");
+			/* release the DMA IRQ requested just above */
+			free_irq(up->dma_irq, up);
+			return ret;
+		}
+	} else {
+		up->dma_irq = phsu->dma_irq;
+		ret = request_irq(up->port.irq, hsu_port_irq, IRQF_SHARED,
+				up->name, up);
+		if (ret) {
+			dev_err(up->dev, "can not get port IRQ\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Public entry used by the PCI/ACPI glue to register one HSU port.
+ * Maps the MMIO window, derives uartclk from the platform clock,
+ * wires the DMA channels/ops for the detected IP, and registers the
+ * port (plus its alternate-function twin when cfg->has_alt).
+ *
+ * Fix vs. original: the ioremap_nocache() result is checked - the
+ * original dereferenced a possibly-NULL membase later on.
+ * NOTE(review): the "!cfg" check is ineffective as written (array
+ * base plus offset is never NULL for a valid array); a range check on
+ * @port would be needed to actually validate the index.
+ */
+struct uart_hsu_port *serial_hsu_port_setup(struct device *pdev, int port,
+	resource_size_t start, resource_size_t len, int irq)
+{
+	struct uart_hsu_port *up;
+	int index;
+	unsigned int uclk, clock;
+	struct hsu_port_cfg *cfg;
+
+	cfg = hsu_port_func_cfg + port;
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+	pr_info("Found a %s HSU\n", cfg->hw_ip ? "Designware" : "Intel");
+
+	index = cfg->index;
+	up = phsu->port + index;
+
+	up->dev = pdev;
+	up->port.type = PORT_MFD;
+	up->port.iotype = UPIO_MEM;
+	up->port.mapbase = start;
+	up->port.membase = ioremap_nocache(up->port.mapbase, len);
+	if (!up->port.membase)
+		return ERR_PTR(-ENOMEM);
+	up->port.fifosize = 64;
+	up->port.ops = &serial_hsu_pops;
+	up->port.flags = UPF_IOREMAP;
+	up->hw_type = cfg->hw_ip;
+	/* calculate if DLAB=1, the ideal uartclk */
+	if (cfg->hw_get_clk)
+		clock = cfg->hw_get_clk();
+	else
+		clock = 50000;
+	uclk = clock * 1000 / (115200 * 16); /* 16 is default ps */
+	if (uclk >= 24)
+		uclk = 24;
+	else if (uclk >= 16)
+		uclk = 16;
+	else if (uclk >= 8)
+		uclk = 8;
+	else
+		uclk = 1;
+
+	if (up->hw_type == hsu_intel)
+		up->port.uartclk = 115200 * uclk * 16;
+	else
+		up->port.uartclk = 115200 * 32 * 16;
+
+	up->port.irq = irq;
+	up->port.dev = pdev;
+
+	if (up->hw_type == hsu_intel) {
+		/* each port owns a TX/RX pair of Intel DMA channels */
+		up->txc = &phsu->chans[index * 2];
+		up->rxc = &phsu->chans[index * 2 + 1];
+		up->dma_ops = &intel_dma_ops;
+	} else {
+		up->dma_ops = pdw_dma_ops;
+	}
+
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg =
+			hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up =
+			phsu->port + alt_cfg->index;
+
+		/* the alt port is a clone of this one on different pins */
+		memcpy(alt_up, up, sizeof(*up));
+		serial_port_setup(alt_up, alt_cfg);
+		phsu->port_num++;
+	}
+
+	serial_port_setup(up, cfg);
+	phsu->port_num++;
+
+	return up;
+}
+EXPORT_SYMBOL(serial_hsu_port_setup);
+EXPORT_SYMBOL(serial_hsu_port_setup);
+
+/*
+ * Tear down one HSU port (and its alt-function twin, which shares the
+ * same IRQ line but was requested with its own dev_id, so both
+ * free_irq calls are needed).
+ */
+void serial_hsu_port_free(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	uart_remove_one_port(&serial_hsu_reg, &up->port);
+	free_irq(up->port.irq, up);
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = phsu->configs[cfg->alt];
+		struct uart_hsu_port *alt_up =
+			phsu->port + alt_cfg->index;
+		uart_remove_one_port(&serial_hsu_reg, &alt_up->port);
+		free_irq(up->port.irq, alt_up);
+	}
+}
+EXPORT_SYMBOL(serial_hsu_port_free);
+
+/* Quiesce one port for driver shutdown via the serial core. */
+void serial_hsu_port_shutdown(struct uart_hsu_port *up)
+{
+	uart_suspend_port(&serial_hsu_reg, &up->port);
+}
+EXPORT_SYMBOL(serial_hsu_port_shutdown);
+
+/*
+ * Map the Intel HSU DMA register block and initialize its six
+ * channels (even = TX, odd = RX; channel pair i maps to port i/2).
+ * When @share is set, each port's own IRQ doubles as the DMA IRQ and
+ * no dedicated IRQ is requested here.
+ *
+ * Fix vs. original: the ioremap_nocache() result is checked before
+ * the channel registers are derived from it.
+ */
+int serial_hsu_dma_setup(struct device *pdev,
+	resource_size_t start, resource_size_t len, unsigned int irq, int share)
+{
+	struct hsu_dma_chan *dchan;
+	int i, ret;
+
+	phsu->reg = ioremap_nocache(start, len);
+	if (!phsu->reg)
+		return -ENOMEM;
+	dchan = phsu->chans;
+	for (i = 0; i < 6; i++) {
+		dchan->id = i;
+		dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE :
+			DMA_TO_DEVICE;
+		dchan->uport = &phsu->port[i/2];
+		dchan->reg = phsu->reg + HSU_DMA_CHANS_REG_OFFSET +
+			i * HSU_DMA_CHANS_REG_LENGTH;
+
+		dchan++;
+	}
+
+	/* will share irq with port if irq < 0 */
+	if (share)
+		phsu->irq_port_and_dma = 1;
+	else {
+		phsu->dma_irq = irq;
+		ret = request_irq(irq, hsu_dma_irq, 0, "hsu dma", phsu);
+		if (ret) {
+			dev_err(pdev, "can not get dma IRQ\n");
+			goto err;
+		}
+	}
+
+	dev_set_drvdata(pdev, phsu);
+
+	return 0;
+err:
+	iounmap(phsu->reg);
+	return ret;
+}
+EXPORT_SYMBOL(serial_hsu_dma_setup);
+
+/* Release the dedicated DMA IRQ requested in serial_hsu_dma_setup().
+ * NOTE(review): callers must only use this in the non-shared-IRQ
+ * configuration; the mapping from serial_hsu_dma_setup() is not
+ * unmapped here - confirm against the glue code. */
+void serial_hsu_dma_free(void)
+{
+	free_irq(phsu->dma_irq, phsu);
+}
+EXPORT_SYMBOL(serial_hsu_dma_free);
+
+/*
+ * Module init: register the uart driver, then the debugfs hooks.
+ * Fix vs. original: the uart driver is unregistered again when
+ * hsu_debugfs_init() fails, so a failed init does not leave a
+ * half-registered driver behind.
+ */
+static int __init hsu_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&serial_hsu_reg);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&phsu->dma_lock);
+
+	ret = hsu_debugfs_init(phsu);
+	if (ret)
+		uart_unregister_driver(&serial_hsu_reg);
+	return ret;
+}
+
+/* Module exit: unregister the uart driver and tear down debugfs. */
+static void __exit hsu_exit(void)
+{
+	uart_unregister_driver(&serial_hsu_reg);
+	hsu_debugfs_remove(phsu);
+}
+
+module_init(hsu_init);
+module_exit(hsu_exit);
+
+MODULE_AUTHOR("Yang Bin <bin.yang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
--- /dev/null
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial_mfd.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
+#include <asm/intel_mid_hsu.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#include "mfd.h"
+
+/*
+ * Allocate the RX bounce buffer and DMA-map both the RX buffer and
+ * the serial core's TX circ buffer.
+ *
+ * Fix vs. original: both dma_map_single() results are now validated
+ * with dma_mapping_error() (the original used potentially invalid
+ * DMA addresses), with full unwind on failure.
+ */
+static int dma_init_common(struct uart_hsu_port *up)
+{
+	struct hsu_dma_buffer *dbuf;
+	struct circ_buf *xmit = &up->port.state->xmit;
+
+	/* 1. Allocate the RX buffer */
+	dbuf = &up->rxbuf;
+	dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf->buf) {
+		up->use_dma = 0;
+		dev_err(up->dev, "allocate DMA buffer failed!!\n");
+		return -ENOMEM;
+	}
+
+	dbuf->dma_addr = dma_map_single(up->dev,
+			dbuf->buf,
+			HSU_DMA_BUF_SIZE,
+			DMA_FROM_DEVICE);
+	if (dma_mapping_error(up->dev, dbuf->dma_addr)) {
+		up->use_dma = 0;
+		dev_err(up->dev, "map RX DMA buffer failed!!\n");
+		kfree(dbuf->buf);
+		dbuf->buf = NULL;
+		return -ENOMEM;
+	}
+	dbuf->dma_size = HSU_DMA_BUF_SIZE;
+
+	/* 2. prepare the TX buffer */
+	dbuf = &up->txbuf;
+	dbuf->buf = xmit->buf;
+	dbuf->dma_addr = dma_map_single(up->dev,
+			dbuf->buf,
+			UART_XMIT_SIZE,
+			DMA_TO_DEVICE);
+	if (dma_mapping_error(up->dev, dbuf->dma_addr)) {
+		up->use_dma = 0;
+		dev_err(up->dev, "map TX DMA buffer failed!!\n");
+		dbuf->buf = NULL;
+		/* unwind the RX mapping done above */
+		dma_unmap_single(up->dev, up->rxbuf.dma_addr,
+				HSU_DMA_BUF_SIZE, DMA_FROM_DEVICE);
+		kfree(up->rxbuf.buf);
+		up->rxbuf.buf = NULL;
+		return -ENOMEM;
+	}
+	dbuf->dma_size = UART_XMIT_SIZE;
+	dbuf->ofs = 0;
+	return 0;
+}
+
+/* Undo dma_init_common(): unmap both buffers and free the RX bounce
+ * buffer (the TX buffer belongs to the serial core, so it is only
+ * unmapped, never freed here). */
+static void dma_exit_common(struct uart_hsu_port *up)
+{
+	struct hsu_dma_buffer *dbuf;
+	struct uart_port *port = &up->port;
+
+	/* Free and unmap rx dma buffer */
+	dbuf = &up->rxbuf;
+	dma_unmap_single(port->dev,
+			dbuf->dma_addr,
+			dbuf->dma_size,
+			DMA_FROM_DEVICE);
+	kfree(dbuf->buf);
+
+	/* Next unmap tx dma buffer*/
+	dbuf = &up->txbuf;
+	dma_unmap_single(port->dev,
+			dbuf->dma_addr,
+			dbuf->dma_size,
+			DMA_TO_DEVICE);
+}
+
+#ifdef CONFIG_INTEL_MID_DMAC
+/*
+ * dmaengine channel filter: accept channels belonging to the LPIO1
+ * DMA controller.  Matches by PCI device when one was found at init,
+ * otherwise falls back to ACPI: INTL9C60 with _UID == 1, or the
+ * 80862286 HID.
+ */
+static bool dw_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_priv *dw_dma = param;
+
+	if (dw_dma->dmac && (&dw_dma->dmac->dev == chan->device->dev))
+		return true;
+	else {
+#ifdef CONFIG_ACPI
+		acpi_handle handle = ACPI_HANDLE(chan->device->dev);
+		struct acpi_device *device;
+		int ret;
+		const char *hid;
+		ret = acpi_bus_get_device(handle, &device);
+		if (ret) {
+			pr_warn("DW HSU: no acpi entry\n");
+			return false;
+		}
+		hid = acpi_device_hid(device);
+		if (!strncmp(hid, "INTL9C60", strlen(hid))) {
+			acpi_status status;
+			unsigned long long tmp;
+			status = acpi_evaluate_integer(handle,
+					"_UID", NULL, &tmp);
+			if (!ACPI_FAILURE(status) && (tmp == 1))
+				return true;
+		}
+		if (!strncmp(hid, "80862286", strlen(hid))) {
+			return true;
+		}
+
+#endif
+		return false;
+	}
+}
+
+/* the RX/TX buffer init should be a common stuff */
+static int dw_dma_init(struct uart_hsu_port *up)
+{
+ struct dw_dma_priv *dw_dma;
+ struct intel_mid_dma_slave *rxs, *txs;
+ dma_cap_mask_t mask;
+ int ret = 0;
+
+ dw_dma = kzalloc(sizeof(*dw_dma), GFP_KERNEL);
+ if (!dw_dma) {
+ pr_warn("DW HSU: Can't alloc memory for dw_dm_priv\n");
+ return -1;
+ }
+
+ up->dma_priv = dw_dma;
+
+ /*
+ * Get pci device for DMA controller, currently it could only
+ * be the DMA controller of baytrail
+ */
+ dw_dma->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0f06, NULL);
+ if (!dw_dma->dmac) {
+ /* still have chance to get from ACPI dev */
+ pr_warn("DW HSU: Can't find LPIO1 DMA controller by PCI, try ACPI\n");
+ }
+
+ ret = dma_init_common(up);
+ if (ret)
+ return ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* 1. Init rx channel */
+ dw_dma->rxchan = dma_request_channel(mask, dw_dma_chan_filter, dw_dma);
+ if (!dw_dma->rxchan)
+ goto err_exit;
+ rxs = &dw_dma->rxs;
+ rxs->dma_slave.direction = DMA_FROM_DEVICE;
+ rxs->hs_mode = LNW_DMA_HW_HS;
+ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+ rxs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ /* These are fixed HW info from Baytrail datasheet */
+ if (up->index == 0)
+ rxs->device_instance = 3;
+ else
+ rxs->device_instance = 5;
+ dw_dma->rxchan->private = rxs;
+
+ /* 2. Init tx channel */
+ dw_dma->txchan = dma_request_channel(mask, dw_dma_chan_filter, dw_dma);
+ if (!dw_dma->txchan)
+ goto free_rxchan;
+
+ txs = &dw_dma->txs;
+ txs->dma_slave.direction = DMA_TO_DEVICE;
+ txs->hs_mode = LNW_DMA_HW_HS;
+ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+ txs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ if (up->index == 0)
+ txs->device_instance = 2;
+ else
+ txs->device_instance = 4;
+ dw_dma->txchan->private = txs;
+
+ /* TX/RX reg share the same addr */
+ dw_dma->dma_addr = up->port.mapbase + UART_RX;
+
+ pm_qos_add_request(&up->qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ dw_dma->up = up;
+ up->dma_inited = 1;
+ return 0;
+
+free_rxchan:
+ dma_release_channel(dw_dma->rxchan);
+err_exit:
+ return -1;
+
+}
+
+static int dw_dma_suspend(struct uart_hsu_port *up)
+{
+ struct dw_dma_priv *dw_dma = up->dma_priv;
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+
+ if (!up->dma_inited)
+ return 0;
+
+ txchan = dw_dma->txchan;
+ rxchan = dw_dma->rxchan;
+
+ if (test_bit(flag_rx_on, &up->flags) ||
+ test_bit(flag_rx_pending, &up->flags)) {
+ dev_warn(up->dev, "ignore suspend for rx dma is running\n");
+ return -1;
+ }
+
+ txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+ rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+
+ txchan->device->device_control(txchan, DMA_PAUSE, 0);
+ rxchan->device->device_control(rxchan, DMA_PAUSE, 0);
+ pm_qos_update_request(&up->qos, PM_QOS_DEFAULT_VALUE);
+ return 0;
+}
+
+static int dw_dma_resume(struct uart_hsu_port *up)
+{
+ struct dw_dma_priv *dw_dma = up->dma_priv;
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+
+ if (!up->dma_inited)
+ return 0;
+
+ txchan = dw_dma->txchan;
+ rxchan = dw_dma->rxchan;
+
+ rxchan->device->device_control(rxchan, DMA_RESUME, 0);
+ txchan->device->device_control(txchan, DMA_RESUME, 0);
+ pm_qos_update_request(&up->qos, CSTATE_EXIT_LATENCY_C2);
+ return 0;
+}
+
+
+static int dw_dma_exit(struct uart_hsu_port *up)
+{
+ struct dw_dma_priv *dw_dma = up->dma_priv;
+ struct dma_chan *txchan = dw_dma->txchan;
+ struct dma_chan *rxchan = dw_dma->rxchan;
+
+ pm_qos_remove_request(&up->qos);
+ txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+ rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+ dma_release_channel(dw_dma->txchan);
+ dma_release_channel(dw_dma->rxchan);
+
+ dma_exit_common(up);
+
+ kfree(dw_dma);
+
+ up->dma_inited = 0;
+ up->dma_priv = NULL;
+ return 0;
+}
+
+/* DMA engine callback: a TX transfer completed, advance the circ buffer */
+static void dw_dma_tx_done(void *arg)
+{
+	struct dw_dma_priv *dw_dma = arg;
+	struct uart_hsu_port *up = dw_dma->up;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	unsigned long flags;
+
+	/*
+	 * dbuf->ofs is the length programmed in dw_dma_start_tx; the whole
+	 * transfer is treated as complete when this callback fires.
+	 * (The old intel_dma_get_src_addr() computation was dead code.)
+	 */
+	xmit->tail += dbuf->ofs;
+	xmit->tail &= UART_XMIT_SIZE - 1;
+	up->port.icount.tx += dbuf->ofs;
+
+	dbuf->ofs = 0;
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	/* More data pending: queue another TX command */
+	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+		spin_lock_irqsave(&up->port.lock, flags);
+		serial_sched_cmd(up, qcmd_start_tx);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+}
+
+static void dw_dma_start_tx(struct uart_hsu_port *up)
+{
+ struct dw_dma_priv *dw_dma = up->dma_priv;
+ struct dma_async_tx_descriptor *txdesc = NULL;
+ struct dma_chan *txchan;
+ struct dma_slave_config *txconf;
+ struct hsu_dma_buffer *dbuf = &up->txbuf;
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+ enum dma_ctrl_flags flag;
+
+ txchan = dw_dma->txchan;
+ txconf = &dw_dma->txs.dma_slave;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+ return;
+ }
+
+ /*
+ * Need to check if FCR is set, better to be set only once when
+ * use_dma == 1
+ */
+
+ set_bit(flag_tx_on, &up->flags);
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ if (count >= 2000)
+ count = 2000;
+
+ dbuf->ofs = count;
+
+
+ if (!count) {
+ pr_err("we see a case of TX Len == 0!!!\n\n");
+ dump_stack();
+ clear_bit(flag_tx_on, &up->flags);
+ return;
+ }
+
+ /* 2. Prepare the TX dma transfer */
+ txconf->direction = DMA_TO_DEVICE;
+ txconf->dst_addr = dw_dma->dma_addr;
+ txconf->src_maxburst = LNW_DMA_MSIZE_8;
+ txconf->dst_maxburst = LNW_DMA_MSIZE_8;
+ txconf->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ txconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+ (unsigned long) txconf);
+
+ dma_sync_single_for_device(up->port.dev,
+ dbuf->dma_addr,
+ dbuf->dma_size,
+ DMA_TO_DEVICE);
+
+ flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+
+ txdesc = txchan->device->device_prep_dma_memcpy(
+ txchan, /* DMA Channel */
+ dw_dma->dma_addr, /* DAR */
+ dbuf->dma_addr + xmit->tail, /* SAR */
+ count, /* Data len */
+ flag); /* Flag */
+ if (!txdesc) {
+ pr_warn("DW HSU: fail to prepare TX DMA operation\n");
+ return;
+ }
+
+ txdesc->callback = dw_dma_tx_done;
+ txdesc->callback_param = dw_dma;
+ txdesc->tx_submit(txdesc);
+}
+
+static void dw_dma_stop_tx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan = dw_dma->txchan;
+	int ret;
+
+	if (!test_bit(flag_tx_on, &up->flags))
+		return;
+
+	/*
+	 * ? this may be sleepable
+	 * (Removed the unused intel_dma_get_src_addr() count computation.)
+	 */
+	ret = txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	if (ret)
+		dev_warn(up->dev, "Fail to stop DMA TX channel!\n");
+}
+
+/* DMA engine callback: an RX transfer completed, push data to the tty */
+static void dw_dma_rx_done(void *arg)
+{
+	struct dw_dma_priv *dw_dma = arg;
+	struct uart_hsu_port *up = dw_dma->up;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+	int count;
+	unsigned long flags;
+
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty) {
+		/*
+		 * Port is closing: still drop the busy flag so a later
+		 * open can restart RX (it was left set here before).
+		 */
+		clear_bit(flag_rx_on, &up->flags);
+		return;
+	}
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	count = dbuf->ofs;
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	/* Do we really need it for x86? */
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_flip_buffer_push(tport);
+	tty_kref_put(tty);
+
+	clear_bit(flag_rx_on, &up->flags);
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	if (test_bit(flag_rx_pending, &up->flags))
+		serial_sched_cmd(up, qcmd_start_rx);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+
+static void dw_dma_start_rx(struct uart_hsu_port *up)
+{
+	struct dma_async_tx_descriptor *rxdesc = NULL;
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+	struct dma_slave_config *rxconf = &dw_dma->rxs.dma_slave;
+	enum dma_ctrl_flags flag;
+
+	/* RX already running: just remember that a restart was requested */
+	if (test_and_set_bit(flag_rx_on, &up->flags)) {
+		set_bit(flag_rx_pending, &up->flags);
+		return;
+	}
+
+	dbuf->ofs = 2048 - 64;
+
+	/* Prepare the RX dma transfer */
+	rxconf->direction = DMA_FROM_DEVICE;
+	rxconf->src_addr = dw_dma->dma_addr;
+	rxconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	rxconf->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	/* feng: better to calculate a best size */
+	rxconf->src_maxburst = LNW_DMA_MSIZE_8;
+	rxconf->dst_maxburst = LNW_DMA_MSIZE_8;
+
+	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+					(unsigned long) rxconf);
+	flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+	rxdesc = rxchan->device->device_prep_dma_memcpy(
+			rxchan,			/* DMA chan */
+			dbuf->dma_addr,		/* DAR */
+			dw_dma->dma_addr,	/* SAR */
+			dbuf->ofs,		/* data len */
+			flag);
+	if (!rxdesc) {
+		pr_warn("DW HSU: fail to prepare RX DMA operation\n");
+		/* Undo the busy flag so RX can be retried later */
+		clear_bit(flag_rx_on, &up->flags);
+		return;
+	}
+
+	rxdesc->callback = dw_dma_rx_done;
+	rxdesc->callback_param = dw_dma;
+	rxdesc->tx_submit(rxdesc);
+}
+
+static void dw_dma_stop_rx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+	int count, ret;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+
+	if (!test_bit(flag_rx_on, &up->flags)) {
+		clear_bit(flag_rx_pending, &up->flags);
+		return;
+	}
+
+	ret = rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+	if (ret) {
+		WARN(1, "DMA TERMINATE of RX returns error\n");
+		return;
+	}
+
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty)
+		goto out;
+
+	/* Flush whatever the aborted transfer had already received */
+	count = intel_dma_get_dst_addr(rxchan) - dbuf->dma_addr;
+	if (!count)
+		goto exit;
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	/* Do we really need it for x86? */
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_flip_buffer_push(tport);
+
+exit:
+	tty_kref_put(tty);
+out:
+	/* Always clear the flags, even when the tty is already gone */
+	clear_bit(flag_rx_on, &up->flags);
+	clear_bit(flag_rx_pending, &up->flags);
+}
+
+struct hsu_dma_ops dw_dma_ops = {
+ .init = dw_dma_init,
+ .exit = dw_dma_exit,
+ .suspend = dw_dma_suspend,
+ .resume = dw_dma_resume,
+ .start_tx = dw_dma_start_tx,
+ .stop_tx = dw_dma_stop_tx,
+ .start_rx = dw_dma_start_rx,
+ .stop_rx = dw_dma_stop_rx,
+};
+
+struct hsu_dma_ops *pdw_dma_ops = &dw_dma_ops;
+
+#else
+struct hsu_dma_ops *pdw_dma_ops = NULL;
+#endif
+
+/* Intel DMA ops */
+
+/* The buffer is already cache coherent */
+void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc,
+ struct hsu_dma_buffer *dbuf)
+{
+ dbuf->ofs = 0;
+
+ chan_writel(rxc, HSU_CH_BSR, HSU_DMA_BSR);
+ chan_writel(rxc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+ chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
+ chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
+ chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24) /* timeout, Errata 1 */
+ );
+ chan_writel(rxc, HSU_CH_CR, 0x3);
+}
+
+static int intel_dma_init(struct uart_hsu_port *up)
+{
+ int ret;
+
+ clear_bit(flag_tx_on, &up->flags);
+
+ ret = dma_init_common(up);
+ if (ret)
+ return ret;
+
+ /* This should not be changed all around */
+ chan_writel(up->txc, HSU_CH_BSR, HSU_DMA_BSR);
+ chan_writel(up->txc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+ /* Start the RX channel right now */
+ hsu_dma_start_rx_chan(up->rxc, &up->rxbuf);
+
+ up->dma_inited = 1;
+ return 0;
+}
+
+static int intel_dma_exit(struct uart_hsu_port *up)
+{
+ chan_writel(up->txc, HSU_CH_CR, 0x0);
+ clear_bit(flag_tx_on, &up->flags);
+ chan_writel(up->rxc, HSU_CH_CR, 0x2);
+ dma_exit_common(up);
+
+ up->dma_inited = 0;
+ return 0;
+}
+
+
+static void intel_dma_start_tx(struct uart_hsu_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ struct hsu_dma_buffer *dbuf = &up->txbuf;
+ unsigned long flags;
+ int count;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ chan_writel(up->txc, HSU_CH_CR, 0x0);
+ while (chan_readl(up->txc, HSU_CH_CR))
+ cpu_relax();
+ clear_bit(flag_tx_on, &up->flags);
+ if (dbuf->ofs) {
+ u32 real = chan_readl(up->txc, HSU_CH_D0SAR) - up->tx_addr;
+
+ /* we found in flow control case, TX irq came without sending
+ * all TX buffer
+ */
+ if (real < dbuf->ofs)
+ dbuf->ofs = real; /* adjust to real chars sent */
+
+ /* Update the circ buf info */
+ xmit->tail += dbuf->ofs;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ up->port.icount.tx += dbuf->ofs;
+ dbuf->ofs = 0;
+ }
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+ set_bit(flag_tx_on, &up->flags);
+ dma_sync_single_for_device(up->port.dev,
+ dbuf->dma_addr,
+ dbuf->dma_size,
+ DMA_TO_DEVICE);
+
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dbuf->ofs = count;
+
+ /* Reprogram the channel */
+ up->tx_addr = dbuf->dma_addr + xmit->tail;
+ chan_writel(up->txc, HSU_CH_D0SAR, up->tx_addr);
+ chan_writel(up->txc, HSU_CH_D0TSR, count);
+
+ /* Reenable the channel */
+ chan_writel(up->txc, HSU_CH_DCR, 0x1
+ | (0x1 << 8)
+ | (0x1 << 16));
+ chan_writel(up->txc, HSU_CH_CR, 0x1);
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return;
+}
+
+static void intel_dma_stop_tx(struct uart_hsu_port *up)
+{
+ chan_writel(up->txc, HSU_CH_CR, 0x0);
+ return;
+}
+
+static void intel_dma_start_rx(struct uart_hsu_port *up)
+{
+ return;
+}
+
+static void intel_dma_stop_rx(struct uart_hsu_port *up)
+{
+ chan_writel(up->rxc, HSU_CH_CR, 0x2);
+ return;
+}
+
+static void intel_dma_context_op(struct uart_hsu_port *up, int op)
+{
+ if (op == context_save) {
+ up->txc->cr = chan_readl(up->txc, HSU_CH_CR);
+ up->txc->dcr = chan_readl(up->txc, HSU_CH_DCR);
+ up->txc->sar = chan_readl(up->txc, HSU_CH_D0SAR);
+ up->txc->tsr = chan_readl(up->txc, HSU_CH_D0TSR);
+
+ up->rxc->cr = chan_readl(up->rxc, HSU_CH_CR);
+ up->rxc->dcr = chan_readl(up->rxc, HSU_CH_DCR);
+ up->rxc->sar = chan_readl(up->rxc, HSU_CH_D0SAR);
+ up->rxc->tsr = chan_readl(up->rxc, HSU_CH_D0TSR);
+ } else {
+ chan_writel(up->txc, HSU_CH_DCR, up->txc->dcr);
+ chan_writel(up->txc, HSU_CH_D0SAR, up->txc->sar);
+ chan_writel(up->txc, HSU_CH_D0TSR, up->txc->tsr);
+ chan_writel(up->txc, HSU_CH_BSR, HSU_DMA_BSR);
+ chan_writel(up->txc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+ chan_writel(up->rxc, HSU_CH_DCR, up->rxc->dcr);
+ chan_writel(up->rxc, HSU_CH_D0SAR, up->rxc->sar);
+ chan_writel(up->rxc, HSU_CH_D0TSR, up->rxc->tsr);
+ chan_writel(up->rxc, HSU_CH_BSR, HSU_DMA_BSR);
+ chan_writel(up->rxc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+ }
+}
+
+
+static int intel_dma_resume(struct uart_hsu_port *up)
+{
+ chan_writel(up->rxc, HSU_CH_CR, up->rxc_chcr_save);
+ return 0;
+}
+
+static int intel_dma_suspend(struct uart_hsu_port *up)
+{
+ int loop = 100000;
+ struct hsu_dma_chan *chan = up->rxc;
+
+ up->rxc_chcr_save = chan_readl(up->rxc, HSU_CH_CR);
+
+ if (test_bit(flag_startup, &up->flags)
+ && serial_in(up, UART_FOR) & 0x7F) {
+ dev_err(up->dev, "ignore suspend for rx fifo\n");
+ return -1;
+ }
+
+ if (chan_readl(up->txc, HSU_CH_CR)) {
+ dev_info(up->dev, "ignore suspend for tx dma\n");
+ return -1;
+ }
+
+ chan_writel(up->rxc, HSU_CH_CR, 0x2);
+ while (--loop) {
+ if (chan_readl(up->rxc, HSU_CH_CR) == 0x2)
+ break;
+ cpu_relax();
+ }
+
+ if (!loop) {
+ dev_err(up->dev, "Can't stop rx dma\n");
+ return -1;
+ }
+
+ if (chan_readl(chan, HSU_CH_D0SAR) - up->rxbuf.dma_addr) {
+ dev_err(up->dev, "ignore suspend for dma pointer\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+struct hsu_dma_ops intel_dma_ops = {
+ .init = intel_dma_init,
+ .exit = intel_dma_exit,
+ .suspend = intel_dma_suspend,
+ .resume = intel_dma_resume,
+ .start_tx = intel_dma_start_tx,
+ .stop_tx = intel_dma_stop_tx,
+ .start_rx = intel_dma_start_rx,
+ .stop_rx = intel_dma_stop_rx,
+ .context_op = intel_dma_context_op,
+};
+
+
--- /dev/null
+/*
+ * mfd_pci.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: 0/1 channel are assigned to port 0,
+ * 2/3 chan to port 1, 4/5 chan to port 3. Even number chans
+ * are used for RX, odd chans for TX
+ *
+ * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
+ * asserted, only when the HW is reset the DDCD and DDSR will
+ * be triggered
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#include "mfd.h"
+
+#ifdef CONFIG_PM
+static int serial_hsu_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct uart_hsu_port *up = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (up) {
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_suspend(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ }
+ return ret;
+}
+
+static int serial_hsu_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct uart_hsu_port *up = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (up) {
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_resume(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ }
+ return ret;
+}
+#else
+#define serial_hsu_pci_suspend NULL
+#define serial_hsu_pci_resume NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_hsu_pci_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+ return serial_hsu_do_runtime_idle(up);
+}
+
+static int serial_hsu_pci_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct uart_hsu_port *up = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_suspend(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ return ret;
+}
+
+static int serial_hsu_pci_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct uart_hsu_port *up = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_resume(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ return ret;
+}
+#else
+#define serial_hsu_pci_runtime_idle NULL
+#define serial_hsu_pci_runtime_suspend NULL
+#define serial_hsu_pci_runtime_resume NULL
+#endif
+
+static const struct dev_pm_ops serial_hsu_pci_pm_ops = {
+
+ SET_SYSTEM_SLEEP_PM_OPS(serial_hsu_pci_suspend,
+ serial_hsu_pci_resume)
+ SET_RUNTIME_PM_OPS(serial_hsu_pci_runtime_suspend,
+ serial_hsu_pci_runtime_resume,
+ serial_hsu_pci_runtime_idle)
+};
+
+DEFINE_PCI_DEVICE_TABLE(hsuart_port_pci_ids) = {
+ { PCI_VDEVICE(INTEL, 0x081B), hsu_port0 },
+ { PCI_VDEVICE(INTEL, 0x081C), hsu_port1 },
+ { PCI_VDEVICE(INTEL, 0x081D), hsu_port2 },
+ /* Cloverview support */
+ { PCI_VDEVICE(INTEL, 0x08FC), hsu_port0 },
+ { PCI_VDEVICE(INTEL, 0x08FD), hsu_port1 },
+ { PCI_VDEVICE(INTEL, 0x08FE), hsu_port2 },
+ /* Tangier support */
+ { PCI_VDEVICE(INTEL, 0x1191), hsu_port0 },
+ /* VLV2 support */
+ { PCI_VDEVICE(INTEL, 0x0F0A), hsu_port0 },
+ { PCI_VDEVICE(INTEL, 0x0F0C), hsu_port1 },
+ /* CHV support */
+ { PCI_VDEVICE(INTEL, 0x228A), hsu_port0 },
+ { PCI_VDEVICE(INTEL, 0x228C), hsu_port1 },
+ {},
+};
+
+DEFINE_PCI_DEVICE_TABLE(hsuart_dma_pci_ids) = {
+ { PCI_VDEVICE(INTEL, 0x081E), hsu_dma },
+ /* Cloverview support */
+ { PCI_VDEVICE(INTEL, 0x08FF), hsu_dma },
+ /* Tangier support */
+ { PCI_VDEVICE(INTEL, 0x1192), hsu_dma },
+ {},
+};
+
+static int serial_hsu_pci_port_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct uart_hsu_port *up;
+	int ret, port;
+	resource_size_t start, len;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+	dev_info(&pdev->dev,
+		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
+		PCI_FUNC(pdev->devfn), ent->driver_data,
+		(unsigned long) start, (unsigned long) len);
+
+	port = intel_mid_hsu_func_to_port(PCI_FUNC(pdev->devfn));
+	if (port == -1)
+		return 0;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pci_request_region(pdev, 0, "hsu");
+	if (ret)
+		goto err;
+
+	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
+			pdev->irq);
+	if (IS_ERR(up)) {
+		/* Propagate the real error (this path returned 0 before) */
+		ret = PTR_ERR(up);
+		goto err_release;
+	}
+
+	pci_set_drvdata(pdev, up);
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+	return 0;
+err_release:
+	pci_release_region(pdev, 0);
+err:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void serial_hsu_pci_port_remove(struct pci_dev *pdev)
+{
+ struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ serial_hsu_port_free(up);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+}
+
+static void serial_hsu_pci_port_shutdown(struct pci_dev *pdev)
+{
+ struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+ if (!up)
+ return;
+
+ serial_hsu_port_shutdown(up);
+}
+
+static struct pci_driver hsu_port_pci_driver = {
+ .name = "HSU serial",
+ .id_table = hsuart_port_pci_ids,
+ .probe = serial_hsu_pci_port_probe,
+ .remove = serial_hsu_pci_port_remove,
+ .shutdown = serial_hsu_pci_port_shutdown,
+/* Disable PM only when kgdb(poll mode uart) is enabled */
+#if defined(CONFIG_PM) && !defined(CONFIG_CONSOLE_POLL)
+ .driver = {
+ .pm = &serial_hsu_pci_pm_ops,
+ },
+#endif
+};
+
+static int serial_hsu_pci_dma_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	int ret, share_irq = 0;
+	resource_size_t start, len;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+	dev_info(&pdev->dev,
+		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
+		PCI_FUNC(pdev->devfn), ent->driver_data,
+		(unsigned long) start, (unsigned long) len);
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pci_request_region(pdev, 0, "hsu dma");
+	if (ret)
+		goto err;
+
+	/* share irq with port? ANN all and TNG chip from B0 stepping */
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER &&
+		pdev->revision >= 0x1) ||
+		intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		share_irq = 1;
+
+	ret = serial_hsu_dma_setup(&pdev->dev, start, len, pdev->irq, share_irq);
+	if (ret)
+		goto err_release;
+
+	return 0;
+err_release:
+	/* BAR 0 was leaked on this path before */
+	pci_release_region(pdev, 0);
+err:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void serial_hsu_pci_dma_remove(struct pci_dev *pdev)
+{
+ serial_hsu_dma_free();
+ pci_disable_device(pdev);
+ pci_unregister_driver(&hsu_port_pci_driver);
+}
+
+static struct pci_driver hsu_dma_pci_driver = {
+ .name = "HSU DMA",
+ .id_table = hsuart_dma_pci_ids,
+ .probe = serial_hsu_pci_dma_probe,
+ .remove = serial_hsu_pci_dma_remove,
+};
+
+static int __init hsu_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&hsu_dma_pci_driver);
+ if (!ret) {
+ ret = pci_register_driver(&hsu_port_pci_driver);
+ if (ret)
+ pci_unregister_driver(&hsu_dma_pci_driver);
+ }
+
+ return ret;
+}
+
+static void __exit hsu_pci_exit(void)
+{
+ pci_unregister_driver(&hsu_port_pci_driver);
+ pci_unregister_driver(&hsu_dma_pci_driver);
+}
+
+module_init(hsu_pci_init);
+module_exit(hsu_pci_exit);
+
+MODULE_AUTHOR("Yang Bin <bin.yang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
--- /dev/null
+/*
+ * mfd_plat.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+#include <linux/pci.h>
+
+#include "mfd.h"
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hsu_acpi_ids[] = {
+ { "80860F0A", 0 },
+ { "8086228A", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hsu_acpi_ids);
+#endif
+
+#ifdef CONFIG_PM
+static int serial_hsu_plat_suspend(struct device *dev)
+{
+ struct uart_hsu_port *up = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (up) {
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_suspend(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ }
+ return ret;
+}
+
+static int serial_hsu_plat_resume(struct device *dev)
+{
+ struct uart_hsu_port *up = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (up) {
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_resume(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ }
+ return ret;
+}
+#else
+#define serial_hsu_plat_suspend NULL
+#define serial_hsu_plat_resume NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_hsu_plat_runtime_idle(struct device *dev)
+{
+ struct uart_hsu_port *up = dev_get_drvdata(dev);
+
+ return serial_hsu_do_runtime_idle(up);
+}
+
+static int serial_hsu_plat_runtime_suspend(struct device *dev)
+{
+ struct uart_hsu_port *up = dev_get_drvdata(dev);
+ int ret = 0;
+
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_suspend(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ return ret;
+}
+
+static int serial_hsu_plat_runtime_resume(struct device *dev)
+{
+ struct uart_hsu_port *up = dev_get_drvdata(dev);
+ int ret = 0;
+
+ trace_hsu_func_start(up->index, __func__);
+ ret = serial_hsu_do_resume(up);
+ trace_hsu_func_end(up->index, __func__, "");
+ return ret;
+}
+#else
+#define serial_hsu_plat_runtime_idle NULL
+#define serial_hsu_plat_runtime_suspend NULL
+#define serial_hsu_plat_runtime_resume NULL
+#endif
+
+static const struct dev_pm_ops serial_hsu_plat_pm_ops = {
+
+ SET_SYSTEM_SLEEP_PM_OPS(serial_hsu_plat_suspend,
+ serial_hsu_plat_resume)
+ SET_RUNTIME_PM_OPS(serial_hsu_plat_runtime_suspend,
+ serial_hsu_plat_runtime_resume,
+ serial_hsu_plat_runtime_idle)
+};
+
+static int serial_hsu_plat_port_probe(struct platform_device *pdev)
+{
+	struct uart_hsu_port *up;
+	int port = pdev->id, irq;
+	struct resource *mem, *ioarea;
+	resource_size_t start, len;
+
+#ifdef CONFIG_ACPI
+	{
+		/* Declared here to avoid an unused warning when !CONFIG_ACPI */
+		const struct acpi_device_id *id;
+
+		for (id = hsu_acpi_ids; id->id[0]; id++)
+			if (!strncmp(id->id, dev_name(&pdev->dev),
+						strlen(id->id))) {
+				acpi_status status;
+				unsigned long long tmp;
+
+				status = acpi_evaluate_integer(
+						ACPI_HANDLE(&pdev->dev),
+						"_UID", NULL, &tmp);
+				if (ACPI_FAILURE(status))
+					return -ENODEV;
+				/* ACPI _UID is 1-based, port index 0-based */
+				port = tmp - 1;
+			}
+	}
+#endif
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no mem resource?\n");
+		return -EINVAL;
+	}
+	start = mem->start;
+	len = resource_size(mem);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		return irq; /* -ENXIO */
+	}
+
+	ioarea = request_mem_region(mem->start, resource_size(mem),
+			pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "HSU region already claimed\n");
+		return -EBUSY;
+	}
+
+	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
+			irq);
+	if (IS_ERR(up)) {
+		release_mem_region(mem->start, resource_size(mem));
+		dev_err(&pdev->dev, "failed to setup HSU\n");
+		/* Propagate the real error (was always -EINVAL) */
+		return PTR_ERR(up);
+	}
+
+	platform_set_drvdata(pdev, up);
+
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static int serial_hsu_plat_port_remove(struct platform_device *pdev)
+{
+ struct uart_hsu_port *up = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ pm_runtime_forbid(&pdev->dev);
+ serial_hsu_port_free(up);
+ platform_set_drvdata(pdev, NULL);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem)
+ release_mem_region(mem->start, resource_size(mem));
+
+ return 0;
+}
+
+static void serial_hsu_plat_port_shutdown(struct platform_device *pdev)
+{
+ struct uart_hsu_port *up = platform_get_drvdata(pdev);
+
+ if (!up)
+ return;
+
+ serial_hsu_port_shutdown(up);
+}
+
+static struct platform_driver hsu_plat_driver = {
+ .remove = serial_hsu_plat_port_remove,
+ .shutdown = serial_hsu_plat_port_shutdown,
+ .driver = {
+ .name = "HSU serial",
+ .owner = THIS_MODULE,
+/* Disable PM only when kgdb(poll mode uart) is enabled */
+#if defined(CONFIG_PM) && !defined(CONFIG_CONSOLE_POLL)
+ .pm = &serial_hsu_plat_pm_ops,
+#endif
+#ifdef CONFIG_ACPI
+ .acpi_match_table = ACPI_PTR(hsu_acpi_ids),
+#endif
+ },
+};
+
+static int __init hsu_plat_init(void)
+{
+	struct pci_dev *hsu_pci;
+
+	/*
+	 * Try to get pci device, if exist, then exit ACPI platform
+	 * register, On BYT FDK, include two enum mode: PCI, ACPI,
+	 * ignore ACPI enum mode.
+	 */
+	hsu_pci = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0F0A, NULL);
+	if (hsu_pci) {
+		/* Drop the reference taken by pci_get_device() */
+		pci_dev_put(hsu_pci);
+		pr_info("HSU serial: Find HSU controller in PCI device, "
+			"exit ACPI platform register!\n");
+		return 0;
+	}
+
+	return platform_driver_probe(&hsu_plat_driver, serial_hsu_plat_port_probe);
+}
+
+static void __exit hsu_plat_exit(void)
+{
+ platform_driver_unregister(&hsu_plat_driver);
+}
+
+module_init(hsu_plat_init);
+module_exit(hsu_plat_exit);
+
+MODULE_AUTHOR("Jason Chen <jason.cj.chen@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu-plat");
--- /dev/null
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mfd_trace
+
+#define TRACE_SYSTEM hsu
+
+#if !defined(_TRACE_HSU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HSU_H
+
+#include <linux/tracepoint.h>
+
+#define hsucmd_name(cmd) { cmd, #cmd }
+#define show_hsucmd_name(val) \
+ __print_symbolic(val, \
+ hsucmd_name(qcmd_overflow), \
+ hsucmd_name(qcmd_get_msr), \
+ hsucmd_name(qcmd_set_mcr), \
+ hsucmd_name(qcmd_set_ier), \
+ hsucmd_name(qcmd_start_rx), \
+ hsucmd_name(qcmd_stop_rx), \
+ hsucmd_name(qcmd_start_tx), \
+ hsucmd_name(qcmd_stop_tx), \
+ hsucmd_name(qcmd_cl), \
+ hsucmd_name(qcmd_port_irq), \
+ hsucmd_name(qcmd_dma_irq), \
+ hsucmd_name(qcmd_enable_irq), \
+ hsucmd_name(qcmd_cmd_off))
+
+
+TRACE_EVENT(hsu_cmd_insert,
+
+ TP_PROTO(unsigned port, char cmd),
+
+ TP_ARGS(port, cmd),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __field(char, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("port=%u cmd=%s", __entry->port,
+ show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_add,
+
+ TP_PROTO(unsigned port, char cmd),
+
+ TP_ARGS(port, cmd),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __field(char, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("port=%u cmd=%s", __entry->port,
+ show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_start,
+
+ TP_PROTO(unsigned port, char cmd),
+
+ TP_ARGS(port, cmd),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __field(char, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("port=%u cmd=%s", __entry->port,
+ show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_end,
+
+ TP_PROTO(unsigned port, char cmd),
+
+ TP_ARGS(port, cmd),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __field(char, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("port=%u cmd=%s", __entry->port,
+ show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_func_start,
+
+ TP_PROTO(unsigned port, const char *func),
+
+ TP_ARGS(port, func),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __string(name, func)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __assign_str(name, func);
+ ),
+
+ TP_printk("port=%u func=%s", __entry->port,
+ __get_str(name))
+);
+
+TRACE_EVENT(hsu_func_end,
+
+ TP_PROTO(unsigned port, const char *func, char *err),
+
+ TP_ARGS(port, func, err),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __string(name, func)
+ __string(ret, err)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __assign_str(name, func);
+ __assign_str(ret, err);
+ ),
+
+ TP_printk("port=%u func=%s err=%s", __entry->port,
+ __get_str(name), __get_str(ret))
+);
+
+TRACE_EVENT(hsu_mctrl,
+
+ TP_PROTO(unsigned port, unsigned mctrl),
+
+ TP_ARGS(port, mctrl),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __field(unsigned, mctrl)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __entry->mctrl = mctrl;
+ ),
+
+ TP_printk("port=%u mctrl=%d", __entry->port, __entry->mctrl)
+);
+
+TRACE_EVENT(hsu_set_termios,
+
+ TP_PROTO(unsigned port, unsigned int baud, int ctsrts),
+
+ TP_ARGS(port, baud, ctsrts),
+
+ TP_STRUCT__entry(
+ __field(unsigned, port)
+ __field(unsigned int, baud)
+ __field(int, ctsrts)
+ ),
+
+ TP_fast_assign(
+ __entry->port = port;
+ __entry->baud = baud;
+ __entry->ctsrts = ctsrts;
+ ),
+
+ TP_printk("port=%u baud=%d ctsrts=%d", __entry->port,
+ __entry->baud, __entry->ctsrts)
+);
+
+#endif /* if !defined(_TRACE_HSU_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
+#include <linux/serial_max3110.h>
#include <linux/kthread.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include "mrst_max3110.h"
struct task_struct *main_thread;
struct task_struct *read_thread;
struct mutex thread_mutex;
+ struct mutex io_mutex;
u32 baud;
u16 cur_conf;
u8 clock;
u8 parity, word_7bits;
u16 irq;
+ u16 irq_edge_triggered;
unsigned long uart_flags;
struct spi_transfer x;
int ret;
+ mutex_lock(&max->io_mutex);
spi_message_init(&message);
memset(&x, 0, sizeof x);
x.len = len;
/* Do the i/o */
ret = spi_sync(spi, &message);
+ mutex_unlock(&max->io_mutex);
return ret;
}
return;
}
-#define WORDS_PER_XFER 128
static void send_circ_buf(struct uart_max3110 *max,
struct circ_buf *xmit)
{
int i, len, blen, dma_size, left, ret = 0;
- dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
+ dma_size = M3110_RX_FIFO_DEPTH * sizeof(u16) * 2;
buf = kzalloc(dma_size, GFP_KERNEL | GFP_DMA);
if (!buf)
return;
while (!uart_circ_empty(xmit)) {
left = uart_circ_chars_pending(xmit);
while (left) {
- len = min(left, WORDS_PER_XFER);
+ len = min(left, M3110_RX_FIFO_DEPTH);
blen = len * sizeof(u16);
memset(ibuf, 0, blen);
max->uart_flags || kthread_should_stop());
mutex_lock(&max->thread_mutex);
-
- if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
+ if (max->irq_edge_triggered &&
+ test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
max3110_con_receive(max);
/* first handle console output */
{
struct uart_max3110 *max = dev_id;
- /* max3110's irq is a falling edge, not level triggered,
- * so no need to disable the irq */
+ if (max->irq_edge_triggered) {
+ /* max3110's irq is a falling edge, not level triggered,
+ * so no need to disable the irq */
- if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
- wake_up(&max->wq);
+ if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
+ wake_up(&max->wq);
+ } else {
+ max3110_con_receive(max);
+ }
return IRQ_HANDLED;
}
/* as we use thread to handle tx/rx, need set low latency */
port->state->port.low_latency = 1;
- if (max->irq) {
- max->read_thread = NULL;
- ret = request_irq(max->irq, serial_m3110_irq,
- IRQ_TYPE_EDGE_FALLING, "max3110", max);
- if (ret) {
- max->irq = 0;
- pr_err(PR_FMT "unable to allocate IRQ, polling\n");
- } else {
- /* Enable RX IRQ only */
- config |= WC_RXA_IRQ_ENABLE;
- }
- }
-
- if (max->irq == 0) {
+ if (max->irq > 0) {
+ /* Enable RX IRQ only */
+ config |= WC_RXA_IRQ_ENABLE;
+ } else {
/* If IRQ is disabled, start a read thread for input data */
max->read_thread =
kthread_run(max3110_read_thread, max, "max3110_read");
ret = max3110_out(max, config);
if (ret) {
- if (max->irq)
- free_irq(max->irq, max);
if (max->read_thread)
kthread_stop(max->read_thread);
max->read_thread = NULL;
max->read_thread = NULL;
}
- if (max->irq)
- free_irq(max->irq, max);
-
/* Disable interrupts from this port */
config = WC_TAG | WC_SW_SHDI;
max3110_out(max, config);
struct spi_device *spi = to_spi_device(dev);
struct uart_max3110 *max = spi_get_drvdata(spi);
- disable_irq(max->irq);
+ if (max->irq > 0)
+ disable_irq(max->irq);
uart_suspend_port(&serial_m3110_reg, &max->port);
max3110_out(max, max->cur_conf | WC_SW_SHDI);
return 0;
max3110_out(max, max->cur_conf);
uart_resume_port(&serial_m3110_reg, &max->port);
- enable_irq(max->irq);
+ if (max->irq > 0)
+ enable_irq(max->irq);
return 0;
}
void *buffer;
u16 res;
int ret = 0;
+ struct plat_max3110 *pdata = spi->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
max = kzalloc(sizeof(*max), GFP_KERNEL);
if (!max)
max->irq = (u16)spi->irq;
mutex_init(&max->thread_mutex);
+ mutex_init(&max->io_mutex);
max->word_7bits = 0;
max->parity = 0;
goto err_kthread;
}
+ max->irq_edge_triggered = pdata->irq_edge_triggered;
+
+ if (max->irq > 0) {
+ if (max->irq_edge_triggered) {
+ ret = request_irq(max->irq, serial_m3110_irq,
+ IRQ_TYPE_EDGE_FALLING, "max3110", max);
+ } else {
+ ret = request_threaded_irq(max->irq, NULL,
+ serial_m3110_irq,
+ IRQF_ONESHOT, "max3110", max);
+ }
+
+ if (ret) {
+ max->irq = 0;
+ dev_warn(&spi->dev,
+ "unable to allocate IRQ, will use polling method\n");
+ }
+ }
+
spi_set_drvdata(spi, max);
pmax = max;
free_page((unsigned long)max->con_xmit.buf);
+ if (max->irq)
+ free_irq(max->irq, max);
+
if (max->main_thread)
kthread_stop(max->main_thread);
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
+ if (port->ops->wake_peer)
+ port->ops->wake_peer(port);
+
if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
!tty->stopped && !tty->hw_stopped)
port->ops->start_tx(port);
usb_lock_device(udev);
usb_remote_wakeup(udev);
usb_unlock_device(udev);
+ if (HCD_IRQ_DISABLED(hcd)) {
+ /* We can now process IRQs so enable IRQ */
+ clear_bit(HCD_FLAG_IRQ_DISABLED, &hcd->flags);
+ enable_irq(hcd->irq);
+ }
}
/**
*/
local_irq_save(flags);
- if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
+ if (unlikely(HCD_DEAD(hcd)))
rc = IRQ_NONE;
- else if (hcd->driver->irq(hcd) == IRQ_NONE)
+ else if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
+ if (hcd->has_wakeup_irq) {
+ /*
+ * We got a wakeup interrupt while the controller was
+ * suspending or suspended. We can't handle it now, so
+ * disable the IRQ and resume the root hub (and hence
+ * the controller too).
+ */
+ disable_irq_nosync(hcd->irq);
+ set_bit(HCD_FLAG_IRQ_DISABLED, &hcd->flags);
+ usb_hcd_resume_root_hub(hcd);
+ rc = IRQ_HANDLED;
+ } else
+ rc = IRQ_NONE;
+ } else if (hcd->driver->irq(hcd) == IRQ_NONE)
rc = IRQ_NONE;
else
rc = IRQ_HANDLED;
endchoice
+comment "Platform Glue Driver Support"
+
+config USB_DWC3_PCI
+ tristate "PCIe-based Platforms"
+ depends on PCI
+ default USB_DWC3
+ help
+ If you're using the DesignWare Core IP with a PCIe, please say
+ 'Y' or 'M' here.
+
+ One such PCIe-based platform is Synopsys' PCIe HAPS model of
+ this IP.
+
+config USB_DWC3_OTG
+ tristate "DWC3 OTG mode support"
+ depends on USB && PCI
+ select USB_OTG
+ help
+ Say Y here to enable DWC3 OTG driver.
+	  This driver implements the OTG framework for the DWC3 OTG controller.
+	  It supports the role switch and charger detection features and maintains
+	  one state machine. This driver should work together with a platform
+	  specific driver, because every platform has its own hardware design.
+
+config USB_DWC3_INTEL_MRFL
+ tristate "DWC OTG 3.0 for Intel Merrifield platforms"
+ depends on USB && USB_DWC3_OTG
+ select USB_DWC3_DEVICE_INTEL
+ help
+	  Say Y here to enable the DWC3 OTG driver for Intel Merrifield platforms.
+	  It implements the OTG feature on the DWC3 OTG controller and
+	  supports the role switch and charger detection features.
+	  This driver must be enabled if you want to enable host mode on Intel
+	  Merrifield platforms.
+
+config USB_DWC3_INTEL_BYT
+ tristate "DWC OTG 3.0 for Intel Baytrail platforms"
+ depends on USB && USB_DWC3_OTG
+ select USB_DWC3_DEVICE_INTEL
+ help
+	  Say Y here to enable the DWC3 OTG driver for Intel Baytrail platforms.
+	  It implements the OTG feature on the DWC3 OTG controller and
+	  supports the role switch and charger detection features.
+	  This driver must be enabled if you want to enable device mode on Intel
+	  Baytrail platforms.
+
+config USB_DWC3_DEVICE_INTEL
+ bool "DWC3 Device Mode support on Intel platform"
+ depends on USB_DWC3_OTG
+ help
+	  Support device mode of the DWC3 controller on Intel platforms.
+	  It implements the device mode feature on the DWC3 OTG controller.
+	  This driver must be enabled if you want to enable device mode for Intel
+	  platforms (e.g. Baytrail and Merrifield).
+
+config USB_DWC3_HOST_INTEL
+ bool "DWC3 Host Mode support on Intel Merrifield platform"
+ depends on USB_ARCH_HAS_XHCI && USB_DWC3_INTEL_MRFL
+ help
+	  Support host mode of the DWC3 controller on the Intel Merrifield
+	  platform. It should be enabled together with the DWC3 INTEL driver,
+	  because the Intel platform uses a different design than the standard
+	  USB_DWC3_HOST. To enable host mode on Intel platforms, enable this.
+
+comment "Debugging features"
+
config USB_DWC3_DEBUG
bool "Enable Debugging Messages"
help
ccflags-$(CONFIG_USB_DWC3_DEBUG) := -DDEBUG
ccflags-$(CONFIG_USB_DWC3_VERBOSE) += -DVERBOSE_DEBUG
+obj-$(CONFIG_USB_DWC3_DEVICE_INTEL) += dwc3-device-intel.o
+obj-$(CONFIG_USB_DWC3_INTEL_MRFL) += dwc3-intel-mrfl.o
+ifneq ($(CONFIG_DEBUG_FS),)
+ obj-$(CONFIG_USB_DWC3_DEVICE_INTEL) += debugfs.o
+endif
+
+ifeq ($(CONFIG_USB_DWC3_DEVICE_INTEL),)
obj-$(CONFIG_USB_DWC3) += dwc3.o
dwc3-y := core.o
ifneq ($(CONFIG_DEBUG_FS),)
dwc3-y += debugfs.o
endif
+endif
##
# Platform-specific glue layers go here
obj-$(CONFIG_USB_DWC3) += dwc3-exynos.o
ifneq ($(CONFIG_PCI),)
+ obj-$(CONFIG_USB_DWC3_OTG) += otg.o
obj-$(CONFIG_USB_DWC3) += dwc3-pci.o
endif
-
#include <linux/usb/otg.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/ulpi.h>
#include "core.h"
#include "gadget.h"
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), 0);
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n),
+ dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(n)));
}
}
unsigned long timeout;
u32 reg;
int ret;
+ struct usb_phy *usb_phy;
reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
/* This should read as U3 followed by revision number */
}
dwc->revision = reg;
+ dwc3_core_soft_reset(dwc);
+
+ /* Delay 1 ms Before DCTL soft reset to make it safer from hitting
+ * Tx-CMD PHY hang issue.
+ */
+ mdelay(1);
+
/* issue device SoftReset too */
timeout = jiffies + msecs_to_jiffies(500);
dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
cpu_relax();
} while (true);
- dwc3_core_soft_reset(dwc);
+ /* DCTL core soft reset may cause PHY hang, delay 1 ms and check ulpi */
+ mdelay(1);
+ usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (usb_phy &&
+ usb_phy_io_read(usb_phy, ULPI_VENDOR_ID_LOW) < 0)
+ dev_err(dwc->dev,
+ "ULPI not working after DCTL soft reset\n");
+ usb_put_phy(usb_phy);
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
#include <linux/usb/gadget.h>
/* Global constants */
+#define DWC3_SCRATCH_BUF_SIZE 4096
#define DWC3_EP0_BOUNCE_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
#define DWC3_XHCI_RESOURCES_NUM 2
#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
+/* Global Event Size Registers */
+#define DWC3_GEVNTSIZ_INTMASK (1 << 31)
+#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
+
/* Global HWPARAMS1 Register */
#define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24)
#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0
#define DWC3_DGCMD_SET_LMP 0x01
#define DWC3_DGCMD_SET_PERIODIC_PAR 0x02
#define DWC3_DGCMD_XMIT_FUNCTION 0x03
+#define DWC3_DGCMD_SET_SCRATCH_ADDR_LO 0x04
/* These apply for core versions 1.94a and later */
#define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO 0x04
* @trb_pool_dma: dma address of @trb_pool
* @free_slot: next slot which is going to be used
* @busy_slot: first slot which is owned by HW
+ * @ep_state: endpoint state
* @desc: usb_endpoint_descriptor pointer
* @dwc: pointer to DWC controller
* @flags: endpoint flags (wedged, stalled, ...)
+ * @flags_backup: backup endpoint flags
* @current_trb: index of current used trb
* @number: endpoint number (1 - 15)
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
dma_addr_t trb_pool_dma;
u32 free_slot;
u32 busy_slot;
+ u32 ep_state;
const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
+ struct ebc_io *ebc;
+#define DWC3_EP_EBC_OUT_NB 16
+#define DWC3_EP_EBC_IN_NB 17
+
unsigned flags;
+ unsigned flags_backup;
#define DWC3_EP_ENABLED (1 << 0)
#define DWC3_EP_STALL (1 << 1)
#define DWC3_EP_WEDGE (1 << 2)
#define DWC3_EP_BUSY (1 << 4)
#define DWC3_EP_PENDING_REQUEST (1 << 5)
#define DWC3_EP_MISSED_ISOC (1 << 6)
+#define DWC3_EP_HIBERNATION (1 << 7)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN (1 << 31)
DWC3_LINK_STATE_MASK = 0x0f,
};
+enum dwc3_pm_state {
+ PM_DISCONNECTED = 0,
+ PM_ACTIVE,
+ PM_SUSPENDED,
+ PM_RESUMING,
+};
+
/* TRB Length, PCM and Status */
#define DWC3_TRB_SIZE_MASK (0x00ffffff)
#define DWC3_TRB_SIZE_LENGTH(n) ((n) & DWC3_TRB_SIZE_MASK)
unsigned direction:1;
unsigned mapped:1;
unsigned queued:1;
+ unsigned short_packet:1;
};
/*
};
/**
+ * struct dwc3_hwregs - registers saved when entering hibernation
+ */
+struct dwc3_hwregs {
+ u32 guctl;
+ u32 dcfg;
+ u32 devten;
+ u32 gctl;
+ u32 gusb3pipectl0;
+ u32 gusb2phycfg0;
+ u32 gevntadrlo;
+ u32 gevntadrhi;
+ u32 gevntsiz;
+ u32 grxthrcfg;
+};
+
+/**
* struct dwc3 - representation of our controller
* @ctrl_req: usb control request which is used for ep0
* @ep0_trb: trb which is used for the ctrl_req
unsigned needs_fifo_resize:1;
unsigned resize_fifos:1;
unsigned pullups_connected:1;
+ unsigned quirks_disable_irqthread:1;
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
struct dwc3_hwparams hwparams;
struct dentry *root;
struct debugfs_regset32 *regset;
+ enum dwc3_pm_state pm_state;
+ u8 is_otg;
+ u8 soft_connected;
u8 test_mode;
u8 test_mode_nr;
+
+ /* delayed work for handling Link State Change */
+ struct delayed_work link_work;
+
+ u8 is_ebc;
+
+ struct dwc3_scratchpad_array *scratch_array;
+ dma_addr_t scratch_array_dma;
+ void *scratch_buffer[DWC3_MAX_HIBER_SCRATCHBUFS];
+ struct dwc3_hwregs hwregs;
+ bool hiber_enabled;
};
/* -------------------------------------------------------------------------- */
struct dwc3_event_gevt gevt;
};
+struct ebc_io {
+ const char *name;
+ const char *epname;
+ u8 epnum;
+ u8 is_ondemand;
+ u8 static_trb_pool_size;
+ struct list_head list;
+ int (*init) (void);
+ void *(*alloc_static_trb_pool) (dma_addr_t *dma_addr);
+ void (*free_static_trb_pool) (void);
+ int (*xfer_start) (void);
+ int (*xfer_stop) (void);
+};
+
+void dwc3_register_io_ebc(struct ebc_io *ebc);
+void dwc3_unregister_io_ebc(struct ebc_io *ebc);
+
/*
* DWC3 Features to be used as Driver Data
*/
.release = single_release,
};
+static int dwc3_hiber_enabled_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+
+ if (dwc->hiber_enabled)
+ seq_puts(s, "hibernation enabled\n");
+ else
+ seq_puts(s, "hibernation disabled\n");
+
+ return 0;
+}
+
+static int dwc3_hiber_enabled_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_hiber_enabled_show, inode->i_private);
+}
+
+static ssize_t dwc3_hiber_enabled_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ char buf[32];
+ int enabled = 0;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ sscanf(buf, "%d", &enabled);
+ dwc->hiber_enabled = enabled;
+
+ return count;
+}
+
+static const struct file_operations dwc3_hiber_enabled_fops = {
+ .open = dwc3_hiber_enabled_open,
+ .write = dwc3_hiber_enabled_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
ret = -ENOMEM;
goto err1;
}
+
+ file = debugfs_create_file("hiber_enabled", S_IRUGO | S_IWUSR,
+ root, dwc, &dwc3_hiber_enabled_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
}
return 0;
--- /dev/null
+/**
+ * Copyright (C) 2012 Intel Corp.
+ * Author: Jiebing Li
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/usb/dwc3-intel-mid.h>
+#include <linux/usb/phy.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+#include "otg.h"
+
+#include "debug.h"
+
+#include "core.c"
+#include "ep0.c"
+#include "gadget.c"
+
+/* FLIS register */
+#define APBFC_EXIOTG3_MISC0_REG 0xF90FF85C
+
+/* Global User Control Register Auto Retry bit*/
+#define DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN (1 << 14)
+
+/* Global Configuration Register */
+#define DWC3_GRXTHRCFG_USBRXPKTCNTSEL (1 << 29)
+#define DWC3_GRXTHRCFG_USBRXPKTCNT(n) (n << 24)
+#define DWC3_GRXTHRCFG_USBRXPKTCNT_MASK (0xf << 24)
+#define DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE(n) (n << 19)
+#define DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE_MASK (0x1f << 19)
+
+/**
+ * struct dwc3_dev_data - Structure holding platform related
+ * information
+ * @flis_reg: FLIS register
+ * @grxthrcfg: DWC3 GRXTHCFG register
+ */
+struct dwc3_dev_data {
+ struct dwc3 *dwc;
+ void __iomem *flis_reg;
+ u32 grxthrcfg;
+ struct mutex mutex;
+};
+
+static struct dwc3_dev_data *_dev_data;
+
+/*
+ * dwc3_set_flis_reg - set FLIS register
+ *
+ * This is a workaround for OTG3 IP bug of using EP #8 for host mode
+ */
+static void dwc3_set_flis_reg(void)
+{
+ u32 reg;
+ void __iomem *flis_reg;
+
+ flis_reg = _dev_data->flis_reg;
+
+ reg = dwc3_readl(flis_reg, DWC3_GLOBALS_REGS_START);
+ reg &= ~(1 << 3);
+ dwc3_writel(flis_reg, DWC3_GLOBALS_REGS_START, reg);
+}
+
+/*
+ * dwc3_disable_multi_packet - set GRXTHRCFG register to disable
+ * reception multi-packet thresholding for DWC 2.50a.
+ */
+static void dwc3_disable_multi_packet(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ _dev_data->grxthrcfg = reg;
+ if (reg) {
+ reg &= ~DWC3_GRXTHRCFG_USBRXPKTCNTSEL;
+ reg &= ~DWC3_GRXTHRCFG_USBRXPKTCNT_MASK;
+ reg &= ~DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE_MASK;
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ }
+}
+
+/*
+ * dwc3_enable_host_auto_retry - set or clear the host Auto Retry
+ * Enable bit (cleared for device mode)
+ */
+static void dwc3_enable_host_auto_retry(struct dwc3 *dwc, bool enable)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+
+ if (enable)
+ reg |= DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN;
+ else
+ reg &= ~DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN;
+
+ dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+}
+
+static void dwc3_do_extra_change(struct dwc3 *dwc)
+{
+ dwc3_set_flis_reg();
+
+ if (dwc->revision == DWC3_REVISION_250A)
+ dwc3_disable_multi_packet(dwc);
+
+ dwc3_enable_host_auto_retry(dwc, false);
+}
+
+static void dwc3_enable_hibernation(struct dwc3 *dwc)
+{
+ u32 num, reg;
+
+ if (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)
+ != DWC3_GHWPARAMS1_EN_PWROPT_HIB) {
+ dev_err(dwc->dev, "Device Mode Hibernation is not supported\n");
+ return;
+ }
+
+ num = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(
+ dwc->hwparams.hwparams4);
+ if (num != 1)
+ dev_err(dwc->dev, "number of scratchpad buffer: %d\n", num);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg |= DWC3_GCTL_GBLHIBERNATIONEN;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_SET_SCRATCH_ADDR_LO,
+ dwc->scratch_array_dma & 0xffffffffU);
+}
+
+/*
+ * Re-written IRQ functions that do not use an IRQ thread, because the IRQ
+ * thread has a negative impact on USB performance, especially USB networking:
+ * USB3 UDP download throughput drops from 80MB/s to 40MB/s when it is enabled.
+ */
+static irqreturn_t dwc3_quirks_process_event_buf(struct dwc3 *dwc, u32 buf)
+{
+ struct dwc3_event_buffer *evt;
+ u32 count;
+ u32 reg;
+ int left;
+
+ evt = dwc->ev_buffs[buf];
+
+ count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
+ count &= DWC3_GEVNTCOUNT_MASK;
+ if (!count)
+ return IRQ_NONE;
+
+ evt->count = count;
+
+	/* WORKAROUND: add a 4 us delay for an A-unit issue in the A0 stepping.
+ * Can be removed after B0.
+ */
+ if (dwc->is_otg && dwc->revision == DWC3_REVISION_210A)
+ udelay(4);
+
+ left = evt->count;
+
+ while (left > 0) {
+ union dwc3_event event;
+
+ event.raw = *(u32 *) (evt->buf + evt->lpos);
+
+ dwc3_process_event_entry(dwc, &event);
+
+ /*
+ * FIXME we wrap around correctly to the next entry as
+ * almost all entries are 4 bytes in size. There is one
+ * entry which has 12 bytes which is a regular entry
+ * followed by 8 bytes data. ATM I don't know how
+		 * things are organized if we get next to a
+ * boundary so I worry about that once we try to handle
+ * that.
+ */
+ evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+ left -= 4;
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
+ }
+
+ evt->count = 0;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dwc3_quirks_interrupt(int irq, void *_dwc)
+{
+ struct dwc3 *dwc = _dwc;
+ int i;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&dwc->lock);
+ if (dwc->pm_state != PM_ACTIVE) {
+ if (dwc->pm_state == PM_SUSPENDED) {
+ dev_info(dwc->dev, "u2/u3 pmu is received\n");
+ pm_runtime_get(dwc->dev);
+ dwc->pm_state = PM_RESUMING;
+ ret = IRQ_HANDLED;
+ }
+ goto out;
+ }
+
+ for (i = 0; i < dwc->num_event_buffers; i++) {
+ irqreturn_t status;
+
+ status = dwc3_quirks_process_event_buf(dwc, i);
+ if (status == IRQ_HANDLED)
+ ret = status;
+ }
+
+out:
+ spin_unlock(&dwc->lock);
+
+ return ret;
+}
+
+int dwc3_start_peripheral(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ int irq;
+ int ret = 0;
+
+ pm_runtime_get_sync(dwc->dev);
+
+ mutex_lock(&_dev_data->mutex);
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ if (dwc->gadget_driver && dwc->soft_connected) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_core_init(dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ if (dwc->hiber_enabled)
+ dwc3_enable_hibernation(dwc);
+ dwc3_do_extra_change(dwc);
+ dwc3_event_buffers_setup(dwc);
+ ret = dwc3_init_for_enumeration(dwc);
+ if (ret)
+ goto err1;
+
+ if (dwc->soft_connected)
+ dwc3_gadget_run_stop(dwc, 1);
+ }
+
+ dwc->pm_state = PM_ACTIVE;
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+ if (dwc->quirks_disable_irqthread)
+ ret = request_irq(irq, dwc3_quirks_interrupt,
+ IRQF_SHARED, "dwc3", dwc);
+ else
+ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+ IRQF_SHARED, "dwc3", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+ mutex_unlock(&_dev_data->mutex);
+
+ return 0;
+
+err1:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+err0:
+ mutex_unlock(&_dev_data->mutex);
+
+ return ret;
+}
+
+int dwc3_stop_peripheral(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ u8 epnum;
+ int irq;
+
+ mutex_lock(&_dev_data->mutex);
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ dwc3_stop_active_transfers(dwc);
+
+ if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+ dwc3_disconnect_gadget(dwc);
+
+ dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ }
+
+ dwc->start_config_issued = false;
+
+ /* Clear Run/Stop bit */
+ dwc3_gadget_run_stop(dwc, 0);
+ dwc3_gadget_keep_conn(dwc, 0);
+
+ for (epnum = 0; epnum < 2; epnum++) {
+ struct dwc3_ep *dep;
+
+ dep = dwc->eps[epnum];
+
+ if (dep->flags & DWC3_EP_ENABLED)
+ __dwc3_gadget_ep_disable(dep);
+ }
+
+ dwc3_gadget_disable_irq(dwc);
+
+ dwc3_event_buffers_cleanup(dwc);
+
+ if (_dev_data->grxthrcfg && dwc->revision == DWC3_REVISION_250A) {
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, _dev_data->grxthrcfg);
+ _dev_data->grxthrcfg = 0;
+ }
+
+ dwc3_enable_host_auto_retry(dwc, true);
+
+ if (dwc->pm_state != PM_SUSPENDED)
+ pm_runtime_put(dwc->dev);
+
+ dwc->pm_state = PM_DISCONNECTED;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+ free_irq(irq, dwc);
+
+ mutex_unlock(&_dev_data->mutex);
+
+ cancel_delayed_work_sync(&dwc->link_work);
+
+ return 0;
+}
+
+static int dwc3_device_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ int ret;
+
+ /*
+ * FIXME If pm_state is PM_RESUMING, we should wait for it to
+	 * become PM_ACTIVE before continuing. The chance of hitting
+ * PM_RESUMING is rare, but if so, we'll return directly.
+ *
+ * If some gadget reaches here in atomic context,
+ * pm_runtime_get_sync will cause a sleep problem.
+ */
+ if (dwc->pm_state == PM_RESUMING) {
+ dev_err(dwc->dev, "%s: PM_RESUMING, return -EIO\n", __func__);
+ return -EIO;
+ }
+
+ if (dwc->pm_state == PM_SUSPENDED)
+ pm_runtime_get_sync(dwc->dev);
+
+ is_on = !!is_on;
+
+ mutex_lock(&_dev_data->mutex);
+
+ if (dwc->soft_connected == is_on)
+ goto done;
+
+ dwc->soft_connected = is_on;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (dwc->pm_state == PM_DISCONNECTED) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ goto done;
+ }
+
+ if (is_on) {
+ /* Per dwc3 databook 2.40a section 8.1.9, re-connection
+ * should follow steps described section 8.1.1 power on
+ * or soft reset.
+ */
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_core_init(dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ if (dwc->hiber_enabled)
+ dwc3_enable_hibernation(dwc);
+ dwc3_do_extra_change(dwc);
+ dwc3_event_buffers_setup(dwc);
+ dwc3_init_for_enumeration(dwc);
+ ret = dwc3_gadget_run_stop(dwc, 1);
+ if (dwc->hiber_enabled)
+ dwc3_gadget_keep_conn(dwc, 1);
+ } else {
+ u8 epnum;
+
+ for (epnum = 0; epnum < 2; epnum++) {
+ struct dwc3_ep *dep;
+
+ dep = dwc->eps[epnum];
+
+ if (dep->flags & DWC3_EP_ENABLED)
+ __dwc3_gadget_ep_disable(dep);
+ }
+
+ dwc3_stop_active_transfers(dwc);
+ dwc3_gadget_keep_conn(dwc, 0);
+ ret = dwc3_gadget_run_stop(dwc, 0);
+ dwc3_gadget_disable_irq(dwc);
+ }
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ mutex_unlock(&_dev_data->mutex);
+
+ return ret;
+
+done:
+ mutex_unlock(&_dev_data->mutex);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops dwc3_device_gadget_ops = {
+ .get_frame = dwc3_gadget_get_frame,
+ .wakeup = dwc3_gadget_wakeup,
+ .set_selfpowered = dwc3_gadget_set_selfpowered,
+ .pullup = dwc3_device_gadget_pullup,
+ .udc_start = dwc3_gadget_start,
+ .udc_stop = dwc3_gadget_stop,
+ .vbus_draw = dwc3_vbus_draw,
+};
+
+static int dwc3_device_intel_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct dwc3 *dwc;
+ struct device *dev = &pdev->dev;
+ int ret = -ENOMEM;
+ void *mem;
+
+ struct dwc_device_par *pdata;
+ struct usb_phy *usb_phy;
+ struct dwc_otg2 *otg;
+
+ mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
+ if (!mem) {
+ dev_err(dev, "not enough memory\n");
+ return -ENOMEM;
+ }
+ dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
+ dwc->mem = mem;
+
+ _dev_data = kzalloc(sizeof(*_dev_data), GFP_KERNEL);
+ if (!_dev_data) {
+ dev_err(dev, "not enough memory\n");
+ return -ENOMEM;
+ }
+
+ _dev_data->dwc = dwc;
+
+ pdata = (struct dwc_device_par *)pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data for %s.\n",
+ dev_name(&pdev->dev));
+ return -ENODEV;
+ }
+
+ if (node) {
+ dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
+ dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
+ } else {
+ dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
+ }
+
+ if (IS_ERR(dwc->usb2_phy)) {
+ ret = PTR_ERR(dwc->usb2_phy);
+
+ /*
+ * if -ENXIO is returned, it means PHY layer wasn't
+ * enabled, so it makes no sense to return -EPROBE_DEFER
+ * in that case, since no PHY driver will ever probe.
+ */
+ if (ret == -ENXIO)
+ return ret;
+
+ dev_err(dev, "no usb2 phy configured\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (IS_ERR(dwc->usb3_phy)) {
+ ret = PTR_ERR(dwc->usb2_phy);
+
+ /*
+ * if -ENXIO is returned, it means PHY layer wasn't
+ * enabled, so it makes no sense to return -EPROBE_DEFER
+ * in that case, since no PHY driver will ever probe.
+ */
+ if (ret == -ENXIO)
+ return ret;
+
+ dev_err(dev, "no usb3 phy configured\n");
+ return -EPROBE_DEFER;
+ }
+
+ mutex_init(&_dev_data->mutex);
+ spin_lock_init(&dwc->lock);
+ platform_set_drvdata(pdev, dwc);
+
+ dwc->regs = pdata->io_addr + DWC3_GLOBALS_REGS_START;
+ dwc->regs_size = pdata->len - DWC3_GLOBALS_REGS_START;
+ dwc->dev = dev;
+
+ dev->dma_mask = dev->parent->dma_mask;
+ dev->dma_parms = dev->parent->dma_parms;
+ dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+
+ if (!strncmp("super", maximum_speed, 5))
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+ else if (!strncmp("high", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_HIGHSPEED;
+ else if (!strncmp("full", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_FULLSPEED1;
+ else if (!strncmp("low", maximum_speed, 3))
+ dwc->maximum_speed = DWC3_DCFG_LOWSPEED;
+ else
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+
+ dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_get_sync(dev);
+ pm_runtime_forbid(dev);
+
+ dwc3_cache_hwparams(dwc);
+ dwc3_core_num_eps(dwc);
+
+ _dev_data->flis_reg =
+ ioremap_nocache(APBFC_EXIOTG3_MISC0_REG, 4);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+ if (ret) {
+ dev_err(dwc->dev, "failed to allocate event buffers\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ /*
+	 * Do not use an IRQ thread, because it has a negative impact
+	 * on USB performance, especially USB network throughput.
+ */
+ dwc->quirks_disable_irqthread = 1;
+
+ usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ otg = container_of(usb_phy, struct dwc_otg2, usb2_phy);
+ otg->start_device = dwc3_start_peripheral;
+ otg->stop_device = dwc3_stop_peripheral;
+ otg->vbus_draw = dwc3_vbus_draw;
+ usb_put_phy(usb_phy);
+ dwc->is_otg = 1;
+
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ ret = dwc3_gadget_init(dwc);
+ if (ret) {
+ dev_err(dev, "failed to initialize gadget\n");
+ goto err0;
+ }
+ dwc->gadget.ops = &dwc3_device_gadget_ops;
+ dwc->gadget.is_otg = 1;
+
+ dwc->mode = DWC3_MODE_DEVICE;
+
+ ret = dwc3_debugfs_init(dwc);
+ if (ret) {
+ dev_err(dev, "failed to initialize debugfs\n");
+ goto err1;
+ }
+
+ pm_runtime_allow(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+
+err1:
+ dwc3_gadget_exit(dwc);
+
+err0:
+ dwc3_free_event_buffers(dwc);
+
+ return ret;
+}
+
+static int dwc3_device_intel_remove(struct platform_device *pdev)
+{
+ iounmap(_dev_data->flis_reg);
+
+ dwc3_remove(pdev);
+
+ kfree(_dev_data);
+ _dev_data = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static const struct dev_pm_ops dwc3_device_pm_ops = {
+ .runtime_suspend = dwc3_runtime_suspend,
+ .runtime_resume = dwc3_runtime_resume,
+};
+#define DWC3_DEVICE_PM_OPS (&dwc3_device_pm_ops)
+#else
+#define DWC3_DEVICE_PM_OPS NULL
+#endif
+
+static struct platform_driver dwc3_device_intel_driver = {
+ .probe = dwc3_device_intel_probe,
+ .remove = dwc3_device_intel_remove,
+ .driver = {
+ .name = "dwc3-device",
+ .of_match_table = of_match_ptr(of_dwc3_match),
+ .pm = DWC3_DEVICE_PM_OPS,
+ },
+};
+
+module_platform_driver(dwc3_device_intel_driver);
+
+MODULE_ALIAS("platform:dwc3");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
--- /dev/null
+/*
+ * Copyright (C) 2012 Intel Corp.
+ * Author: Yu Wang
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_device.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include "../host/xhci.h"
+#include "core.h"
+#include "otg.h"
+
+#define WAIT_DISC_EVENT_COMPLETE_TIMEOUT 5 /* 100ms */
+
+static int otg_irqnum;
+
+static int dwc3_start_host(struct usb_hcd *hcd);
+static int dwc3_stop_host(struct usb_hcd *hcd);
+static struct platform_driver dwc3_xhci_driver;
+
+/* HC quirk hook: mark this controller as a platform (non-PCI) xHCI. */
+static void xhci_dwc3_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	/*
+	 * As of now platform drivers don't provide MSI support so we ensure
+	 * here that the generic code does not try to make a pci_dev from our
+	 * dev struct in order to setup MSI
+	 *
+	 * Synopsys DWC3 controller will generate PLC when link transfer to
+	 * compliance/loopback mode.
+	 */
+	xhci->quirks |= XHCI_PLAT;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_dwc3_setup(struct usb_hcd *hcd)
+{
+	/* generic xHCI init with our platform quirks applied */
+	return xhci_gen_setup(hcd, xhci_dwc3_quirks);
+}
+
+/* Bus-resume hook: give the core 1ms to stabilise, then do the
+ * standard xHCI bus resume. */
+static int xhci_dwc_bus_resume(struct usb_hcd *hcd)
+{
+	/* before resume bus, delay 1ms to waiting core stable */
+	mdelay(1);
+	return xhci_bus_resume(hcd);
+}
+
+/* hc_driver for the DWC3-based xHCI host: standard xHCI entry points,
+ * except .reset (adds our platform quirks) and .bus_resume (adds a
+ * 1ms settle delay).  The shared USB3 HCD uses this same table. */
+static const struct hc_driver xhci_dwc_hc_driver = {
+	.description =		"dwc-xhci",
+	.product_desc =		"xHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct xhci_hcd *),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			xhci_irq,
+	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		xhci_dwc3_setup,
+	.start =		xhci_run,
+	.stop =			xhci_stop,
+	.shutdown =		xhci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		xhci_urb_enqueue,
+	.urb_dequeue =		xhci_urb_dequeue,
+	.alloc_dev =		xhci_alloc_dev,
+	.free_dev =		xhci_free_dev,
+	.alloc_streams =	xhci_alloc_streams,
+	.free_streams =		xhci_free_streams,
+	.add_endpoint =		xhci_add_endpoint,
+	.drop_endpoint =	xhci_drop_endpoint,
+	.endpoint_reset =	xhci_endpoint_reset,
+	.check_bandwidth =	xhci_check_bandwidth,
+	.reset_bandwidth =	xhci_reset_bandwidth,
+	.address_device =	xhci_address_device,
+	.update_hub_device =	xhci_update_hub_device,
+	.reset_device =		xhci_discover_or_reset_device,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	xhci_get_frame,
+
+	/* Root hub support */
+	.hub_control =		xhci_hub_control,
+	.hub_status_data =	xhci_hub_status_data,
+	.bus_suspend =		xhci_bus_suspend,
+	.bus_resume =		xhci_dwc_bus_resume,
+};
+
+/* Return 1 if any device hangs off either root hub (USB2 or USB3),
+ * 0 if none, -EINVAL when no xHCI instance is supplied. */
+static int if_usb_devices_connected(struct xhci_hcd *xhci)
+{
+	struct usb_device *rh;
+	int port;
+
+	if (!xhci)
+		return -EINVAL;
+
+	rh = xhci->main_hcd->self.root_hub;
+	for (port = 1; port <= rh->maxchild; port++)
+		if (usb_hub_find_child(rh, port))
+			return 1;
+
+	rh = xhci->shared_hcd->self.root_hub;
+	for (port = 1; port <= rh->maxchild; port++)
+		if (usb_hub_find_child(rh, port))
+			return 1;
+
+	return 0;
+}
+
+/* Toggle ULPI auto-resume in GUSB2PHYCFG0; external VBUS drive is
+ * forced on in the same write. */
+static void dwc_xhci_enable_phy_auto_resume(struct usb_hcd *hcd, bool enable)
+{
+	u32 cfg = readl(hcd->regs + GUSB2PHYCFG0);
+
+	cfg |= GUSB2PHYCFG_ULPI_EXT_VBUS_DRV;
+	if (enable)
+		cfg |= GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	else
+		cfg &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	writel(cfg, hcd->regs + GUSB2PHYCFG0);
+}
+
+/* Enable/disable suspend capability on both PHYs: USB3 pipe suspend
+ * (GUSB3PIPECTL0) first, then USB2 PHY suspend (GUSB2PHYCFG0). */
+static void dwc_xhci_enable_phy_suspend(struct usb_hcd *hcd, bool enable)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	if (enable)
+		val |= GUSB3PIPECTL_SUS_EN;
+	else
+		val &= ~GUSB3PIPECTL_SUS_EN;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	if (enable)
+		val |= GUSB2PHYCFG_SUS_PHY;
+	else
+		val &= ~GUSB2PHYCFG_SUS_PHY;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+}
+
+/* Apply two silicon workarounds: clear GUCTL bit and disable the
+ * OTG3-EXI interface (via a temporary mapping of APBFC_EXIOTG3_MISC0). */
+static void dwc_silicon_wa(struct usb_hcd *hcd)
+{
+	void __iomem *addr;
+	u32 val;
+
+	/* Clear GUCTL bit 15 as workaround of DWC controller Bugs
+	 * This Bug cause the xHCI driver does not see any
+	 * transfer complete events for certain EP after exit
+	 * from hibernation mode.*/
+	val = readl(hcd->regs + GUCTL);
+	val &= ~GUCTL_CMDEVADDR;
+	writel(val, hcd->regs + GUCTL);
+
+	/* Disable OTG3-EXI interface by default. It is one
+	 * workaround for silicon BUG. It will cause transfer
+	 * failed on EP#8 of any USB device.
+	 */
+	addr = ioremap_nocache(APBFC_EXIOTG3_MISC0_REG, 4);
+	/* NOTE(review): bit 3 disables the interface per the comment
+	 * above - confirm against the platform register spec. */
+	val = readl(addr);
+	val |= (1 << 3);
+	writel(val, addr);
+	iounmap(addr);
+}
+
+/* Full controller + PHY soft reset sequence: assert core reset, assert
+ * both PHY resets, hold 100ms, release PHY resets, wait 20ms, then
+ * release core reset.  The ordering and delays are hardware-mandated -
+ * do not reorder. */
+static void dwc_core_reset(struct usb_hcd *hcd)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GCTL);
+	val |= GCTL_CORESOFTRESET;
+	writel(val, hcd->regs + GCTL);
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	val |= GUSB3PIPECTL_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	val |= GUSB2PHYCFG_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+
+	msleep(100);
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	val &= ~GUSB3PIPECTL_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	val &= ~GUSB2PHYCFG_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+
+	msleep(20);
+
+	val = readl(hcd->regs + GCTL);
+	val &= ~GCTL_CORESOFTRESET;
+	writel(val, hcd->regs + GCTL);
+}
+
+/*
+ * On MERR platform, the suspend clock is 19.2MHz.
+ * Hence PwrDnScale = 19200 / 16 = 1200 (= 0x4B0).
+ * To account for possible jitter of the suspend clock and to leave
+ * margin, the recommended setting is 1250 (= 0x4E2).
+ */
+static void dwc_set_ssphy_p3_clockrate(struct usb_hcd *hcd)
+{
+	u32 gctl;
+
+	/* replace the PwrDnScale field of GCTL with the tuned value */
+	gctl = readl(hcd->regs + GCTL);
+	gctl &= ~GCTL_PWRDNSCALE_MASK;
+	gctl |= GCTL_PWRDNSCALE(0x4E2);
+	writel(gctl, hcd->regs + GCTL);
+}
+
+/* Debug sysfs attribute "pm_get": reading drops one runtime-PM
+ * reference, writing takes one.  Deliberately abuses show/store as a
+ * manual runtime-PM knob; the read returns no data (length 0). */
+static ssize_t
+show_pm_get(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(_dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	pm_runtime_put(hcd->self.controller);
+	return 0;
+
+}
+static ssize_t store_pm_get(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(_dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	pm_runtime_get(hcd->self.controller);
+	return count;
+
+}
+static DEVICE_ATTR(pm_get, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_pm_get, store_pm_get);
+
+/* Switch the controller's port capability to host mode.
+ * NOTE(review): 0x45801000 is an opaque full-GCTL value (not a
+ * read-modify-write) - confirm the intended field settings against
+ * the DWC3 databook before touching this. */
+static void dwc_set_host_mode(struct usb_hcd *hcd)
+{
+	writel(0x45801000, hcd->regs + GCTL);
+
+	/* settle time after the mode switch */
+	msleep(20);
+}
+
+/* Bring up the host role: reset the core, apply silicon workarounds,
+ * switch to host mode, then register the primary (USB2) HCD and the
+ * shared (USB3) HCD.  Called from the OTG state machine.
+ * Returns 0 on success or a negative errno. */
+static int dwc3_start_host(struct usb_hcd *hcd)
+{
+	int ret = -EINVAL;
+	struct xhci_hcd *xhci;
+	struct usb_hcd *xhci_shared_hcd;
+
+	if (!hcd)
+		return ret;
+
+	if (hcd->rh_registered) {
+		dev_dbg(hcd->self.controller,
+			"%s() - Already registered", __func__);
+		return 0;
+	}
+
+	pm_runtime_get_sync(hcd->self.controller);
+
+	dwc_core_reset(hcd);
+	dwc_silicon_wa(hcd);
+	dwc_set_host_mode(hcd);
+	dwc_set_ssphy_p3_clockrate(hcd);
+
+	/* Clear the hcd->flags.
+	 * To prevent incorrect flags set during last time. */
+	hcd->flags = 0;
+
+	ret = usb_add_hcd(hcd, otg_irqnum, IRQF_SHARED);
+	if (ret) {
+		/* was: "return -EINVAL" - that leaked the runtime-PM
+		 * reference taken above and masked the real error code */
+		pm_runtime_put(hcd->self.controller);
+		return ret;
+	}
+
+	xhci = hcd_to_xhci(hcd);
+	xhci->shared_hcd = usb_create_shared_hcd(&xhci_dwc_hc_driver,
+			hcd->self.controller, dev_name(hcd->self.controller), hcd);
+	if (!xhci->shared_hcd) {
+		ret = -ENOMEM;
+		goto dealloc_usb2_hcd;
+	}
+
+	xhci->quirks |= XHCI_PLAT;
+
+	/* Set the xHCI pointer before xhci_pci_setup() (aka hcd_driver.reset)
+	 * is called by usb_add_hcd().
+	 */
+	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+
+	/* the shared HCD reuses the primary HCD's register mapping */
+	xhci->shared_hcd->regs = hcd->regs;
+
+	xhci->shared_hcd->rsrc_start = hcd->rsrc_start;
+	xhci->shared_hcd->rsrc_len = hcd->rsrc_len;
+
+	ret = usb_add_hcd(xhci->shared_hcd, otg_irqnum, IRQF_SHARED);
+	if (ret)
+		goto put_usb3_hcd;
+
+	pm_runtime_put(hcd->self.controller);
+
+	ret = device_create_file(hcd->self.controller, &dev_attr_pm_get);
+	if (ret < 0)
+		dev_err(hcd->self.controller,
+			"Can't register sysfs attribute: %d\n", ret);
+
+	/* only hook shutdown once a host is actually running */
+	dwc3_xhci_driver.shutdown = usb_hcd_platform_shutdown;
+
+	return ret;
+
+put_usb3_hcd:
+	if (xhci->shared_hcd) {
+		xhci_shared_hcd = xhci->shared_hcd;
+		usb_remove_hcd(xhci_shared_hcd);
+		usb_put_hcd(xhci_shared_hcd);
+	}
+
+dealloc_usb2_hcd:
+	/* flush any pending interrupt before tearing the HCD down */
+	local_irq_disable();
+	usb_hcd_irq(0, hcd);
+	local_irq_enable();
+	usb_remove_hcd(hcd);
+
+	kfree(xhci);
+	*((struct xhci_hcd **) hcd->hcd_priv) = NULL;
+
+	pm_runtime_put(hcd->self.controller);
+	return ret;
+}
+
+/* Tear down both xHCI HCDs for this controller.  Called from the OTG
+ * state machine on role change and from driver remove. */
+static int dwc3_stop_host(struct usb_hcd *hcd)
+{
+	int count = 0;
+	struct xhci_hcd *xhci;
+	struct usb_hcd *xhci_shared_hcd;
+
+	if (!hcd)
+		return -EINVAL;
+
+	xhci = hcd_to_xhci(hcd);
+
+	pm_runtime_get_sync(hcd->self.controller);
+
+	/* When plug out micro A cable, there will be two flows be executed.
+	 * The first one is xHCI controller get disconnect event. The
+	 * second one is PMIC get ID change event. During these events
+	 * handling, they both try to call usb_disconnect. Then met some
+	 * conflicts and cause kernel panic.
+	 * So treat disconnect event as first priority, handle the ID change
+	 * event until disconnect event handled done.*/
+	while (if_usb_devices_connected(xhci)) {
+		msleep(20);
+		if (count++ > WAIT_DISC_EVENT_COMPLETE_TIMEOUT)
+			break;
+	}	/* was "};" - stray empty statement removed */
+
+	dwc3_xhci_driver.shutdown = NULL;
+
+	if (xhci->shared_hcd) {
+		xhci_shared_hcd = xhci->shared_hcd;
+		usb_remove_hcd(xhci_shared_hcd);
+		usb_put_hcd(xhci_shared_hcd);
+	}
+
+	usb_remove_hcd(hcd);
+
+	kfree(xhci);
+	*((struct xhci_hcd **) hcd->hcd_priv) = NULL;
+
+	/* leave PHY suspend disabled so the next start sees a live PHY */
+	dwc_xhci_enable_phy_suspend(hcd, false);
+
+	pm_runtime_put(hcd->self.controller);
+	device_remove_file(hcd->self.controller, &dev_attr_pm_get);
+	return 0;
+}
+/* Platform-bus probe for the DWC3 xHCI host glue: create the primary
+ * HCD, record IRQ/MMIO resources from platform data, and wire
+ * start_host/stop_host into the OTG state machine.  The HCDs are
+ * actually added later by dwc3_start_host(). */
+static int xhci_dwc_drv_probe(struct platform_device *pdev)
+{
+	struct dwc_otg2 *otg;
+	struct usb_phy *usb_phy;
+	struct dwc_device_par *pdata;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int retval = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_debug("initializing FSL-SOC USB Controller\n");
+
+	/* Need platform data for setup */
+	pdata = (struct dwc_device_par *)pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev,
+			"No platform data for %s.\n", dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	otg_irqnum = res->start;
+
+	hcd = usb_create_hcd(&xhci_dwc_hc_driver,
+			&pdev->dev, dev_name(&pdev->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->regs = pdata->io_addr;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		/* was: copy-pasted "no IRQ" message and a leaked hcd */
+		dev_err(&pdev->dev,
+			"Found HC with no register addr. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		retval = -ENODEV;
+		goto err_put_hcd;
+	}
+	hcd->rsrc_start = res->start;
+	/* was: res->end - res->start, one byte short of the region */
+	hcd->rsrc_len = resource_size(res);
+
+	/* usb_get_phy() returns an ERR_PTR on failure; the original
+	 * NULL-only check let container_of() run on an invalid pointer */
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!IS_ERR_OR_NULL(usb_phy)) {
+		otg_set_host(usb_phy->otg, &hcd->self);
+		otg = container_of(usb_phy->otg, struct dwc_otg2, otg);
+		if (otg) {
+			otg->start_host = dwc3_start_host;
+			otg->stop_host = dwc3_stop_host;
+		}
+		usb_put_phy(usb_phy);
+	}
+
+	/* Enable wakeup irq */
+	hcd->has_wakeup_irq = 1;
+
+	platform_set_drvdata(pdev, hcd);
+	pm_runtime_enable(hcd->self.controller);
+
+	return retval;
+
+err_put_hcd:
+	usb_put_hcd(hcd);
+	return retval;
+}
+
+/* Platform-bus remove: unhook the OTG host callback, stop the host if
+ * still running, disable runtime PM, then drop the HCD reference. */
+static int xhci_dwc_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct usb_phy *usb_phy;
+
+	/* was: unchecked usb_get_phy() - it returns an ERR_PTR on failure */
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!IS_ERR_OR_NULL(usb_phy)) {
+		otg_set_host(usb_phy->otg, NULL);
+		usb_put_phy(usb_phy);
+	}
+
+	if (xhci)
+		dwc3_stop_host(hcd);
+
+	/* Disable runtime PM while hcd->self.controller is still valid.
+	 * The original called these after usb_put_hcd(), i.e. through a
+	 * potentially freed hcd (use-after-free). */
+	pm_runtime_disable(hcd->self.controller);
+	pm_runtime_set_suspended(hcd->self.controller);
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Do nothing in the runtime-PM idle callback.
+ * On the HVP platform, if the controller is put into hibernation mode
+ * it will not raise an IRQ until its state is restored in the runtime
+ * resume callback, so nothing can trigger pm_runtime_get to resume a
+ * USB3 device.  This issue needs further investigation; for now the
+ * idle hook is a stub (returning 0 lets the core proceed to suspend).
+ */
+static int dwc_hcd_runtime_idle(struct device *dev)
+{
+	return 0;
+}
+
+/* dwc_hcd_suspend_common and dwc_hcd_resume_common mirror
+ * suspend_common/resume_common in the USB core, which only support
+ * PCI devices; these variants handle our platform device instead.
+ */
+static int dwc_hcd_suspend_common(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int retval = 0;
+	u32 data = 0;
+
+	/* host already torn down (e.g. role switched to device) */
+	if (!xhci) {
+		dev_dbg(dev, "%s: host already stop!\n", __func__);
+		return 0;
+	}
+
+	/* Root hub suspend should have stopped all downstream traffic,
+	 * and all bus master traffic. And done so for both the interface
+	 * and the stub usb_device (which we check here). But maybe it
+	 * didn't; writing sysfs power/state files ignores such rules...
+	 */
+	if (HCD_RH_RUNNING(hcd)) {
+		dev_warn(dev, "Root hub is not suspended\n");
+		return -EBUSY;
+	}
+	if (hcd->shared_hcd) {
+		hcd = hcd->shared_hcd;
+		if (HCD_RH_RUNNING(hcd)) {
+			dev_warn(dev, "Secondary root hub is not suspended\n");
+			return -EBUSY;
+		}
+	}
+
+	if (!HCD_DEAD(hcd)) {
+		/* Optimization: Don't suspend if a root-hub wakeup is
+		 * pending and it would cause the HCD to wake up anyway.
+		 */
+		if (HCD_WAKEUP_PENDING(hcd))
+			return -EBUSY;
+		if (hcd->shared_hcd &&
+				HCD_WAKEUP_PENDING(hcd->shared_hcd))
+			return -EBUSY;
+		/* NOTE(review): xhci->shared_hcd is dereferenced here
+		 * without a NULL check, unlike hcd->shared_hcd above -
+		 * confirm the shared HCD always exists on this path. */
+		if (hcd->state != HC_STATE_SUSPENDED ||
+				xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+			retval = -EINVAL;
+
+		if (!retval) {
+			/* The auto-resume is diabled by default. Need enable it
+			 * if there have valid connection. To ensure that when
+			 * device resumes, host does resume reflect within
+			 * 900 usec as in USB spec.
+			 */
+			if (if_usb_devices_connected(xhci) == 1)
+				dwc_xhci_enable_phy_auto_resume(
+						xhci->main_hcd, true);
+
+			/* Ensure that suspend enable are set for
+			 * USB2 and USB3 PHY
+			 */
+			dwc_xhci_enable_phy_suspend(hcd, true);
+
+			/* arm controller hibernation before xhci_suspend */
+			data = readl(hcd->regs + GCTL);
+			data |= GCTL_GBL_HIBERNATION_EN;
+			writel(data, hcd->regs + GCTL);
+			dev_dbg(hcd->self.controller, "set xhci hibernation enable!\n");
+			retval = xhci_suspend(xhci);
+		}
+
+		/* Check again in case wakeup raced with pci_suspend */
+		if ((retval == 0 && HCD_WAKEUP_PENDING(hcd)) ||
+				(retval == 0 && hcd->shared_hcd &&
+				 HCD_WAKEUP_PENDING(hcd->shared_hcd))) {
+			xhci_resume(xhci, false);
+			retval = -EBUSY;
+		}
+		if (retval)
+			return retval;
+	}
+
+	/* make sure no ISR is still running against the suspended HC */
+	synchronize_irq(otg_irqnum);
+
+	return retval;
+
+}
+
+/* Shared resume path for both system and runtime resume: re-runs
+ * xhci_resume() and declares the HC dead if that fails. */
+static int dwc_hcd_resume_common(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int retval = 0;
+
+	/* host already torn down - nothing to resume */
+	if (!xhci)
+		return 0;
+
+	if (HCD_RH_RUNNING(hcd) ||
+			(hcd->shared_hcd &&
+			 HCD_RH_RUNNING(hcd->shared_hcd))) {
+		dev_dbg(dev, "can't resume, not suspended!\n");
+		return 0;
+	}
+
+	if (!HCD_DEAD(hcd)) {
+		retval = xhci_resume(xhci, false);
+		if (retval) {
+			dev_err(dev, "PCI post-resume error %d!\n", retval);
+			if (hcd->shared_hcd)
+				usb_hc_died(hcd->shared_hcd);
+			usb_hc_died(hcd);
+		}
+	}
+
+	dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+
+	return retval;
+}
+
+/* Runtime-suspend hook: defer to the common suspend path.
+ * NOTE(review): on failure this *disables* PHY auto-resume, undoing
+ * what suspend_common may have enabled - confirm the inverted-looking
+ * "if (retval)" condition is intentional. */
+static int dwc_hcd_runtime_suspend(struct device *dev)
+{
+	int retval;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	retval = dwc_hcd_suspend_common(dev);
+
+	if (retval)
+		dwc_xhci_enable_phy_auto_resume(
+				hcd, false);
+
+	dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval);
+	return retval;
+}
+
+/* Runtime-resume hook: turn PHY auto-resume back off (only wanted
+ * while suspended) and defer to the common resume path. */
+static int dwc_hcd_runtime_resume(struct device *dev)
+{
+	int retval;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	retval = dwc_hcd_resume_common(dev);
+	dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+
+	return retval;
+}
+#else
+#define dwc_hcd_runtime_idle NULL
+#define dwc_hcd_runtime_suspend NULL
+#define dwc_hcd_runtime_resume NULL
+#endif
+
+
+/* System-suspend hook: identical logic to dwc_hcd_runtime_suspend.
+ * NOTE(review): same inverted-looking "if (retval)" as the runtime
+ * variant - confirm intent; the debug string also says "runtime". */
+static int dwc_hcd_suspend(struct device *dev)
+{
+	int retval;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	retval = dwc_hcd_suspend_common(dev);
+
+	if (retval)
+		dwc_xhci_enable_phy_auto_resume(
+				hcd, false);
+
+	dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval);
+	return retval;
+}
+
+/* System-resume hook: identical logic to dwc_hcd_runtime_resume
+ * (disable PHY auto-resume, then common resume). */
+static int dwc_hcd_resume(struct device *dev)
+{
+	int retval;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	retval = dwc_hcd_resume_common(dev);
+	dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+
+	return retval;
+}
+
+/* PM ops for the host glue; runtime hooks become NULL stubs when
+ * CONFIG_PM_RUNTIME is off (see the #else block above). */
+static const struct dev_pm_ops dwc_usb_hcd_pm_ops = {
+	.runtime_suspend = dwc_hcd_runtime_suspend,
+	.runtime_resume = dwc_hcd_runtime_resume,
+	.runtime_idle = dwc_hcd_runtime_idle,
+	.suspend = dwc_hcd_suspend,
+	.resume = dwc_hcd_resume,
+};
+#endif
+
+/* Host-side platform driver.  .shutdown is installed/removed at
+ * runtime by dwc3_start_host()/dwc3_stop_host(). */
+static struct platform_driver dwc3_xhci_driver = {
+	.probe = xhci_dwc_drv_probe,
+	.remove = xhci_dwc_drv_remove,
+	.driver = {
+		.name = "dwc3-host",
+#ifdef CONFIG_PM
+		.pm = &dwc_usb_hcd_pm_ops,
+#endif
+	},
+};
--- /dev/null
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+static int otg_id = -1;
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off);
+static int dwc3_intel_byt_notify_charger_type(struct dwc_otg2 *otg,
+ enum power_supply_charger_event event);
+
+/* Non-zero when the platform data requests HW charger detection;
+ * 0 when no OTG context or platform data is available. */
+static int charger_detect_enable(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *pdata;
+
+	if (!otg || !otg->otg_data)
+		return 0;
+
+	pdata = (struct intel_dwc_otg_pdata *)otg->otg_data;
+	return pdata->charger_detect_enable;
+}
+
+/* Non-zero when the platform data enables SDP charging notification;
+ * 0 when no OTG context or platform data is available. */
+static int sdp_charging(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *pdata;
+
+	if (!otg || !otg->otg_data)
+		return 0;
+
+	pdata = (struct intel_dwc_otg_pdata *)otg->otg_data;
+	return pdata->sdp_charging;
+}
+
+/* Tune the TUSB1211 HS TX driver for better eye-diagram quality. */
+static void usb2phy_eye_optimization(struct dwc_otg2 *otg)
+{
+	struct usb_phy *phy;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return;
+
+	/* Write 0x4f to the vendor-specific register.
+	 * NOTE(review): an earlier comment here claimed the value was
+	 * 0x7f (ZHSDRV = 0b11, IHSTX = 0b1111) but the code writes
+	 * 0x4f - confirm which setting is intended for this PHY. */
+	usb_phy_io_write(phy, 0x4f, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+	usb_put_phy(phy);
+}
+
+/* Set or clear the TUSB1211 HWDETECT bit for hardware charger
+ * detection.  Returns 0 on success or a negative errno. */
+static int dwc_otg_charger_hwdet(bool enable)
+{
+	int retval;
+	struct usb_phy *phy;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	/* Just return if charger detection is not enabled */
+	if (!charger_detect_enable(otg))
+		return 0;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return -ENODEV;
+
+	if (enable) {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_SET);
+		if (retval)
+			goto put_phy;
+		otg_dbg(otg, "set HWDETECT\n");
+	} else {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_CLR);
+		if (retval)
+			goto put_phy;
+		otg_dbg(otg, "clear HWDETECT\n");
+	}
+	retval = 0;
+
+put_phy:
+	/* drop the PHY reference on every path; the original returned
+	 * early on usb_phy_io_write() failure and leaked it */
+	usb_put_phy(phy);
+
+	return retval;
+}
+
+/* Debug sysfs attribute "vbus_evt": write '1'/'0' to inject a VBUS
+ * valid / session-end event into the OTG state machine. */
+static ssize_t store_vbus_evt(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	/* accept exactly one character plus newline */
+	if (count != 2) {
+		otg_err(otg, "return EINVAL\n");
+		return -EINVAL;
+	}
+
+	/* was: stripped the trailing '\n' by writing through the const
+	 * sysfs buffer via a cast - only buf[0] is ever inspected, so
+	 * the mutation was both unnecessary and unsafe */
+
+	switch (buf[0]) {
+	case '1':
+		otg_dbg(otg, "Change the VBUS to High\n");
+		otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	case '0':
+		otg_dbg(otg, "Change the VBUS to Low\n");
+		otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	default:
+		return -EINVAL;
+	}
+
+	return count;
+}
+static DEVICE_ATTR(vbus_evt, S_IWUSR|S_IWGRP,
+			NULL, store_vbus_evt);
+
+
+/* Debug sysfs attribute "otg_id" (store half): write 'a'/'A' or
+ * 'b'/'B' to inject an ID-pin change into the OTG state machine and
+ * update the cached otg_id value. */
+static ssize_t store_otg_id(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	if (!otg)
+		return 0;
+	/* accept exactly one character plus newline */
+	if (count != 2) {
+		otg_err(otg, "return EINVAL\n");
+		return -EINVAL;
+	}
+
+	/* was: stripped the trailing '\n' by writing through the const
+	 * sysfs buffer via a cast - only buf[0] is ever inspected, so
+	 * the mutation was both unnecessary and unsafe */
+
+	switch (buf[0]) {
+	case 'a':
+	case 'A':
+		otg_dbg(otg, "Change ID to A\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 0;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	case 'b':
+	case 'B':
+		otg_dbg(otg, "Change ID to B\n");
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 1;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	default:
+		otg_err(otg, "Just support change ID to A!\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+/* Debug sysfs attribute "otg_id" (show half): report the cached ID
+ * state last written via store_otg_id().  The initial value (-1) is
+ * truthy and therefore reported as "B", matching the original. */
+static ssize_t
+show_otg_id(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "USB OTG ID: %s\n",
+			 otg_id ? "B" : "A");
+}
+
+static DEVICE_ATTR(otg_id, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_otg_id, store_otg_id);
+
+/* Toggle suspend-enable on both the USB2 UTMI PHY (GUSB2PHYCFG0) and
+ * the USB3 pipe (GUSB3PIPECTL0); bit != 0 enables, 0 disables. */
+static void set_sus_phy(struct dwc_otg2 *otg, int bit)
+{
+	u32 reg;
+
+	reg = otg_read(otg, GUSB2PHYCFG0);
+	reg = bit ? (reg | GUSB2PHYCFG_SUS_PHY)
+		  : (reg & ~GUSB2PHYCFG_SUS_PHY);
+	otg_write(otg, GUSB2PHYCFG0, reg);
+
+	reg = otg_read(otg, GUSB3PIPECTL0);
+	reg = bit ? (reg | GUSB3PIPECTL_SUS_EN)
+		  : (reg & ~GUSB3PIPECTL_SUS_EN);
+	otg_write(otg, GUSB3PIPECTL0, reg);
+}
+
+/* Software-debounced read of the ID GPIO: returns 0 or 1 once 20
+ * consecutive identical samples are seen (within a 200ms window),
+ * -EINVAL on a GPIO read error, -ENODEV when no ID GPIO is configured
+ * or the pin never settles.
+ * NOTE(review): the otg2 parameter is ignored; the function uses
+ * dwc3_get_otg() instead - confirm this is intentional. */
+static int dwc3_check_gpio_id(struct dwc_otg2 *otg2)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct intel_dwc_otg_pdata *data;
+	int id = 0;
+	int next = 0;
+	int count = 0;
+	unsigned long timeout;
+
+	otg_dbg(otg, "start check gpio id\n");
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	/* Polling ID GPIO PIN value for SW debounce as HW debouce chip
+	 * is not connected on BYT CR board */
+	if (data && data->gpio_id) {
+		id = gpio_get_value(data->gpio_id);
+
+		/* If get 20 of the same value in a row by GPIO read,
+		 * then end SW debouce and return the ID value.
+		 * the total length of debouce time is 80ms~100ms for
+		 * 20 times GPIO read on BYT CR, which is longer than
+		 * normal debounce time done by HW chip.
+		 * Also set 200ms timeout value to avoid impact from
+		 * pin unstable cases */
+		timeout = jiffies + msecs_to_jiffies(200);
+		while ((count < 20) && (!time_after(jiffies, timeout))) {
+			next = gpio_get_value(data->gpio_id);
+			otg_dbg(otg, "id value pin %d = %d\n",
+					data->gpio_id, next);
+			if (next < 0)
+				return -EINVAL;
+			else if (id == next)
+				count++;
+			else {
+				/* value changed: restart the streak */
+				id = next;
+				count = 0;
+			}
+		}
+		if (count >= 20) {
+			otg_dbg(otg, "id debounce done = %d\n", id);
+			return id;
+		}
+	}
+
+	return -ENODEV;
+}
+
+/* Threaded IRQ handler for ID-pin edges: debounce the pin and, if the
+ * stable value differs from the cached one, notify USB_EVENT_ID
+ * listeners on the USB2 PHY notifier chain. */
+static irqreturn_t dwc3_gpio_id_irq(int irq, void *dev)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct intel_dwc_otg_pdata *data;
+	int id;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	id = dwc3_check_gpio_id(otg);
+	/* only act on a clean 0/1 reading; errors are ignored */
+	if (id == 0 || id == 1) {
+		if (data->id != id) {
+			data->id = id;
+			dev_info(otg->dev, "ID notification (id = %d)\n",
+					data->id);
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+					USB_EVENT_ID, &id);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Delayed work: inject a session-end event (and clear any pending
+ * session-valid event) into the OTG state machine, then wake it. */
+static void dwc_otg_suspend_discon_work(struct work_struct *work)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	unsigned long flags;
+
+	otg_dbg(otg, "start suspend_disconn work\n");
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+	otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+	dwc3_wakeup_otg_thread(otg);
+	spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+/* One-time Baytrail platform init for the OTG controller: request the
+ * PHY CS/RESET and ID GPIOs, publish debug sysfs attributes, and put
+ * the port into OTG capability mode.  Returns 0 or a negative errno. */
+int dwc3_intel_byt_platform_init(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+	u32 gctl;
+	int id_value;
+	int retval;
+
+	/* was: 'data' was assigned below without ever being declared */
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	if (data)
+		INIT_DELAYED_WORK(&data->suspend_discon_work,
+			dwc_otg_suspend_discon_work);
+
+	if (data && data->gpio_cs && data->gpio_reset) {
+		retval = gpio_request(data->gpio_cs, "phy_cs");
+		if (retval < 0) {
+			otg_err(otg, "failed to request CS pin %d\n",
+					data->gpio_cs);
+			return retval;
+		}
+
+		retval = gpio_request(data->gpio_reset, "phy_reset");
+		if (retval < 0) {
+			otg_err(otg, "failed to request RESET pin %d\n",
+					data->gpio_reset);
+			/* was: leaked the CS GPIO on this path */
+			gpio_free(data->gpio_cs);
+			return retval;
+		}
+	}
+
+	if (data && data->gpio_id) {
+		dev_info(otg->dev, "USB ID detection - Enabled - GPIO\n");
+
+		/* Set ID default value to 1 Floating */
+		data->id = 1;
+
+		retval = gpio_request(data->gpio_id, "gpio_id");
+		if (retval < 0) {
+			otg_err(otg, "failed to request ID pin %d\n",
+					data->gpio_id);
+			return retval;
+		}
+
+		retval = gpio_direction_input(data->gpio_id);
+		if (retval < 0) {
+			otg_err(otg, "failed to request ID pin %d\n",
+					data->gpio_id);
+			/* was: leaked the ID GPIO on this path */
+			gpio_free(data->gpio_id);
+			return retval;
+		}
+
+		retval = request_threaded_irq(gpio_to_irq(data->gpio_id),
+				NULL, dwc3_gpio_id_irq,
+				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+				IRQF_ONESHOT, "dwc-gpio-id", otg->dev);
+
+		if (retval < 0) {
+			otg_err(otg, "failed to request interrupt gpio ID\n");
+			/* was: leaked the ID GPIO on this path */
+			gpio_free(data->gpio_id);
+			return retval;
+		}
+
+		otg_dbg(otg, "GPIO ID request/Interrupt reuqest Done\n");
+
+		id_value = dwc3_check_gpio_id(otg);
+		if ((id_value == 0 || id_value == 1) &&
+					(data->id != id_value)) {
+			data->id = id_value;
+			dev_info(otg->dev, "ID notification (id = %d)\n",
+						data->id);
+
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+					USB_EVENT_ID, &id_value);
+		} else
+			otg_dbg(otg, "Get incorrect ID value %d\n", id_value);
+	}
+
+	/* Don't let phy go to suspend mode, which
+	 * will cause FS/LS devices enum failed in host mode.
+	 */
+	set_sus_phy(otg, 0);
+
+	retval = device_create_file(otg->dev, &dev_attr_otg_id);
+	if (retval < 0) {
+		otg_dbg(otg,
+			"Can't register sysfs attribute: %d\n", retval);
+		/* was: returned -ENOMEM regardless of the real error */
+		return retval;
+	}
+
+	retval = device_create_file(otg->dev, &dev_attr_vbus_evt);
+	if (retval < 0) {
+		otg_dbg(otg,
+			"Can't register sysfs attribute: %d\n", retval);
+		return retval;
+	}
+
+	otg_dbg(otg, "\n");
+	/* mask OTG events, clear OCTL, and advertise OTG capability */
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, OCTL, 0);
+	gctl = otg_read(otg, GCTL);
+	gctl |= GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, gctl);
+
+	return 0;
+}
+
+/* Disable auto-resume in the USB2 PHY config.  Silicon workaround:
+ * leaving it enabled causes fabric timeout errors for LS devices
+ * after resume from hibernation. */
+static void disable_phy_auto_resume(struct dwc_otg2 *otg)
+{
+	u32 cfg = otg_read(otg, GUSB2PHYCFG0);
+
+	cfg &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	otg_write(otg, GUSB2PHYCFG0, cfg);
+}
+
+/* This function will control VUSBPHY to power gate/ungate USBPHY.
+ * Power-on also pulses the RESET GPIO; the usleep/msleep delays are
+ * PHY power-sequencing requirements - do not shorten. */
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off)
+{
+	struct intel_dwc_otg_pdata *data;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	/* no-op when the CS/RESET GPIOs are not wired on this board */
+	if (data && data->gpio_cs && data->gpio_reset) {
+		if (on_off) {
+			/* Turn ON phy via CS pin */
+			gpio_direction_output(data->gpio_cs, 1);
+			usleep_range(200, 300);
+
+			/* Do PHY reset after enable the PHY */
+			gpio_direction_output(data->gpio_reset, 0);
+			usleep_range(200, 500);
+			gpio_set_value(data->gpio_reset, 1);
+			msleep(30);
+		} else {
+			/* Turn OFF phy via CS pin */
+			gpio_direction_output(data->gpio_cs, 0);
+		}
+	}
+	return 0;
+}
+
+/* ID-pin query for the OTG core.  On BYT the ID pin is not routed to
+ * the USB controller, so it always reads as floating. */
+int dwc3_intel_byt_get_id(struct dwc_otg2 *otg)
+{
+	return RID_FLOAT;
+}
+
+/* Drive the controller into the B-idle state: power the PHY down,
+ * stop HW charger detection, reset ADP and OTG event state, and force
+ * the port back to device capability.  Register order matters. */
+int dwc3_intel_byt_b_idle(struct dwc_otg2 *otg)
+{
+	u32 gctl, tmp;
+
+	enable_usb_phy(otg, false);
+	dwc_otg_charger_hwdet(false);
+
+	/* Disable hibernation mode by default */
+	gctl = otg_read(otg, GCTL);
+	gctl &= ~GCTL_GBL_HIBERNATION_EN;
+	otg_write(otg, GCTL, gctl);
+
+	/* Reset ADP related registers */
+	otg_write(otg, ADPCFG, 0);
+	otg_write(otg, ADPCTL, 0);
+	otg_write(otg, ADPEVTEN, 0);
+	/* write-1-to-clear any latched ADP events */
+	tmp = otg_read(otg, ADPEVT);
+	otg_write(otg, ADPEVT, tmp);
+
+	otg_write(otg, OCFG, 0);
+	otg_write(otg, OEVTEN, 0);
+	/* write-1-to-clear any latched OTG events */
+	tmp = otg_read(otg, OEVT);
+	otg_write(otg, OEVT, tmp);
+	otg_write(otg, OCTL, OCTL_PERI_MODE);
+
+	/* Force config to device mode as default */
+	gctl = otg_read(otg, GCTL);
+	gctl &= ~GCTL_PRT_CAP_DIR;
+	gctl |= GCTL_PRT_CAP_DIR_DEV << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, gctl);
+
+	/* settle time after the capability switch */
+	mdelay(100);
+
+	return 0;
+}
+
+/* usb_phy.set_power hook: translate gadget-side current requests
+ * (including the OTG_DEVICE_SUSPEND/RESUME pseudo-values) into
+ * power-supply charger notifications.  Only meaningful while the
+ * attached charger is an SDP. */
+static int dwc3_intel_byt_set_power(struct usb_phy *_otg,
+		unsigned ma)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct power_supply_cable_props cap;
+	struct intel_dwc_otg_pdata *data;
+
+	/* Just return if charger detection is not enabled */
+	if (!charger_detect_enable(otg))
+		return 0;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	/* Needn't notify charger capability if charger_detection disable.
+	 * NOTE(review): unreachable - the early return above already
+	 * fired for !charger_detect_enable(), so the sdp_charging()
+	 * half of this condition never gets a say; confirm which check
+	 * is intended to win. */
+	if (!charger_detect_enable(otg) && !sdp_charging(otg))
+		return 0;
+	else if (otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) {
+		otg_err(otg, "%s: currently, chrg type is not SDP!\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ma == OTG_DEVICE_SUSPEND) {
+		spin_lock_irqsave(&otg->lock, flags);
+		cap.chrg_type = otg->charging_cap.chrg_type;
+		cap.ma = otg->charging_cap.ma;
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_SUSPEND;
+		spin_unlock_irqrestore(&otg->lock, flags);
+
+		/* ma is zero mean D+/D- opened cable.
+		 * If SMIP set, then notify 500ma.
+		 * Otherwise, notify 0ma.
+		 */
+		if (!cap.ma) {
+			if (data->charging_compliance) {
+				cap.ma = 500;
+				cap.chrg_evt =
+					POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+			}
+			/* For standard SDP, if SMIP set, then ignore suspend */
+		} else if (data->charging_compliance)
+			return 0;
+		/* Stander SDP(cap.ma != 0) and SMIP not set.
+		 * Should send 0ma with SUSPEND event.
+		 * NOTE(review): the code actually reports 2mA here, not
+		 * 0mA as the comment says - confirm intended value. */
+		else
+			cap.ma = 2;
+
+		if (sdp_charging(otg))
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+					USB_EVENT_ENUMERATED, &cap.ma);
+		else
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+					USB_EVENT_CHARGER, &cap);
+		otg_dbg(otg, "Notify EM CHARGER_EVENT_SUSPEND\n");
+
+		return 0;
+	} else if (ma == OTG_DEVICE_RESUME) {
+		otg_dbg(otg, "Notify EM CHARGER_EVENT_CONNECT\n");
+		dwc3_intel_byt_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+		return 0;
+	}
+
+	/* For SMIP set case, only need to report 500/900ma */
+	if (data->charging_compliance) {
+		if ((ma != OTG_USB2_500MA) &&
+				(ma != OTG_USB3_900MA))
+			return 0;
+	}
+
+	/* Covert macro to integer number*/
+	switch (ma) {
+	case OTG_USB2_100MA:
+		ma = 100;
+		break;
+	case OTG_USB3_150MA:
+		ma = 150;
+		break;
+	case OTG_USB2_500MA:
+		ma = 500;
+		break;
+	case OTG_USB3_900MA:
+		ma = 900;
+		break;
+	default:
+		otg_err(otg, "Device driver set invalid SDP current value!\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.ma = ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	dwc3_intel_byt_notify_charger_type(otg,
+			POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+	return 0;
+}
+
+/* VBUS control hook for the OTG core.  On BYT, VBUS is switched by
+ * the FSA mux, not by this driver, so there is nothing to do here. */
+int dwc3_intel_byt_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+	return 0;
+}
+
+/* Broadcast the current charger capability on the USB2 PHY notifier
+ * chain: SDP-charging platforms get USB_EVENT_ENUMERATED with the mA
+ * value, everything else gets USB_EVENT_CHARGER with the full cable
+ * properties.  Validates the event and the cached SDP current first. */
+static int dwc3_intel_byt_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event)
+{
+	struct power_supply_cable_props cap;
+	unsigned long flags;
+
+	/* Just return if charger detection is not enabled */
+	if (!charger_detect_enable(otg) && !sdp_charging(otg))
+		return 0;
+
+	if (event > POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+		otg_err(otg,
+		"%s: Invalid power_supply_charger_event!\n", __func__);
+		return -EINVAL;
+	}
+
+	/* an SDP may only draw one of the spec-defined currents */
+	if ((otg->charging_cap.chrg_type ==
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) &&
+			((otg->charging_cap.ma != 100) &&
+			 (otg->charging_cap.ma != 150) &&
+			 (otg->charging_cap.ma != 500) &&
+			 (otg->charging_cap.ma != 900))) {
+		otg_err(otg, "%s: invalid SDP current!\n", __func__);
+		return -EINVAL;
+	}
+
+	/* snapshot the capability under the lock before notifying */
+	spin_lock_irqsave(&otg->lock, flags);
+	cap.chrg_type = otg->charging_cap.chrg_type;
+	cap.ma = otg->charging_cap.ma;
+	cap.chrg_evt = event;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	if (sdp_charging(otg))
+		atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_ENUMERATED, &cap.ma);
+	else
+		atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_CHARGER, &cap);
+
+	return 0;
+}
+
+static enum power_supply_charger_cable_type
+	dwc3_intel_byt_get_charger_type(struct dwc_otg2 *otg)
+{
+	struct usb_phy *phy;
+	int val, vdat_det, chgd_serx_dm;	/* int, not u8: ULPI reads return negative on error */
+	unsigned long timeout, interval;
+	enum power_supply_charger_cable_type type =
+		POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	/* No need to do charger detection if not enabled */
+	if (!charger_detect_enable(otg))
+		return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy) {
+		otg_err(otg, "Get USB2 PHY failed\n");
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+
+	/* PHY Enable:
+	 * Power on PHY
+	 */
+	enable_usb_phy(otg, true);
+
+	/* Wait 10ms (~5ms before PHY de-asserts DIR,
+	 * XXus for initial Link reg sync-up).*/
+	msleep(20);
+
+	/* DCD Enable: Change OPMODE to 01 (Non-driving),
+	 * TermSel to 0, &
+	 * XcvrSel to 01 (enable FS xcvr)
+	 */
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(1) | FUNCCTRL_XCVRSELECT(1),
+			TUSB1211_FUNC_CTRL_SET);
+
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(2) | FUNCCTRL_XCVRSELECT(2)
+			| FUNCCTRL_TERMSELECT,
+			TUSB1211_FUNC_CTRL_CLR);
+
+	/*Enable SW control*/
+	usb_phy_io_write(phy, PWCTRL_SW_CONTROL, TUSB1211_POWER_CONTROL_SET);
+
+	/* Enable IDPSRC */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_SET);
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	/* DCD Check:
+	 * Delay 66.5 ms. (Note:
+	 * TIDP_SRC_ON + TCHGD_SERX_DEB =
+	 * 347.8us + 66.1ms).
+	 */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		/* Read DP logic level; val must be signed to see errors. */
+		val = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+		if (val < 0) {
+			otg_err(otg, "ULPI read error! try again\n");
+			continue;
+		}
+
+		if (!(val & VS4_CHGD_SERX_DP)) {
+			otg_info(otg, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	/* Disable DP pullup (Idp_src) */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_CLR);
+
+	/* SE1 Det Enable:
+	 * Read DP/DM logic level. Note: use DEBUG
+	 * because VS4 isn't enabled in this situation.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_DEBUG);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= DEBUG_LINESTATE;
+
+	/* If '11': SE1 detected; goto 'Cleanup'.
+	 * Else: goto 'Pri Det Enable'.
+	 */
+	if (val == 3) {
+		type = POWER_SUPPLY_CHARGER_TYPE_SE1;
+		goto cleanup;
+	}
+
+	/* Pri Det Enable:
+	 * Enable VDPSRC.
+	 */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >106.1ms (40ms for BC
+	 * Tvdpsrc_on, 66.1ms for TI CHGD_SERX_DEB).
+	 */
+	msleep(107);
+
+	/* Pri Det Check:
+	 * Check if DM > VDATREF.
+	 */
+	vdat_det = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (vdat_det < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	vdat_det &= PWCTRL_VDAT_DET;
+
+	/* Check if DM<VLGC */
+	chgd_serx_dm = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+	if (chgd_serx_dm < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	chgd_serx_dm &= VS4_CHGD_SERX_DM;
+
+	/* If VDAT_DET==0 || CHGD_SERX_DM==1: SDP detected
+	 * If VDAT_DET==1 && CHGD_SERX_DM==0: CDP/DCP
+	 */
+	if (vdat_det == 0 || chgd_serx_dm == 1)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+
+	/* Disable VDPSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* If SDP, goto "Cleanup".
+	 * Else, goto "Sec Det Enable"
+	 */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		goto cleanup;
+
+	/* Sec Det Enable:
+	 * delay 1ms.
+	 */
+	usleep_range(1000, 1500);
+
+	/* Swap DP & DM */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_CLR);
+
+	/* Enable 'VDMSRC'. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >73ms (40ms for BC Tvdmsrc_on, 33ms for TI TVDPSRC_DEB) */
+	msleep(80);
+
+	/* Sec Det Check:
+	 * Check if DP>VDATREF.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= PWCTRL_VDAT_DET;
+
+	/* If VDAT_DET==0: CDP detected.
+	 * If VDAT_DET==1: DCP detected.
+	 */
+	if (!val)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	else
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+
+	/* Disable VDMSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* Swap DP & DM. */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+cleanup:
+
+	/* If DCP detected, assert VDPSRC. */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+		usb_phy_io_write(phy, PWCTRL_SW_CONTROL | PWCTRL_DP_VSRC_EN,
+				TUSB1211_POWER_CONTROL_SET);
+
+	usb_put_phy(phy);
+
+	switch (type) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		dwc_otg_charger_hwdet(true);
+		break;
+	default:
+		break;
+	}
+
+	return type;
+}
+
+static int dwc3_intel_byt_handle_notification(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct dwc_otg2 *otg = dwc3_get_otg();
+ int state, val;
+ unsigned long flags;
+
+ if (!otg)
+ return NOTIFY_BAD;
+
+ val = *(int *)data;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ switch (event) {
+ case USB_EVENT_VBUS:
+ if (val) {
+ otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+ otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+ } else {
+ otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+ otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+ }
+ state = NOTIFY_OK;
+ break;
+ default:
+ otg_dbg(otg, "DWC OTG Notify unknow notify message\n");
+ state = NOTIFY_DONE;
+ }
+ dwc3_wakeup_otg_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+
+ return state;
+
+}
+
+int dwc3_intel_byt_prepare_start_host(struct dwc_otg2 *otg)
+{
+ return 0;
+}
+
+int dwc3_intel_byt_prepare_start_peripheral(struct dwc_otg2 *otg)
+{
+ enable_usb_phy(otg, true);
+ usb2phy_eye_optimization(otg);
+ disable_phy_auto_resume(otg);
+
+ return 0;
+}
+
+int dwc3_intel_byt_suspend(struct dwc_otg2 *otg)
+{
+ struct pci_dev *pci_dev;
+ pci_power_t state = PCI_D3hot;
+
+ if (!otg)
+ return 0;
+
+ pci_dev = to_pci_dev(otg->dev);
+
+ set_sus_phy(otg, 1);
+
+ if (pci_save_state(pci_dev)) {
+ otg_err(otg, "pci_save_state failed!\n");
+ return -EIO;
+ }
+
+ pci_disable_device(pci_dev);
+ pci_set_power_state(pci_dev, state);
+
+ return 0;
+}
+
+int dwc3_intel_byt_resume(struct dwc_otg2 *otg)
+{
+	struct pci_dev *pci_dev;
+
+	if (!otg)
+		return 0;
+	pci_dev = to_pci_dev(otg->dev);	/* deref only after NULL check */
+
+	/* From synopsys spec 12.2.11.
+	 * Software cannot access memory-mapped I/O space
+	 * for 10ms.
+	 */
+	mdelay(10);
+
+	pci_restore_state(pci_dev);
+	if (pci_enable_device(pci_dev) < 0) {
+		otg_err(otg, "pci_enable_device failed.\n");
+		return -EIO;
+	}
+
+	set_sus_phy(otg, 0);
+
+	return 0;
+}
+
+struct dwc3_otg_hw_ops dwc3_intel_byt_otg_pdata = {
+ .mode = DWC3_DEVICE_ONLY,
+ .bus = DWC3_PCI,
+ .get_id = dwc3_intel_byt_get_id,
+ .b_idle = dwc3_intel_byt_b_idle,
+ .set_power = dwc3_intel_byt_set_power,
+ .enable_vbus = dwc3_intel_byt_enable_vbus,
+ .platform_init = dwc3_intel_byt_platform_init,
+ .get_charger_type = dwc3_intel_byt_get_charger_type,
+ .otg_notifier_handler = dwc3_intel_byt_handle_notification,
+ .prepare_start_peripheral = dwc3_intel_byt_prepare_start_peripheral,
+ .prepare_start_host = dwc3_intel_byt_prepare_start_host,
+ .notify_charger_type = dwc3_intel_byt_notify_charger_type,
+
+ .suspend = dwc3_intel_byt_suspend,
+ .resume = dwc3_intel_byt_resume,
+};
+
+static int __init dwc3_intel_byt_init(void)
+{
+ return dwc3_otg_register(&dwc3_intel_byt_otg_pdata);
+}
+module_init(dwc3_intel_byt_init);
+
+static void __exit dwc3_intel_byt_exit(void)
+{
+ dwc3_otg_unregister(&dwc3_intel_byt_otg_pdata);
+}
+module_exit(dwc3_intel_byt_exit);
+
+MODULE_AUTHOR("Wang Yu <yu.y.wang@intel.com>");
+MODULE_AUTHOR("Wu, Hao <hao.wu@intel.com>");
+MODULE_DESCRIPTION("DWC3 Intel BYT OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VERSION);
--- /dev/null
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include <asm/intel_scu_pmic.h>
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+static int otg_id = -1;
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off);
+static int dwc3_intel_notify_charger_type(struct dwc_otg2 *otg,
+ enum power_supply_charger_event event);
+static struct power_supply_cable_props cap_record;
+
+static int charger_detect_enable(struct dwc_otg2 *otg)
+{
+ struct intel_dwc_otg_pdata *data;
+
+ if (!otg || !otg->otg_data)
+ return 0;
+
+ data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+ return data->charger_detect_enable;
+}
+
+static int is_basin_cove(struct dwc_otg2 *otg)
+{
+ struct intel_dwc_otg_pdata *data;
+ if (!otg || !otg->otg_data)
+ return -EINVAL;
+
+ data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+ return data->pmic_type == BASIN_COVE;
+}
+
+static int is_hybridvp(struct dwc_otg2 *otg)
+{
+ struct intel_dwc_otg_pdata *data;
+ if (!otg || !otg->otg_data)
+ return -EINVAL;
+
+ data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+ return data->is_hvp;
+}
+
+static void usb2phy_eye_optimization(struct dwc_otg2 *otg)
+{
+ struct usb_phy *phy;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!phy)
+ return;
+
+ /* Set 0x7f for better quality in eye diagram
+ * It means ZHSDRV = 0b11 and IHSTX = 0b1111*/
+ usb_phy_io_write(phy, 0x7f, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+ usb_put_phy(phy);
+}
+
+
+/* As we use SW mode to do charger detection, need to notify HW
+ * the result SW get, charging port or not */
+static int dwc_otg_charger_hwdet(bool enable)
+{
+ int retval;
+ struct usb_phy *phy;
+ struct dwc_otg2 *otg = dwc3_get_otg();
+
+ /* Just return if charger detection is not enabled */
+ if (!charger_detect_enable(otg))
+ return 0;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!phy)
+ return -ENODEV;
+
+ if (enable) {
+ retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+ TUSB1211_POWER_CONTROL_SET);
+ if (retval)
+ return retval;
+ otg_dbg(otg, "set HWDETECT\n");
+ } else {
+ retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+ TUSB1211_POWER_CONTROL_CLR);
+ if (retval)
+ return retval;
+ otg_dbg(otg, "clear HWDETECT\n");
+ }
+ usb_put_phy(phy);
+
+ return 0;
+}
+
+static enum power_supply_charger_cable_type
+ basin_cove_aca_check(struct dwc_otg2 *otg)
+{
+ u8 rarbrc;
+ int ret;
+ enum power_supply_charger_cable_type type =
+ POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+ ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+ USBIDCTRL_ACA_DETEN_D1,
+ USBIDCTRL_ACA_DETEN_D1);
+ if (ret)
+ otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+ /* Wait >66.1ms (for TCHGD_SERX_DEB) */
+ msleep(66);
+
+ /* Read decoded RID value */
+ ret = intel_scu_ipc_ioread8(PMIC_USBIDSTS, &rarbrc);
+ if (ret)
+ otg_err(otg, "Fail to read decoded RID value\n");
+ rarbrc &= USBIDSTS_ID_RARBRC_STS(3);
+ rarbrc >>= 1;
+
+ /* If ID_RARBRC_STS==01: ACA-Dock detected
+ * If ID_RARBRC_STS==00: MHL detected
+ */
+ if (rarbrc == 1) {
+ /* ACA-Dock */
+ type = POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+ } else if (!rarbrc) {
+ /* MHL */
+ type = POWER_SUPPLY_CHARGER_TYPE_MHL;
+ }
+
+ ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+ USBIDCTRL_ACA_DETEN_D1,
+ 0);
+ if (ret)
+ otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+ return type;
+}
+
+static enum power_supply_charger_cable_type
+ dwc3_intel_aca_check(struct dwc_otg2 *otg)
+{
+ return basin_cove_aca_check(otg);
+}
+
+static ssize_t store_otg_id(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	if (!otg)
+		return 0;
+	if (count != 2) {
+		otg_err(otg, "return EINVAL\n");
+		return -EINVAL;
+	}
+
+	/* Only buf[0] is inspected below; never strip the trailing '\n'
+	 * in place -- the sysfs buffer is const and casting that away
+	 * to write through it is undefined behavior. */
+
+	switch (buf[0]) {
+	case 'a':
+	case 'A':
+		otg_dbg(otg, "Change ID to A\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 0;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	case 'b':
+	case 'B':
+		otg_dbg(otg, "Change ID to B\n");
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 1;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	default:
+		otg_err(otg, "Just support change ID to A!\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+show_otg_id(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ char *next;
+ unsigned size, t;
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size,
+ "USB OTG ID: %s\n",
+ (otg_id ? "B" : "A")
+ );
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+
+static DEVICE_ATTR(otg_id, S_IRUGO|S_IWUSR|S_IWGRP,
+ show_otg_id, store_otg_id);
+
+static void set_sus_phy(struct dwc_otg2 *otg, int bit)
+{
+ u32 data = 0;
+
+ data = otg_read(otg, GUSB2PHYCFG0);
+ if (bit)
+ data |= GUSB2PHYCFG_SUS_PHY;
+ else
+ data &= ~GUSB2PHYCFG_SUS_PHY;
+
+ otg_write(otg, GUSB2PHYCFG0, data);
+
+ data = otg_read(otg, GUSB3PIPECTL0);
+ if (bit)
+ data |= GUSB3PIPECTL_SUS_EN;
+ else
+ data &= ~GUSB3PIPECTL_SUS_EN;
+ otg_write(otg, GUSB3PIPECTL0, data);
+}
+
+int dwc3_intel_platform_init(struct dwc_otg2 *otg)
+{
+ u32 gctl;
+ int retval;
+
+ otg_info(otg, "De-assert USBRST# to enable PHY\n");
+ retval = intel_scu_ipc_iowrite8(PMIC_USBPHYCTRL,
+ PMIC_USBPHYCTRL_D0);
+ if (retval)
+ otg_err(otg, "Fail to de-assert USBRST#\n");
+
+ /* Don't let phy go to suspend mode, which
+ * will cause FS/LS devices enum failed in host mode.
+ */
+ set_sus_phy(otg, 0);
+
+ retval = device_create_file(otg->dev, &dev_attr_otg_id);
+ if (retval < 0) {
+ otg_dbg(otg,
+ "Can't register sysfs attribute: %d\n", retval);
+ return -ENOMEM;
+ }
+
+ otg_dbg(otg, "\n");
+ otg_write(otg, OEVTEN, 0);
+ otg_write(otg, OCTL, 0);
+ gctl = otg_read(otg, GCTL);
+ gctl |= GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT;
+ otg_write(otg, GCTL, gctl);
+
+ return 0;
+}
+
+/* Disable auto-resume feature for USB2 PHY. This is one
+ * silicon workaround. It will cause fabric timeout error
+ * for LS case after resume from hibernation */
+static void disable_phy_auto_resume(struct dwc_otg2 *otg)
+{
+ u32 data = 0;
+
+ data = otg_read(otg, GUSB2PHYCFG0);
+ data &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+ otg_write(otg, GUSB2PHYCFG0, data);
+}
+
+/* This function will control VUSBPHY to power gate/ungate USBPHY */
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off)
+{
+ int ret;
+
+ if (on_off) {
+ ret = intel_scu_ipc_update_register(PMIC_VLDOCNT,
+ 0xff, PMIC_VLDOCNT_VUSBPHYEN);
+ if (ret)
+ otg_err(otg, "Fail to enable VBUSPHY\n");
+
+ msleep(20);
+ } else {
+ ret = intel_scu_ipc_update_register(PMIC_VLDOCNT,
+ 0x00, PMIC_VLDOCNT_VUSBPHYEN);
+ if (ret)
+ otg_err(otg, "Fail to disable VBUSPHY\n");
+ }
+
+ return 0;
+}
+
+int basin_cove_get_id(struct dwc_otg2 *otg)
+{
+ int ret, id = RID_UNKNOWN;
+ u8 idsts, pmic_id;
+
+ ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+ USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+ USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0);
+ if (ret)
+ otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+ mdelay(50);
+
+ ret = intel_scu_ipc_ioread8(PMIC_USBIDSTS, &idsts);
+ if (ret) {
+ otg_err(otg, "Fail to read id\n");
+ return id;
+ }
+
+ if (idsts & USBIDSTS_ID_FLOAT_STS)
+ id = RID_FLOAT;
+ else if (idsts & USBIDSTS_ID_RARBRC_STS(1))
+ id = RID_A;
+ else if (idsts & USBIDSTS_ID_RARBRC_STS(2))
+ id = RID_B;
+ else if (idsts & USBIDSTS_ID_RARBRC_STS(3))
+ id = RID_C;
+ else {
+ /* PMIC A0 reports ID_GND = 0 for RID_GND but PMIC B0 reports
+ * ID_GND = 1 for RID_GND
+ */
+ ret = intel_scu_ipc_ioread8(0x00, &pmic_id);
+ if (ret) {
+ otg_err(otg, "Fail to read PMIC ID register\n");
+ } else if (((pmic_id & VENDOR_ID_MASK) == BASIN_COVE_PMIC_ID) &&
+ ((pmic_id & PMIC_MAJOR_REV) == PMIC_A0_MAJOR_REV)) {
+ if (idsts & USBIDSTS_ID_GND)
+ id = RID_GND;
+ } else {
+ if (!(idsts & USBIDSTS_ID_GND))
+ id = RID_GND;
+ }
+ }
+
+ ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+ USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+ 0);
+ if (ret)
+ otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+ return id;
+}
+
+int dwc3_intel_get_id(struct dwc_otg2 *otg)
+{
+ return basin_cove_get_id(otg);
+}
+
+int dwc3_intel_b_idle(struct dwc_otg2 *otg)
+{
+ u32 gctl, tmp;
+
+ /* Disable hibernation mode by default */
+ gctl = otg_read(otg, GCTL);
+ gctl &= ~GCTL_GBL_HIBERNATION_EN;
+ otg_write(otg, GCTL, gctl);
+
+ /* Reset ADP related registers */
+ otg_write(otg, ADPCFG, 0);
+ otg_write(otg, ADPCTL, 0);
+ otg_write(otg, ADPEVTEN, 0);
+ tmp = otg_read(otg, ADPEVT);
+ otg_write(otg, ADPEVT, tmp);
+
+ otg_write(otg, OCFG, 0);
+ otg_write(otg, OEVTEN, 0);
+ tmp = otg_read(otg, OEVT);
+ otg_write(otg, OEVT, tmp);
+ otg_write(otg, OCTL, OCTL_PERI_MODE);
+
+ /* Force config to device mode as default */
+ gctl = otg_read(otg, GCTL);
+ gctl &= ~GCTL_PRT_CAP_DIR;
+ gctl |= GCTL_PRT_CAP_DIR_DEV << GCTL_PRT_CAP_DIR_SHIFT;
+ otg_write(otg, GCTL, gctl);
+
+ if (!is_hybridvp(otg)) {
+ dwc_otg_charger_hwdet(false);
+ enable_usb_phy(otg, false);
+ }
+
+ mdelay(100);
+
+ return 0;
+}
+
+static int dwc3_intel_set_power(struct usb_phy *_otg,
+ unsigned ma)
+{
+ unsigned long flags;
+ struct dwc_otg2 *otg = dwc3_get_otg();
+ struct power_supply_cable_props cap;
+ struct intel_dwc_otg_pdata *data;
+
+ data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+ if (otg->charging_cap.chrg_type ==
+ POWER_SUPPLY_CHARGER_TYPE_USB_CDP)
+ return 0;
+ else if (otg->charging_cap.chrg_type !=
+ POWER_SUPPLY_CHARGER_TYPE_USB_SDP) {
+ otg_err(otg, "%s: currently, chrg type is not SDP!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (ma == OTG_DEVICE_SUSPEND) {
+ spin_lock_irqsave(&otg->lock, flags);
+ cap.chrg_type = otg->charging_cap.chrg_type;
+ cap.ma = otg->charging_cap.ma;
+ cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_SUSPEND;
+ spin_unlock_irqrestore(&otg->lock, flags);
+
+ /* mA is zero mean D+/D- opened cable.
+ * If SMIP set, then notify 500mA.
+ * Otherwise, notify 0mA.
+ */
+ if (!cap.ma) {
+ if (data->charging_compliance) {
+ cap.ma = 500;
+ cap.chrg_evt =
+ POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+ }
+ /* For standard SDP, if SMIP set, then ignore suspend */
+ } else if (data->charging_compliance)
+ return 0;
+	/* Standard SDP (cap.ma != 0) and SMIP not set.
+ * Should send 0mA with SUSPEND event
+ */
+ else
+ cap.ma = 0;
+
+ atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+ USB_EVENT_CHARGER, &cap);
+ otg_dbg(otg, "Notify EM");
+ otg_dbg(otg, "POWER_SUPPLY_CHARGER_EVENT_SUSPEND\n");
+
+ return 0;
+ } else if (ma == OTG_DEVICE_RESUME) {
+ otg_dbg(otg, "Notify EM");
+ otg_dbg(otg, "POWER_SUPPLY_CHARGER_EVENT_CONNECT\n");
+ dwc3_intel_notify_charger_type(otg,
+ POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+ return 0;
+ }
+
+ /* For SMIP set case, only need to report 500/900mA */
+ if (data->charging_compliance) {
+ if ((ma != OTG_USB2_500MA) &&
+ (ma != OTG_USB3_900MA))
+ return 0;
+ }
+
+	/* Convert macro to integer number */
+ switch (ma) {
+ case OTG_USB2_100MA:
+ ma = 100;
+ break;
+ case OTG_USB3_150MA:
+ ma = 150;
+ break;
+ case OTG_USB2_500MA:
+ ma = 500;
+ break;
+ case OTG_USB3_900MA:
+ ma = 900;
+ break;
+ default:
+ otg_err(otg, "Device driver set invalid SDP current value!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->charging_cap.ma = ma;
+ spin_unlock_irqrestore(&otg->lock, flags);
+
+ dwc3_intel_notify_charger_type(otg,
+ POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+ return 0;
+}
+
+int dwc3_intel_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+ atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+ USB_EVENT_DRIVE_VBUS, &enable);
+
+ return 0;
+}
+
+static int dwc3_intel_notify_charger_type(struct dwc_otg2 *otg,
+ enum power_supply_charger_event event)
+{
+ struct power_supply_cable_props cap;
+ int ret = 0;
+ unsigned long flags;
+
+ if (!charger_detect_enable(otg) &&
+ (otg->charging_cap.chrg_type !=
+ POWER_SUPPLY_CHARGER_TYPE_USB_SDP))
+ return 0;
+
+ if (event > POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+ otg_err(otg,
+ "%s: Invalid power_supply_charger_event!\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((otg->charging_cap.chrg_type ==
+ POWER_SUPPLY_CHARGER_TYPE_USB_SDP) &&
+ ((otg->charging_cap.ma != 0) &&
+ (otg->charging_cap.ma != 100) &&
+ (otg->charging_cap.ma != 150) &&
+ (otg->charging_cap.ma != 500) &&
+ (otg->charging_cap.ma != 900))) {
+ otg_err(otg, "%s: invalid SDP current!\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&otg->lock, flags);
+ cap.chrg_type = otg->charging_cap.chrg_type;
+ cap.ma = otg->charging_cap.ma;
+ cap.chrg_evt = event;
+ spin_unlock_irqrestore(&otg->lock, flags);
+
+ atomic_notifier_call_chain(&otg->usb2_phy.notifier, USB_EVENT_CHARGER,
+ &cap);
+
+ return ret;
+}
+
+static void dwc3_phy_soft_reset(struct dwc_otg2 *otg)
+{
+ u32 val;
+
+ val = otg_read(otg, GCTL);
+ val |= GCTL_CORESOFTRESET;
+ otg_write(otg, GCTL, val);
+
+ val = otg_read(otg, GUSB3PIPECTL0);
+ val |= GUSB3PIPECTL_PHYSOFTRST;
+ otg_write(otg, GUSB3PIPECTL0, val);
+
+ val = otg_read(otg, GUSB2PHYCFG0);
+ val |= GUSB2PHYCFG_PHYSOFTRST;
+ otg_write(otg, GUSB2PHYCFG0, val);
+
+ msleep(50);
+
+ val = otg_read(otg, GUSB3PIPECTL0);
+ val &= ~GUSB3PIPECTL_PHYSOFTRST;
+ otg_write(otg, GUSB3PIPECTL0, val);
+
+ val = otg_read(otg, GUSB2PHYCFG0);
+ val &= ~GUSB2PHYCFG_PHYSOFTRST;
+ otg_write(otg, GUSB2PHYCFG0, val);
+
+ msleep(100);
+
+ val = otg_read(otg, GCTL);
+ val &= ~GCTL_CORESOFTRESET;
+ otg_write(otg, GCTL, val);
+}
+
+static enum power_supply_charger_cable_type
+	dwc3_intel_get_charger_type(struct dwc_otg2 *otg)
+{
+	int ret;
+	struct usb_phy *phy;
+	int val, vdat_det, chgd_serx_dm;	/* int, not u8: ULPI reads return negative on error */
+	unsigned long timeout, interval;
+	enum power_supply_charger_cable_type type =
+		POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	if (!charger_detect_enable(otg))
+		return cap_record.chrg_type;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy) {
+		otg_err(otg, "Get USB2 PHY failed\n");
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+
+	/* PHY Enable:
+	 * Power on PHY
+	 */
+	enable_usb_phy(otg, true);
+	dwc3_phy_soft_reset(otg);
+
+	/* Enable ACA:
+	 * Enable ACA & ID detection logic.
+	 */
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	/* DCD Enable: Change OPMODE to 01 (Non-driving),
+	 * TermSel to 0, &
+	 * XcvrSel to 01 (enable FS xcvr)
+	 */
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(1) | FUNCCTRL_XCVRSELECT(1),
+			TUSB1211_FUNC_CTRL_SET);
+
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(2) | FUNCCTRL_XCVRSELECT(2)
+			| FUNCCTRL_TERMSELECT,
+			TUSB1211_FUNC_CTRL_CLR);
+
+	/*Enable SW control*/
+	usb_phy_io_write(phy, PWCTRL_SW_CONTROL, TUSB1211_POWER_CONTROL_SET);
+
+	/* Enable IDPSRC */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_SET);
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	/* DCD Check:
+	 * Delay 66.5 ms. (Note:
+	 * TIDP_SRC_ON + TCHGD_SERX_DEB =
+	 * 347.8us + 66.1ms).
+	 */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		/* Read DP logic level; val must be signed to see errors. */
+		val = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+		if (val < 0) {
+			otg_err(otg, "ULPI read error! try again\n");
+			continue;
+		}
+
+		if (!(val & VS4_CHGD_SERX_DP)) {
+			otg_info(otg, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	/* Disable DP pullup (Idp_src) */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_CLR);
+
+	/* ID Check:
+	 * Check ID pin state.
+	 */
+	val = dwc3_intel_get_id(otg);
+	if (val != RID_FLOAT) {
+		type = dwc3_intel_aca_check(otg);
+		goto cleanup;
+	}
+
+	/* SE1 Det Enable:
+	 * Read DP/DM logic level. Note: use DEBUG
+	 * because VS4 isn't enabled in this situation.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_DEBUG);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= DEBUG_LINESTATE;
+
+	/* If '11': SE1 detected; goto 'Cleanup'.
+	 * Else: goto 'Pri Det Enable'.
+	 */
+	if (val == 3) {
+		type = POWER_SUPPLY_CHARGER_TYPE_SE1;
+		goto cleanup;
+	}
+
+	/* Pri Det Enable:
+	 * Enable VDPSRC.
+	 */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >106.1ms (40ms for BC
+	 * Tvdpsrc_on, 66.1ms for TI CHGD_SERX_DEB).
+	 */
+	msleep(107);
+
+	/* Pri Det Check:
+	 * Check if DM > VDATREF.
+	 */
+	vdat_det = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (vdat_det < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	vdat_det &= PWCTRL_VDAT_DET;
+
+	/* Check if DM<VLGC */
+	chgd_serx_dm = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+	if (chgd_serx_dm < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	chgd_serx_dm &= VS4_CHGD_SERX_DM;
+
+	/* If VDAT_DET==0 || CHGD_SERX_DM==1: SDP detected
+	 * If VDAT_DET==1 && CHGD_SERX_DM==0: CDP/DCP
+	 */
+	if (vdat_det == 0 || chgd_serx_dm == 1)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+
+	/* Disable VDPSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* If SDP, goto "Cleanup".
+	 * Else, goto "Sec Det Enable"
+	 */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		goto cleanup;
+
+	/* Sec Det Enable:
+	 * delay 1ms.
+	 */
+	usleep_range(1000, 1500);
+
+	/* Swap DP & DM */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_CLR);
+
+	/* Enable 'VDMSRC'. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >73ms (40ms for BC Tvdmsrc_on, 33ms for TI TVDPSRC_DEB) */
+	msleep(80);
+
+	/* Sec Det Check:
+	 * Check if DP>VDATREF.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= PWCTRL_VDAT_DET;
+
+	/* If VDAT_DET==0: CDP detected.
+	 * If VDAT_DET==1: DCP detected.
+	 */
+	if (!val)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	else
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+
+	/* Disable VDMSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* Swap DP & DM. */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+cleanup:
+
+	/* If DCP detected, assert VDPSRC. */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+		usb_phy_io_write(phy, PWCTRL_SW_CONTROL | PWCTRL_DP_VSRC_EN,
+				TUSB1211_POWER_CONTROL_SET);
+
+	usb_put_phy(phy);
+
+	switch (type) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		dwc_otg_charger_hwdet(true);
+		break;
+	default:
+		break;
+	}
+
+	return type;
+}
+
+static int dwc3_intel_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	int state;
+	unsigned long flags, valid_chrg_type;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct power_supply_cable_props *cap;
+
+	if (!otg)
+		return NOTIFY_BAD;
+
+	valid_chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP |
+		POWER_SUPPLY_CHARGER_TYPE_USB_CDP |
+		POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	switch (event) {
+	case USB_EVENT_ID:
+		otg->otg_events |= OEVT_CONN_ID_STS_CHNG_EVNT;
+		state = NOTIFY_OK;
+		break;
+	case USB_EVENT_VBUS:
+		if (*(int *)data) {
+			otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+			otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+		} else {
+			otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+			otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+		}
+		state = NOTIFY_OK;
+		break;
+	case USB_EVENT_CHARGER:
+		if (charger_detect_enable(otg)) {
+			state = NOTIFY_DONE;
+			goto done;
+		}
+		cap = (struct power_supply_cable_props *)data;
+		if (!(cap->chrg_type & valid_chrg_type)) {
+			otg_err(otg, "Invalid charger type!\n");
+			state = NOTIFY_BAD;
+			goto done;	/* don't fall through and report NOTIFY_OK */
+		}
+		if (cap->chrg_evt == POWER_SUPPLY_CHARGER_EVENT_CONNECT) {
+			otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+			otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+
+			cap_record.chrg_type = cap->chrg_type;
+			cap_record.ma = cap->ma;
+			cap_record.chrg_evt = cap->chrg_evt;
+		} else if (cap->chrg_evt ==
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+			otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+			otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+			cap_record.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+			cap_record.ma = 0;
+			cap_record.chrg_evt =
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		}
+		state = NOTIFY_OK;
+		break;
+	default:
+		otg_dbg(otg, "DWC OTG Notify unknown notify message\n");
+		state = NOTIFY_DONE;
+	}
+done:
+	dwc3_wakeup_otg_thread(otg);
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	return state;
+
+}
+
+int dwc3_intel_prepare_start_host(struct dwc_otg2 *otg)
+{
+ if (!is_hybridvp(otg)) {
+ enable_usb_phy(otg, true);
+ usb2phy_eye_optimization(otg);
+ disable_phy_auto_resume(otg);
+ }
+
+ return 0;
+}
+
+int dwc3_intel_prepare_start_peripheral(struct dwc_otg2 *otg)
+{
+ if (!is_hybridvp(otg)) {
+ enable_usb_phy(otg, true);
+ usb2phy_eye_optimization(otg);
+ disable_phy_auto_resume(otg);
+ }
+
+ return 0;
+}
+
+int dwc3_intel_suspend(struct dwc_otg2 *otg)
+{
+ struct pci_dev *pci_dev;
+ pci_power_t state = PCI_D3cold;
+
+ if (!otg)
+ return 0;
+
+ pci_dev = to_pci_dev(otg->dev);
+
+ if (otg->state == DWC_STATE_B_PERIPHERAL ||
+ otg->state == DWC_STATE_A_HOST)
+ state = PCI_D3hot;
+
+ set_sus_phy(otg, 1);
+
+ if (pci_save_state(pci_dev)) {
+ otg_err(otg, "pci_save_state failed!\n");
+ return -EIO;
+ }
+
+ pci_disable_device(pci_dev);
+ pci_set_power_state(pci_dev, state);
+
+ return 0;
+}
+
+int dwc3_intel_resume(struct dwc_otg2 *otg)
+{
+ struct pci_dev *pci_dev;
+
+ if (!otg)
+ return 0;
+
+ pci_dev = to_pci_dev(otg->dev);
+
+ /* From synopsys spec 12.2.11.
+ * Software cannot access memory-mapped I/O space
+ * for 10ms. Delay 5 ms here should be enough. Too
+ * long a delay causes hibernation exit failure.
+ */
+ mdelay(5);
+
+ pci_restore_state(pci_dev);
+ if (pci_enable_device(pci_dev) < 0) {
+ otg_err(otg, "pci_enable_device failed.\n");
+ return -EIO;
+ }
+
+ set_sus_phy(otg, 0);
+
+ /* Delay 1ms waiting PHY clock debounce.
+ * Without this debounce, will met fabric error randomly.
+ **/
+ mdelay(1);
+
+ return 0;
+}
+
+struct dwc3_otg_hw_ops dwc3_intel_otg_pdata = {
+ .mode = DWC3_DRD,
+ .bus = DWC3_PCI,
+ .get_id = dwc3_intel_get_id,
+ .b_idle = dwc3_intel_b_idle,
+ .set_power = dwc3_intel_set_power,
+ .enable_vbus = dwc3_intel_enable_vbus,
+ .platform_init = dwc3_intel_platform_init,
+ .get_charger_type = dwc3_intel_get_charger_type,
+ .otg_notifier_handler = dwc3_intel_handle_notification,
+ .prepare_start_peripheral = dwc3_intel_prepare_start_peripheral,
+ .prepare_start_host = dwc3_intel_prepare_start_host,
+ .notify_charger_type = dwc3_intel_notify_charger_type,
+
+ .suspend = dwc3_intel_suspend,
+ .resume = dwc3_intel_resume,
+};
+
+static int __init dwc3_intel_init(void)
+{
+ return dwc3_otg_register(&dwc3_intel_otg_pdata);
+}
+module_init(dwc3_intel_init);
+
+static void __exit dwc3_intel_exit(void)
+{
+ dwc3_otg_unregister(&dwc3_intel_otg_pdata);
+}
+module_exit(dwc3_intel_exit);
+
+MODULE_AUTHOR("Wang Yu <yu.y.wang@intel.com>");
+MODULE_DESCRIPTION("DWC3 Intel OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VERSION);
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ /* FIXME: move to pci_ids.h */
+ }, {
+#define PCI_DEVICE_ID_INTEL_DWC_TNG 0x119e
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DWC_TNG),
},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
+ if (dwc->is_ebc)
+ break;
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (set)
reg |= DWC3_DCTL_INITU1ENA;
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
+ if (dwc->is_ebc)
+ break;
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (set)
reg |= DWC3_DCTL_INITU2ENA;
* Enable transition to U1/U2 state when
* nothing is pending from application.
*/
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ if (!dwc->is_ebc) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= (DWC3_DCTL_ACCEPTU1ENA
+ | DWC3_DCTL_ACCEPTU2ENA);
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
dwc->resize_fifos = true;
dev_dbg(dwc->dev, "resize fifos flag SET\n");
dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
+ if (list_empty(&ep0->request_list))
+ return;
+
r = next_request(&ep0->request_list);
ur = &r->request;
return;
}
+ /*
+ * Per databook, if an XferNotready(Data) is received after
+ * XferComplete(Data), one possible reason is host is trying
+ * to complete data stage by moving a 0-length packet.
+ *
+ * REVISIT in case of other cases
+ */
+ if (dwc->ep0_next_event == DWC3_EP0_NRDY_STATUS) {
+ u32 size = 0;
+ struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+
+ if (dep->number == 0)
+ size = dep->endpoint.maxpacket;
+
+ dwc3_ep0_start_trans(dwc, dep->number,
+ dwc->ctrl_req_addr, size,
+ DWC3_TRBCTL_CONTROL_DATA);
+ }
+
break;
case DEPEVT_STATUS_CONTROL_STATUS:
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
+#include "otg.h"
+
+static LIST_HEAD(ebc_io_ops);
/**
* dwc3_gadget_set_test_mode - Enables USB2 Test Modes
if (dwc->ep0_bounced && dep->number == 0)
dwc->ep0_bounced = false;
- else
+ else if (!dep->ebc)
usb_gadget_unmap_request(&dwc->gadget, &req->request,
req->direction);
unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
struct dwc3_ep *dep = dwc->eps[ep];
- u32 timeout = 500;
+ u32 timeout = 5000;
u32 reg;
dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
if (dep->number == 0 || dep->number == 1)
return 0;
- dep->trb_pool = dma_alloc_coherent(dwc->dev,
- sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
- &dep->trb_pool_dma, GFP_KERNEL);
+ if (dep->ebc)
+ dep->trb_pool = dep->ebc->alloc_static_trb_pool(
+ &dep->trb_pool_dma);
+ else
+ dep->trb_pool = dma_alloc_coherent(dwc->dev,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ &dep->trb_pool_dma, GFP_KERNEL);
if (!dep->trb_pool) {
dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
dep->name);
{
struct dwc3 *dwc = dep->dwc;
- dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ if (dep->ebc)
+ dep->ebc->free_static_trb_pool();
+ else
+ dma_free_coherent(dwc->dev,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
dep->trb_pool, dep->trb_pool_dma);
dep->trb_pool = NULL;
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore)
+ bool ignore, u32 cfg_action)
{
struct dwc3_gadget_ep_cmd_params params;
memset(¶ms, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
- | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
+ | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
+ | cfg_action;
- /* Burst size is only needed in SuperSpeed mode */
- if (dwc->gadget.speed == USB_SPEED_SUPER) {
- u32 burst = dep->endpoint.maxburst - 1;
+ if (dep->ebc) {
+ if (dwc->gadget.speed == USB_SPEED_SUPER) {
+ u32 burst = 0;
- params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
- }
+ params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+ }
- if (ignore)
params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
- params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
- | DWC3_DEPCFG_XFER_NOT_READY_EN;
+ params.param1 = DWC3_DEPCFG_EBC_MODE_EN;
+
+ if (dep->ebc->is_ondemand)
+ params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+ dep->stream_capable = false;
+ } else {
+ /* Burst size is only needed in SuperSpeed mode */
+ if (dwc->gadget.speed == USB_SPEED_SUPER) {
+			/* In case a function forgets to set maxburst, maxburst
+			 * may still be 0, and we shouldn't subtract 1 from it.
+			 */
+ u32 burst = dep->endpoint.maxburst ?
+ dep->endpoint.maxburst - 1 : 0;
+
+ params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+ }
+
+ if (ignore)
+ params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+
+ params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
+ | DWC3_DEPCFG_XFER_NOT_READY_EN;
- if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
- params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
- | DWC3_DEPCFG_STREAM_EVENT_EN;
- dep->stream_capable = true;
+ if (usb_ss_max_streams(comp_desc) &&
+ usb_endpoint_xfer_bulk(desc)) {
+ params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+ | DWC3_DEPCFG_STREAM_EVENT_EN;
+ dep->stream_capable = true;
+ }
}
- if (usb_endpoint_xfer_isoc(desc))
+ if (usb_endpoint_xfer_isoc(desc) || usb_endpoint_is_bulk_out(desc))
params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
/*
dep->interval = 1 << (desc->bInterval - 1);
}
+ if (cfg_action == DWC3_DEPCFG_ACTION_RESTORE)
+ params.param2 = dep->ep_state;
+
+ return dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETEPCONFIG, ¶ms);
+}
+
+static int dwc3_gadget_update_ebc_ep_config(struct dwc3 *dwc,
+ struct dwc3_ep *dep,
+ const struct usb_endpoint_descriptor *desc,
+ const struct usb_ss_ep_comp_descriptor *comp_desc,
+ bool ignore_nrdy)
+{
+ u16 maxp;
+ struct dwc3_gadget_ep_cmd_params params;
+
+ if (!dep->ebc)
+ return -EINVAL;
+
+ memset(¶ms, 0x00, sizeof(params));
+
+ maxp = usb_endpoint_maxp(desc);
+
+ params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+ | DWC3_DEPCFG_MAX_PACKET_SIZE(maxp)
+ | DWC3_DEPCFG_ACTION_MODIFY;
+
+ if (dwc->gadget.speed == USB_SPEED_SUPER) {
+ u32 burst = 0;
+
+ params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+ }
+ params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+ params.param1 = DWC3_DEPCFG_EBC_MODE_EN;
+
+ if (!ignore_nrdy)
+ params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+ dep->stream_capable = false;
+
+ params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+ if (dep->direction)
+ params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+ if (desc->bInterval) {
+ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
+ dep->interval = 1 << (desc->bInterval - 1);
+ }
+
return dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_SETEPCONFIG, ¶ms);
}
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+ DWC3_DEPCFG_ACTION_INIT);
if (ret)
return ret;
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ if (dep->ebc)
+ dwc->is_ebc = 1;
+
if (!usb_endpoint_xfer_isoc(desc))
return 0;
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, int forcerm);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
if (!list_empty(&dep->req_queued)) {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, 1);
/* - giveback all requests to gadget driver */
while (!list_empty(&dep->req_queued)) {
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
+ struct ebc_io *ebc = dep->ebc;
u32 reg;
+ if (ebc) {
+ dwc->is_ebc = 0;
+
+ if (ebc->is_ondemand && ebc->xfer_stop)
+ ebc->xfer_stop();
+ }
+
dwc3_remove_requests(dwc, dep);
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
dep->type = 0;
dep->flags = 0;
+ /* set normal endpoint maxpacket to default value */
+ if (dep->number > 1)
+ dep->endpoint.maxpacket = 1024;
+
return 0;
}
dep = to_dwc3_ep(ep);
dwc = dep->dwc;
- if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_ENABLED) &&
+ dep->flags != DWC3_EP_HIBERNATION) {
dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
dep->name);
return 0;
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
struct dwc3_request *req, dma_addr_t dma,
- unsigned length, unsigned last, unsigned chain, unsigned node)
+ unsigned length, unsigned last, unsigned chain,
+ unsigned node, unsigned csp)
{
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
+ if (csp) {
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
+ }
+
+
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
}
/* The last TRB is a link TRB, not used for xfer */
- if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ (dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1)
return;
list_for_each_entry_safe(req, n, &dep->request_list, list) {
chain = false;
dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, chain, i);
+ last_one, chain, i, false);
if (last_one)
break;
}
} else {
+ unsigned csp = false;
+
dma = req->request.dma;
length = req->request.length;
trbs_left--;
if (list_is_last(&req->list, &dep->request_list))
last_one = 1;
+			/* For a bulk-out ep, if req is a short packet and
+			 * not the last one, enable CSP. */
+ if (req->short_packet && !last_one)
+ csp = true;
+
dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, false, 0);
+ last_one, false, 0, csp);
if (last_one)
break;
}
}
+/*
+ * dwc3_prepare_ebc_trbs - setup TRBs from DvC endpoint requests
+ * @dep: endpoint for which requests are being prepared
+ * @starting: true if the endpoint is idle and no requests are queued.
+ *
+ * The functions goes through the requests list and setups TRBs for the
+ * transfers.
+ */
+static void dwc3_prepare_ebc_trbs(struct dwc3_ep *dep,
+ bool starting)
+{
+ struct dwc3_request *req, *n;
+ struct dwc3_trb *trb_st_hw;
+ struct dwc3_trb *trb_link;
+ struct dwc3_trb *trb;
+ u32 trbs_left;
+ u32 trbs_num;
+ u32 trbs_mask;
+
+ /* BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);*/
+ trbs_num = dep->ebc->static_trb_pool_size;
+ trbs_mask = trbs_num - 1;
+
+ /* the first request must not be queued */
+ trbs_left = (dep->busy_slot - dep->free_slot) & trbs_mask;
+ /*
+	 * if busy & slot are equal then it is either full or empty. If we are
+	 * starting to process requests then we are empty. Otherwise we are
+	 * full and don't do anything.
+ */
+ if (!trbs_left) {
+ if (!starting)
+ return;
+ trbs_left = trbs_num;
+ dep->busy_slot = 0;
+ dep->free_slot = 0;
+ }
+
+	/* The tail TRB is a link TRB, not used for xfer */
+ if ((trbs_left <= 1))
+ return;
+
+ list_for_each_entry_safe(req, n, &dep->request_list, list) {
+ unsigned int last_one = 0;
+ unsigned int cur_slot;
+
+		/* revisit: don't use a specific TRB buffer for the Debug class? */
+ trb = &dep->trb_pool[dep->free_slot & trbs_mask];
+ cur_slot = dep->free_slot;
+ dep->free_slot++;
+
+ /* Skip the LINK-TRB */
+ if (((cur_slot & trbs_mask) == trbs_num - 1))
+ continue;
+
+ dwc3_gadget_move_request_queued(req);
+ trbs_left--;
+
+ /* Is our TRB pool empty? */
+ if (!trbs_left)
+ last_one = 1;
+ /* Is this the last request? */
+ if (list_empty(&dep->request_list))
+ last_one = 1;
+
+ req->trb = trb;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+ trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
+ trb->bpl = lower_32_bits(req->request.dma);
+ trb->bph = upper_32_bits(req->request.dma);
+
+ switch (usb_endpoint_type(dep->endpoint.desc)) {
+ case USB_ENDPOINT_XFER_BULK:
+ trb->ctrl = DWC3_TRBCTL_NORMAL;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ default:
+ /*
+ * This is only possible with faulty memory because we
+ * checked it already :)
+ */
+ BUG();
+ }
+
+ trb->ctrl |= DWC3_TRB_CTRL_HWO | DWC3_TRB_CTRL_CHN;
+
+ if (last_one) {
+ if (trbs_left >= 1) {
+ trb_st_hw = &dep->trb_pool[0];
+
+ trb_link = &dep->trb_pool[dep->free_slot &
+ trbs_mask];
+ trb_link->bpl = lower_32_bits(
+ dwc3_trb_dma_offset(dep, trb_st_hw));
+ trb_link->bph = upper_32_bits(
+ dwc3_trb_dma_offset(dep, trb_st_hw));
+ trb_link->ctrl = DWC3_TRBCTL_LINK_TRB;
+ trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
+ trb_link->size = 0;
+ }
+ break;
+ }
+ }
+}
+
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
int start_new)
{
* new requests as we try to set the IOC bit only on the last request.
*/
if (start_new) {
- if (list_empty(&dep->req_queued))
- dwc3_prepare_trbs(dep, start_new);
+ if (dep->ebc)
+ dwc3_prepare_ebc_trbs(dep, start_new);
+ else
+ if (list_empty(&dep->req_queued))
+ dwc3_prepare_trbs(dep, start_new);
/* req points to the first request which will be sent */
req = next_request(&dep->req_queued);
* here and stop, unmap, free and del each of the linked
* requests instead of what we do now.
*/
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
+ if (!dep->ebc)
+ usb_gadget_unmap_request(&dwc->gadget, &req->request,
+ req->direction);
list_del(&req->list);
return ret;
}
WARN_ON_ONCE(!dep->resource_index);
}
+ if (dep->ebc) {
+ if (dep->ebc->is_ondemand == 1) {
+ ret = dwc3_gadget_update_ebc_ep_config(dwc, dep,
+ dep->endpoint.desc, dep->comp_desc, true);
+
+ if (ret < 0) {
+ dev_dbg(dwc->dev,
+ "DEPCFG command failed on %s\n",
+ dep->name);
+ return ret;
+ }
+ dev_dbg(dwc->dev,
+ "successfully udpated DEPCFG command on %s\n",
+ dep->name);
+ }
+
+ if (dep->ebc->xfer_start)
+ dep->ebc->xfer_start();
+ }
+
return 0;
}
req->direction = dep->direction;
req->epnum = dep->number;
+ /* specific handling for debug class */
+ if (dep->ebc) {
+ list_add_tail(&req->list, &dep->request_list);
+
+ if ((dep->ebc->is_ondemand == 1) &&
+ (!(dep->flags & DWC3_EP_PENDING_REQUEST))) {
+ dev_dbg(dwc->dev, "%s: delayed to kick ebc transfers\n",
+ dep->name);
+ return 0;
+ }
+
+ if (dep->flags & DWC3_EP_BUSY) {
+ dwc3_stop_active_transfer(dwc, dep->number, 1);
+ dep->flags = DWC3_EP_ENABLED;
+ }
+
+ ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ if (ret)
+ dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
+ dep->name);
+ return ret;
+ }
+
/*
* We only add to our list of requests now and
* start consuming the list once we get XferNotReady
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
if (list_empty(&dep->req_queued)) {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, 1);
dep->flags = DWC3_EP_ENABLED;
}
return 0;
int ret;
+ spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -ESHUTDOWN;
}
dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
request, ep->name, request->length);
- spin_lock_irqsave(&dwc->lock, flags);
+	/* pad OUT endpoint buffer to MaxPacketSize per databook requirement */
+ req->short_packet = false;
+ if (!IS_ALIGNED(request->length, ep->desc->wMaxPacketSize)
+ && !(dep->number & 1) && (dep->number != DWC3_EP_EBC_OUT_NB)) {
+ request->length = roundup(request->length,
+ (u32) ep->desc->wMaxPacketSize);
+ /* set flag for bulk-out short request */
+ if (usb_endpoint_is_bulk_out(dep->endpoint.desc))
+ req->short_packet = true;
+ }
+
ret = __dwc3_gadget_ep_queue(dep, req);
spin_unlock_irqrestore(&dwc->lock, flags);
}
if (r == req) {
/* wait until it is processed */
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, 1);
goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
return 0;
}
+static int __dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+{
+ u32 reg;
+ u32 timeout = 500;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (is_on)
+ reg |= DWC3_DCTL_RUN_STOP;
+ else
+ reg &= ~DWC3_DCTL_RUN_STOP;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (is_on) {
+ if (!(reg & DWC3_DSTS_DEVCTRLHLT))
+ break;
+ } else {
+ if (reg & DWC3_DSTS_DEVCTRLHLT)
+ break;
+ }
+ timeout--;
+ if (!timeout)
+ return -ETIMEDOUT;
+ udelay(1);
+ } while (1);
+
+ return 0;
+}
+
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
u32 reg;
u32 timeout = 500;
+ struct usb_phy *usb_phy;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (is_on) {
+ if (is_on && !dwc->pullups_connected) {
if (dwc->revision <= DWC3_REVISION_187A) {
reg &= ~DWC3_DCTL_TRGTULST_MASK;
reg |= DWC3_DCTL_TRGTULST_RX_DET;
reg &= ~DWC3_DCTL_KEEP_CONNECT;
reg |= DWC3_DCTL_RUN_STOP;
dwc->pullups_connected = true;
- } else {
+ } else if (!is_on && dwc->pullups_connected) {
reg &= ~DWC3_DCTL_RUN_STOP;
dwc->pullups_connected = false;
- }
+
+ /* WORKAROUND: reset PHY via FUNC_CTRL before disconnect
+ * to avoid PHY hang
+ */
+ usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (usb_phy)
+ usb_phy_io_write(usb_phy,
+ 0x6D, ULPI_FUNC_CTRL);
+ usb_put_phy(usb_phy);
+ } else
+ return 0;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
is_on = !!is_on;
+ if (dwc->soft_connected == is_on)
+ return 0;
+
+ dwc->soft_connected = is_on;
+
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_gadget_run_stop(dwc, is_on);
spin_unlock_irqrestore(&dwc->lock, flags);
DWC3_DEVTEN_EVNTOVERFLOWEN |
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
+ DWC3_DEVTEN_HIBERNATIONREQEVTEN |
DWC3_DEVTEN_WKUPEVTEN |
DWC3_DEVTEN_ULSTCNGEN |
DWC3_DEVTEN_CONNECTDONEEN |
static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
-static int dwc3_gadget_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int dwc3_init_for_enumeration(struct dwc3 *dwc)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
struct dwc3_ep *dep;
- unsigned long flags;
int ret = 0;
- int irq;
u32 reg;
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
- IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
- irq, ret);
- goto err0;
- }
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- if (dwc->gadget_driver) {
- dev_err(dwc->dev, "%s is already bound to %s\n",
- dwc->gadget.name,
- dwc->gadget_driver->driver.name);
- ret = -EBUSY;
- goto err1;
- }
-
- dwc->gadget_driver = driver;
-
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
reg |= dwc->maximum_speed;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ dwc->is_ebc = 0;
dwc->start_config_issued = false;
/* Start with SuperSpeed Default */
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err2;
+ return ret;
}
dep = dwc->eps[1];
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err3;
+ goto err0;
}
/* begin to receive SETUP packets */
dwc3_gadget_enable_irq(dwc);
+ return 0;
+err0:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+ return ret;
+}
+
+static int dwc3_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ int ret = 0;
+ int irq = 0;
+ struct usb_phy *usb_phy;
+
+ if (dwc->is_otg) {
+ usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!usb_phy) {
+ dev_err(dwc->dev, "OTG driver not available\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+
+ otg_set_peripheral(usb_phy->otg, &dwc->gadget);
+ usb_put_phy(usb_phy);
+ } else {
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+ ret = request_threaded_irq(irq, dwc3_interrupt,
+ dwc3_thread_interrupt, IRQF_SHARED,
+ "dwc3", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ if (dwc->gadget_driver) {
+ dev_err(dwc->dev, "%s is already bound to %s\n",
+ dwc->gadget.name,
+ dwc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err1;
+ }
+
+ dwc->gadget_driver = driver;
+
+ if (!dwc->is_otg) {
+ ret = dwc3_init_for_enumeration(dwc);
+ if (ret)
+ goto err2;
+ }
+
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
-err3:
- __dwc3_gadget_ep_disable(dwc->eps[0]);
-
err2:
dwc->gadget_driver = NULL;
err1:
spin_unlock_irqrestore(&dwc->lock, flags);
- free_irq(irq, dwc);
+ if (!dwc->is_otg)
+ free_irq(irq, dwc);
err0:
return ret;
return 0;
}
-static const struct usb_gadget_ops dwc3_gadget_ops = {
- .get_frame = dwc3_gadget_get_frame,
- .wakeup = dwc3_gadget_wakeup,
- .set_selfpowered = dwc3_gadget_set_selfpowered,
- .pullup = dwc3_gadget_pullup,
- .udc_start = dwc3_gadget_start,
- .udc_stop = dwc3_gadget_stop,
-};
+static int __dwc3_vbus_draw(struct dwc3 *dwc, unsigned ma)
+{
+ int ret;
+ struct usb_phy *usb_phy;
-/* -------------------------------------------------------------------------- */
+ usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!usb_phy) {
+ dev_err(dwc->dev, "OTG driver not available\n");
+ return -ENODEV;
+ }
+
+ ret = usb_phy_set_power(usb_phy, ma);
+ usb_put_phy(usb_phy);
+
+ return ret;
+}
+
+static int dwc3_vbus_draw(struct usb_gadget *g, unsigned ma)
+{
+ unsigned ma_otg = 0;
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ dev_dbg(dwc->dev, "otg_set_power: %d mA\n", ma);
+
+ switch (ma) {
+ case USB3_I_MAX_OTG:
+ ma_otg = OTG_USB3_900MA;
+ break;
+ case USB3_I_UNIT_OTG:
+ ma_otg = OTG_USB3_150MA;
+ break;
+ case USB2_I_MAX_OTG:
+ ma_otg = OTG_USB2_500MA;
+ break;
+ case USB2_I_UNIT_OTG:
+ ma_otg = OTG_USB2_100MA;
+ break;
+ default:
+ dev_err(dwc->dev,
+ "wrong charging current reported: %dmA\n", ma);
+ }
+
+ return __dwc3_vbus_draw(dwc, ma_otg);
+}
+
+static const struct usb_gadget_ops dwc3_gadget_ops = {
+ .get_frame = dwc3_gadget_get_frame,
+ .wakeup = dwc3_gadget_wakeup,
+ .set_selfpowered = dwc3_gadget_set_selfpowered,
+ .pullup = dwc3_gadget_pullup,
+ .udc_start = dwc3_gadget_start,
+ .udc_stop = dwc3_gadget_stop,
+};
+
+/* -------------------------------------------------------------------------- */
static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
u8 num, u32 direction)
{
struct dwc3_ep *dep;
+ struct ebc_io *ebc, *n;
u8 i;
for (i = 0; i < num; i++) {
dep->endpoint.name = dep->name;
dep->direction = (epnum & 1);
+ list_for_each_entry_safe(ebc, n, &ebc_io_ops, list) {
+ if (epnum == ebc->epnum) {
+ dep->ebc = ebc;
+ if (ebc->init)
+ if (ebc->init() == -ENODEV)
+ dev_err(dwc->dev,
+ "debug class init fail %d\n",
+ epnum);
+ }
+ }
+
if (epnum == 0 || epnum == 1) {
dep->endpoint.maxpacket = 512;
dep->endpoint.maxburst = 1;
*/
dep->flags = DWC3_EP_PENDING_REQUEST;
} else {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, 1);
dep->flags = DWC3_EP_ENABLED;
}
return 1;
dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
break;
case DWC3_DEPEVT_XFERINPROGRESS:
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
+ if ((!usb_endpoint_xfer_isoc(dep->endpoint.desc)) &&
+ (!usb_endpoint_xfer_bulk(dep->endpoint.desc))) {
+ dev_dbg(dwc->dev, "%s is not an Isochronous/bulk endpoint\n",
dep->name);
return;
}
}
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, int forcerm)
{
struct dwc3_ep *dep;
struct dwc3_gadget_ep_cmd_params params;
dep = dwc->eps[epnum];
+ if (dep->ebc) {
+ if (dep->ebc->is_ondemand == 1) {
+ ret = dwc3_gadget_update_ebc_ep_config(dwc, dep,
+ dep->endpoint.desc, dep->comp_desc, false);
+ if (ret < 0) {
+ dev_dbg(dwc->dev,
+ "DEPCFG failed on %s\n",
+ dep->name);
+ return;
+ }
+ dev_dbg(dwc->dev,
+ "successfully udpated DEPCFG command on %s\n",
+ dep->name);
+ }
+
+ if (dep->ebc->xfer_stop)
+ dep->ebc->xfer_stop();
+ else
+ dev_dbg(dwc->dev, "%s xfer_stop() NULL\n", dep->name);
+ }
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
+ dep->number);
+
if (!dep->resource_index)
return;
*/
cmd = DWC3_DEPCMD_ENDTRANSFER;
- cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
+ cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+ if (forcerm)
+ cmd |= DWC3_DEPCMD_HIPRI_FORCERM;
memset(¶ms, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms);
WARN_ON_ONCE(ret);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
+static void link_state_change_work(struct work_struct *data)
+{
+ struct dwc3 *dwc = container_of((struct delayed_work *)data,
+ struct dwc3, link_work);
+
+ if (dwc->link_state == DWC3_LINK_STATE_U3) {
+ dev_info(dwc->dev, "device suspended; notify OTG\n");
+ __dwc3_vbus_draw(dwc, OTG_DEVICE_SUSPEND);
+ }
+}
+
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
u32 reg;
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER;
+ __dwc3_vbus_draw(dwc, OTG_USB3_150MA);
break;
case DWC3_DCFG_HIGHSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_HIGH;
+ __dwc3_vbus_draw(dwc, OTG_USB2_100MA);
break;
case DWC3_DCFG_FULLSPEED2:
case DWC3_DCFG_FULLSPEED1:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_FULL;
+ __dwc3_vbus_draw(dwc, OTG_USB2_100MA);
break;
case DWC3_DCFG_LOWSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
dwc->gadget.ep0->maxpacket = 8;
dwc->gadget.speed = USB_SPEED_LOW;
+ __dwc3_vbus_draw(dwc, OTG_USB2_100MA);
break;
}
}
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+ ret = dwc3_gadget_set_ep_config(dwc, dep,
+ &dwc3_gadget_ep0_desc, NULL, false,
+ DWC3_DEPCFG_ACTION_MODIFY);
if (ret) {
- dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ dev_err(dwc->dev, "failed to update %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+ ret = dwc3_gadget_set_ep_config(dwc, dep,
+ &dwc3_gadget_ep0_desc, NULL, false,
+ DWC3_DEPCFG_ACTION_MODIFY);
if (ret) {
- dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ dev_err(dwc->dev, "failed to update %s\n", dep->name);
return;
}
{
dev_vdbg(dwc->dev, "%s\n", __func__);
+ dev_info(dwc->dev, "device resumed; notify OTG\n");
+ __dwc3_vbus_draw(dwc, OTG_DEVICE_RESUME);
+
/*
* TODO take core out of low power mode when that's
* implemented.
dwc->link_state = next;
+ if (next == DWC3_LINK_STATE_U3)
+ schedule_delayed_work(
+ &dwc->link_work, msecs_to_jiffies(1000));
+
dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
+static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc)
+{
+ dev_vdbg(dwc->dev, "%s\n", __func__);
+
+ if (dwc->hiber_enabled)
+ pm_runtime_put(dwc->dev);
+}
+
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
const struct dwc3_event_devt *event)
{
+ u32 reg;
+
switch (event->type) {
case DWC3_DEVICE_EVENT_DISCONNECT:
dwc3_gadget_disconnect_interrupt(dwc);
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
+ case DWC3_DEVICE_EVENT_HIBER_REQ:
+ dwc3_gadget_hibernation_interrupt(dwc);
+ break;
case DWC3_DEVICE_EVENT_EOPF:
dev_vdbg(dwc->dev, "End of Periodic Frame\n");
break;
break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
dev_vdbg(dwc->dev, "Erratic Error\n");
+
+		/* The controller may generate too many Erratic Error messages;
+		 * disable it until we find a way to recover from the failure.
+		 */
+ reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+ reg &= ~DWC3_DEVTEN_ERRTICERREN;
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+ dev_info(dwc->dev, "Erratic Error Event disabled\n");
break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
dev_vdbg(dwc->dev, "Command Complete\n");
struct dwc3 *dwc = _dwc;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
+ u32 reg;
int i;
spin_lock_irqsave(&dwc->lock, flags);
evt->count = 0;
evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
+
+ /* Unmask interrupt */
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(i));
+ reg &= ~DWC3_GEVNTSIZ_INTMASK;
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(i), reg);
}
spin_unlock_irqrestore(&dwc->lock, flags);
{
struct dwc3_event_buffer *evt;
u32 count;
+ u32 reg;
evt = dwc->ev_buffs[buf];
evt->count = count;
evt->flags |= DWC3_EVENT_PENDING;
+ /* Mask interrupt */
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+ reg |= DWC3_GEVNTSIZ_INTMASK;
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+
return IRQ_WAKE_THREAD;
}
irqreturn_t ret = IRQ_NONE;
spin_lock(&dwc->lock);
+ if (dwc->pm_state != PM_ACTIVE) {
+ if (dwc->pm_state == PM_SUSPENDED) {
+ dev_info(dwc->dev, "u2/u3 pmu is received\n");
+ pm_runtime_get(dwc->dev);
+ dwc->pm_state = PM_RESUMING;
+ ret = IRQ_HANDLED;
+ }
+ goto out;
+ }
for (i = 0; i < dwc->num_event_buffers; i++) {
irqreturn_t status;
ret = status;
}
+out:
spin_unlock(&dwc->lock);
return ret;
u32 reg;
int ret;
+ dwc->scratch_array = dma_alloc_coherent(dwc->dev,
+ sizeof(*dwc->scratch_array),
+ &dwc->scratch_array_dma, GFP_KERNEL);
+ if (!dwc->scratch_array) {
+ dev_err(dwc->dev, "failed to allocate scratch_arrary\n");
+ return -ENOMEM;
+ }
+
+ dwc->scratch_buffer[0] = dma_alloc_coherent(dwc->dev,
+ DWC3_SCRATCH_BUF_SIZE,
+ &dwc->scratch_array->dma_adr[0], GFP_KERNEL);
+ if (!dwc->scratch_buffer[0]) {
+ dev_err(dwc->dev, "failed to allocate scratch_buffer\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
&dwc->ctrl_req_addr, GFP_KERNEL);
if (!dwc->ctrl_req) {
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
+ INIT_DELAYED_WORK(&dwc->link_work, link_state_change_work);
+
/*
* REVISIT: Here we should clear all pending IRQs to be
* sure we're starting from a well known location.
dwc->ctrl_req, dwc->ctrl_req_addr);
err0:
+ dma_free_coherent(dwc->dev,
+ DWC3_SCRATCH_BUF_SIZE, dwc->scratch_buffer[0],
+ (dma_addr_t)dwc->scratch_array->dma_adr[0]);
+
+err:
+ dma_free_coherent(dwc->dev, sizeof(*dwc->scratch_array),
+ dwc->scratch_array, dwc->scratch_array_dma);
return ret;
}
dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
dwc->ctrl_req, dwc->ctrl_req_addr);
+
+ dma_free_coherent(dwc->dev,
+ DWC3_SCRATCH_BUF_SIZE, dwc->scratch_buffer[0],
+ (dma_addr_t)dwc->scratch_array->dma_adr[0]);
+
+ dma_free_coherent(dwc->dev, sizeof(*dwc->scratch_array),
+ dwc->scratch_array, dwc->scratch_array_dma);
}
int dwc3_gadget_prepare(struct dwc3 *dwc)
err0:
return ret;
}
+
+/* Append an EBC I/O ops entry to the ebc_io_ops list. No locking here;
+ * NOTE(review): callers presumably serialize registration — confirm. */
+void dwc3_register_io_ebc(struct ebc_io *ebc)
+{
+	list_add_tail(&ebc->list, &ebc_io_ops);
+}
+
+/* Remove a previously registered EBC I/O ops entry from the list. */
+void dwc3_unregister_io_ebc(struct ebc_io *ebc)
+{
+	list_del(&ebc->list);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Retrieve and save the hardware state of one endpoint prior to
+ * hibernation. Issues the Get Endpoint State command and reads the
+ * result back from this endpoint's DEPCMDPAR2 register into
+ * dep->ep_state.
+ */
+static void dwc3_gadget_get_ep_state(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+	struct dwc3_gadget_ep_cmd_params params;
+	int ret;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	/* The command takes no parameters; pass a zeroed block. */
+	memset(&params, 0, sizeof(params));
+	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+			DWC3_DEPCMD_GETEPSTATE, &params);
+	WARN_ON_ONCE(ret);
+
+	dep->ep_state = dwc3_readl(dwc->regs, DWC3_DEPCMDPAR2(dep->number));
+}
+
+/* Snapshot the global/device registers that hibernation will clobber
+ * into dwc->hwregs so dwc3_restore_hwregs() can replay them. */
+static void dwc3_cache_hwregs(struct dwc3 *dwc)
+{
+	struct dwc3_hwregs *regs = &dwc->hwregs;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	regs->guctl = dwc3_readl(dwc->regs, DWC3_GUCTL);
+	regs->grxthrcfg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+	regs->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
+	regs->devten = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+	regs->gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
+	regs->gusb3pipectl0 = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+	regs->gusb2phycfg0 = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+	regs->gevntadrlo = dwc3_readl(dwc->regs, DWC3_GEVNTADRLO(0));
+	regs->gevntadrhi = dwc3_readl(dwc->regs, DWC3_GEVNTADRHI(0));
+	regs->gevntsiz = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
+}
+
+/* Replay the register snapshot taken by dwc3_cache_hwregs().
+ * The write order is deliberate (PHY/event buffer config before
+ * DCFG/DEVTEN/GCTL); GEVNTCOUNT(0) is cleared so no stale event
+ * count survives the resume. Do not reorder. */
+static void dwc3_restore_hwregs(struct dwc3 *dwc)
+{
+	struct dwc3_hwregs *regs = &dwc->hwregs;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	dwc3_writel(dwc->regs, DWC3_GUCTL, regs->guctl);
+	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, regs->grxthrcfg);
+	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), regs->gusb3pipectl0);
+	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), regs->gusb2phycfg0);
+	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), regs->gevntadrlo);
+	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), regs->gevntadrhi);
+	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), regs->gevntsiz);
+	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
+	dwc3_writel(dwc->regs, DWC3_DCFG, regs->dcfg);
+	dwc3_writel(dwc->regs, DWC3_DEVTEN, regs->devten);
+	dwc3_writel(dwc->regs, DWC3_GCTL, regs->gctl);
+}
+
+/*
+ * Trigger a controller save-state operation (DCTL.CSS) and poll
+ * DSTS.SSS until the controller reports completion.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the save never completes
+ * (~1000us poll budget).
+ */
+static int dwc3_gadget_controller_save_state(struct dwc3 *dwc)
+{
+	u32 reg;
+	u32 timeout = 1000;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	reg |= DWC3_DCTL_CSS;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_SSS)) {
+			/* Trace the exit here: the loop only leaves via
+			 * return, so code after it would be unreachable. */
+			dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+			return 0;
+		}
+
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+}
+
+/*
+ * Trigger a controller restore-state operation (DCTL.CRS) and poll
+ * DSTS.RSS until the controller reports completion.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the restore never completes
+ * (~1000us poll budget).
+ */
+static int dwc3_gadget_controller_restore_state(struct dwc3 *dwc)
+{
+	u32 reg;
+	u32 timeout = 1000;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	reg |= DWC3_DCTL_CRS;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_RSS)) {
+			/* Trace the exit here: the loop only leaves via
+			 * return, so code after it would be unreachable. */
+			dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+			return 0;
+		}
+
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+}
+
+/* Set (is_on != 0) or clear DCTL.KEEP_CONNECT, controlling whether the
+ * controller maintains the device connection across hibernation. */
+void dwc3_gadget_keep_conn(struct dwc3 *dwc, int is_on)
+{
+	u32 reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	if (is_on)
+		reg |= DWC3_DCTL_KEEP_CONNECT;
+	else
+		reg &= ~DWC3_DCTL_KEEP_CONNECT;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+}
+
+/*
+ * Runtime-suspend (hibernation entry) for the gadget controller.
+ *
+ * Sequence: back up per-endpoint state and flags, stop the controller
+ * while asking it to keep the connection, snapshot global registers,
+ * mask IRQs, tear down event buffers, and finally command a hardware
+ * save-state. The statement order mirrors the hardware requirements;
+ * do not reorder.
+ *
+ * Returns 0 (also when hibernation is disabled or already suspended).
+ */
+int dwc3_runtime_suspend(struct device *device)
+{
+	struct dwc3 *dwc;
+	struct platform_device *pdev;
+	unsigned long flags;
+	u32 epnum;
+	struct dwc3_ep *dep;
+
+	pdev = to_platform_device(device);
+	dwc = platform_get_drvdata(pdev);
+
+	/* Nothing to do unless hibernation support is enabled. */
+	if (!dwc || !dwc->hiber_enabled)
+		return 0;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* Only PM_ACTIVE may transition to PM_SUSPENDED. */
+	if (dwc->pm_state != PM_ACTIVE) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+
+	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+		dep = dwc->eps[epnum];
+		if (!(dep->flags & DWC3_EP_ENABLED))
+			continue;
+
+		/* Preserve flags so runtime resume can restore them. */
+		dep->flags_backup = dep->flags;
+		if (dep->flags & DWC3_EP_BUSY)
+			dwc3_stop_active_transfer(dwc, epnum, 0);
+
+		dwc3_gadget_get_ep_state(dwc, dep);
+
+		dep->flags = DWC3_EP_HIBERNATION;
+	}
+
+	/* Stop the controller but keep the link connection alive. */
+	__dwc3_gadget_run_stop(dwc, 0);
+	dwc3_gadget_keep_conn(dwc, 1);
+
+	dwc3_cache_hwregs(dwc);
+
+	dwc3_gadget_disable_irq(dwc);
+	dwc3_event_buffers_cleanup(dwc);
+
+	/* NOTE(review): return value ignored — a save-state timeout is
+	 * silently dropped here; confirm this is intentional. */
+	dwc3_gadget_controller_save_state(dwc);
+
+	dwc->pm_state = PM_SUSPENDED;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	/* Poll link state later from process context. */
+	schedule_delayed_work(&dwc->link_work, msecs_to_jiffies(1000));
+	dev_info(dwc->dev, "suspended\n");
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Runtime-resume (hibernation exit) for the gadget controller.
+ *
+ * Reverses dwc3_runtime_suspend(): points the controller at the
+ * scratch buffer, commands a hardware restore-state, replays the
+ * cached registers, re-enables ep0out/ep0in, restarts any transfer
+ * that was in flight, then acts on the current link state.
+ *
+ * Returns 0 on success or a negative errno if ep0 re-enable fails.
+ *
+ * Fix vs. original: the five `&params` arguments below had been
+ * corrupted into the mojibake `¶ms` (HTML-entity damage), which
+ * does not compile.
+ */
+int dwc3_runtime_resume(struct device *device)
+{
+	struct dwc3 *dwc;
+	struct platform_device *pdev;
+	unsigned long flags;
+	int ret;
+	u32 epnum;
+	u32 timeout = 500;
+	u32 reg;
+	u8 link_state;
+	struct dwc3_ep *dep;
+
+	pdev = to_platform_device(device);
+	dwc = platform_get_drvdata(pdev);
+
+	if (!dwc || !dwc->hiber_enabled)
+		return 0;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* Nothing to restore if we never hibernated. */
+	if (dwc->pm_state == PM_ACTIVE ||
+			dwc->pm_state == PM_DISCONNECTED) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+	/* Tell the controller where the scratch buffer lives. */
+	dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_SET_SCRATCH_ADDR_LO,
+			dwc->scratch_array_dma & 0xffffffffU);
+
+	dwc3_gadget_controller_restore_state(dwc);
+
+	dwc3_restore_hwregs(dwc);
+
+	/* Re-enable both directions of endpoint 0. */
+	dep = dwc->eps[0];
+	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+	if (ret) {
+		dev_err(dwc->dev, "failed to enable %s during runtime resume\n",
+				dep->name);
+		goto err0;
+	}
+
+	dep = dwc->eps[1];
+	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+	if (ret) {
+		dev_err(dwc->dev, "failed to enable %s during runtime resume\n",
+				dep->name);
+		goto err1;
+	}
+
+	/* Restart any ep0 transfer that was interrupted by suspend. */
+	for (epnum = 0; epnum < 2; epnum++) {
+		struct dwc3_gadget_ep_cmd_params params;
+
+		dep = dwc->eps[epnum];
+		if (dep->flags_backup & DWC3_EP_BUSY) {
+			dwc->ep0_trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+			memset(&params, 0, sizeof(params));
+			params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+			params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+			ret = dwc3_send_gadget_ep_cmd(dwc, epnum,
+					DWC3_DEPCMD_STARTTRANSFER, &params);
+			WARN_ON_ONCE(ret);
+		}
+
+		dep->flags = dep->flags_backup;
+		dep->flags_backup = 0;
+	}
+
+	__dwc3_gadget_run_stop(dwc, 1);
+	dwc3_gadget_keep_conn(dwc, 1);
+
+	/* Wait (bounded) until DSTS.DCNRD clears and DSTS is readable. */
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_DCNRD))
+			break;
+
+		timeout--;
+		if (!timeout)
+			break;
+		udelay(1);
+	} while (1);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+	link_state = DWC3_DSTS_USBLNKST(reg);
+	switch (link_state) {
+	case DWC3_LINK_STATE_U3:
+	case DWC3_LINK_STATE_RESUME:
+		dwc3_gadget_conndone_interrupt(dwc);
+
+		/* Rebuild every non-control endpoint that was enabled. */
+		for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+			dep = dwc->eps[epnum];
+			if (!(dep->flags_backup & DWC3_EP_ENABLED))
+				continue;
+			if (dep->endpoint.desc)
+				dwc3_gadget_set_ep_config(dwc,
+					dep, dep->endpoint.desc, dep->comp_desc,
+					false, DWC3_DEPCFG_ACTION_RESTORE);
+
+			dwc3_gadget_set_xfer_resource(dwc, dep);
+
+			reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+			reg |= DWC3_DALEPENA_EP(epnum);
+			dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+			if (dep->flags_backup & DWC3_EP_STALL)
+				__dwc3_gadget_ep_set_halt(dep, 1);
+
+			if (dep->flags_backup & DWC3_EP_BUSY) {
+				struct dwc3_request *req;
+				struct dwc3_gadget_ep_cmd_params params;
+
+				req = next_request(&dep->req_queued);
+				if (!req)
+					break;
+				req->trb->ctrl |= DWC3_TRB_CTRL_HWO;
+				memset(&params, 0, sizeof(params));
+				params.param0 = upper_32_bits(req->trb_dma);
+				params.param1 = lower_32_bits(req->trb_dma);
+
+				ret = dwc3_send_gadget_ep_cmd(dwc, epnum,
+						DWC3_DEPCMD_STARTTRANSFER,
+						&params);
+				WARN_ON_ONCE(ret);
+
+			}
+
+			dep->flags = dep->flags_backup;
+			dep->flags_backup = 0;
+		}
+
+		/* Kick the link back into recovery. */
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+		break;
+	case DWC3_LINK_STATE_RESET:
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+		break;
+	default:
+		/* wait for USB Reset or Connect Done event */
+		break;
+	}
+
+	dwc->pm_state = PM_ACTIVE;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	__dwc3_vbus_draw(dwc, OTG_DEVICE_RESUME);
+	dev_info(dwc->dev, "resumed\n");
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+	return 0;
+
+err1:
+	__dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return ret;
+}
+#else
+/* !CONFIG_PM_RUNTIME: hibernation keep-connect becomes a no-op. */
+void dwc3_gadget_keep_conn(struct dwc3 *dwc, int is_on) {}
+#endif
#define to_dwc3_ep(ep) (container_of(ep, struct dwc3_ep, endpoint))
#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
+/* max power consumption of the device from the bus */
+#define USB3_I_MAX_OTG 896
+#define USB3_I_UNIT_OTG 144
+#define USB2_I_MAX_OTG 500
+#define USB2_I_UNIT_OTG 100
+
/* DEPCFG parameter 1 */
#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
+#define DWC3_DEPCFG_EBC_MODE_EN (1 << 15)
#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
--- /dev/null
+/*
+ * otg.c - Designware USB3 DRD Controller OTG driver
+ *
+ * Authors: Wang Yu <yu.y.wang@intel.com>
+ * Synopsys inc
+ *
+ * Description:
+ *
+ * This driver is developed based on dwc_otg3.c, which was provided by
+ * Synopsys. Some unused features (for example HNP/SRP/ADP support)
+ * were removed because they have not been needed so far, and charger
+ * detection support was added to the state machine.
+ * Supports SDP/CDP/DCP/Micro-ACA/ACA-Dock and SE1 USB charger types.
+ *
+ * Besides that, all hardware dependencies are passed in as arguments,
+ * which vendors need to implement themselves. For example: VBus drive,
+ * USB ID pin value and so on.
+ *
+ * To enable this OTG driver, users have to call the dwc3_otg_register
+ * API to register one dwc3_otg_hw_ops object which includes all
+ * hardware dependent code.
+ *
+ * License:
+ * Below declaration is copy from Synopsys DWC3 SW 2.10a released README.txt.
+ *
+ * IMPORTANT:
+ *
+ * Synopsys SS USB3 Linux Driver Software and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+struct dwc3_otg_hw_ops *dwc3_otg_pdata;
+struct dwc_device_par *platform_par;
+
+static struct mutex lock;
+static const char driver_name[] = "dwc3_otg";
+static struct dwc_otg2 *the_transceiver;
+static void dwc_otg_remove(struct pci_dev *pdev);
+
+
+/* Map a struct usb_otg back to its containing dwc_otg2. */
+static inline struct dwc_otg2 *xceiv_to_dwc_otg2(struct usb_otg *x)
+{
+	return container_of(x, struct dwc_otg2, otg);
+}
+
+/* Return the single global OTG instance (set at probe time). */
+struct dwc_otg2 *dwc3_get_otg(void)
+{
+	return the_transceiver;
+}
+EXPORT_SYMBOL_GPL(dwc3_get_otg);
+
+/* Caller must hold otg->lock */
+void dwc3_wakeup_otg_thread(struct dwc_otg2 *otg)
+{
+	if (!otg->main_thread)
+		return;
+
+	otg_dbg(otg, "\n");
+	/* Tell the main thread that something has happened */
+	otg->main_wakeup_needed = 1;
+	wake_up_interruptible(&otg->main_wq);
+}
+EXPORT_SYMBOL_GPL(dwc3_wakeup_otg_thread);
+
+/*
+ * Put the OTG main thread to sleep for up to @msecs, waking early when
+ * main_wakeup_needed is set, a signal is pending, or the state machine
+ * is asked to exit.
+ *
+ * Returns remaining milliseconds (> 0) when woken early, 0 on timeout,
+ * or -EINTR when the thread must exit. Always clears
+ * main_wakeup_needed before returning.
+ */
+static int sleep_main_thread_timeout(struct dwc_otg2 *otg, int msecs)
+{
+	signed long jiffies;
+	int rc = msecs;
+
+	if (otg->state == DWC_STATE_EXIT) {
+		otg_dbg(otg, "Main thread exiting\n");
+		rc = -EINTR;
+		goto done;
+	}
+
+	if (signal_pending(current)) {
+		otg_dbg(otg, "Main thread signal pending\n");
+		rc = -EINTR;
+		goto done;
+	}
+	/* A wakeup already queued before we slept counts as "woken now". */
+	if (otg->main_wakeup_needed) {
+		otg_dbg(otg, "Main thread wakeup needed\n");
+		rc = msecs;
+		goto done;
+	}
+
+	jiffies = msecs_to_jiffies(msecs);
+	rc = wait_event_freezable_timeout(otg->main_wq,
+			otg->main_wakeup_needed,
+			jiffies);
+
+	/* Re-check exit after the wait: state may have changed meanwhile. */
+	if (otg->state == DWC_STATE_EXIT) {
+		otg_dbg(otg, "Main thread exiting\n");
+		rc = -EINTR;
+		goto done;
+	}
+
+	if (rc > 0)
+		rc = jiffies_to_msecs(rc);
+
+done:
+	otg->main_wakeup_needed = 0;
+	return rc;
+}
+
+/* Sleep indefinitely (in 5s slices) until woken or told to exit. */
+static int sleep_main_thread(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+
+	do {
+		rc = sleep_main_thread_timeout(otg, 5000);
+	} while (rc == 0);
+
+	return rc;
+}
+
+/*
+ * Atomically fetch-and-clear the pending OTG/user events selected by
+ * the given masks. Either output pointer may be NULL to skip that
+ * event class; events outside the mask are left pending.
+ */
+static void get_and_clear_events(struct dwc_otg2 *otg,
+		u32 otg_mask,
+		u32 user_mask,
+		u32 *otg_events,
+		u32 *user_events)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&otg->lock, flags);
+
+	if (otg_events) {
+		if (otg->otg_events & otg_mask) {
+			*otg_events = otg->otg_events;
+			otg->otg_events &= ~otg_mask;
+		} else
+			*otg_events = 0;
+	}
+
+	if (user_events) {
+		if (otg->user_events & user_mask) {
+			*user_events = otg->user_events;
+			otg->user_events &= ~user_mask;
+		} else
+			*user_events = 0;
+	}
+
+	spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+/*
+ * Consume pending events and report whether any masked event fired.
+ * Returns 1 when a masked OTG or user event was pending, 0 otherwise.
+ */
+static int check_event(struct dwc_otg2 *otg,
+		u32 otg_mask,
+		u32 user_mask,
+		u32 *otg_events,
+		u32 *user_events)
+{
+	get_and_clear_events(otg, otg_mask, user_mask,
+			otg_events, user_events);
+
+	otg_dbg(otg, "Event occurred:");
+
+	if (otg_events && (*otg_events & otg_mask)) {
+		otg_dbg(otg, "otg_events=0x%x, otg_mask=0x%x",
+				*otg_events, otg_mask);
+		return 1;
+	}
+
+	if (user_events && (*user_events & user_mask)) {
+		otg_dbg(otg, "user_events=0x%x, user_mask=0x%x",
+				*user_events, user_mask);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Block the main thread until one of the masked events arrives, a
+ * signal/exit interrupts the wait, or (if @timeout is non-zero) the
+ * timeout in milliseconds elapses.
+ *
+ * The device is released for runtime PM while sleeping and reacquired
+ * on wakeup. OEVTEN/ADPEVTEN are cleared on return, so the caller's
+ * state handler must re-arm the interrupts it needs.
+ *
+ * Return value follows sleep_main_thread_timeout(): > 0 woken by an
+ * event, 0 timed out, < 0 interrupted.
+ */
+static int sleep_until_event(struct dwc_otg2 *otg,
+		u32 otg_mask, u32 user_mask,
+		u32 *otg_events, u32 *user_events,
+		int timeout)
+{
+	int rc = 0;
+
+	/* Allow the controller to runtime-suspend while we wait. */
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(otg->dev);
+	/* Wait until it occurs, or timeout, or interrupt. */
+	if (timeout) {
+		otg_dbg(otg, "Waiting for event (timeout=%d)...\n", timeout);
+		rc = sleep_main_thread_until_condition_timeout(otg,
+				check_event(otg, otg_mask,
+					user_mask, otg_events, user_events), timeout);
+	} else {
+		otg_dbg(otg, "Waiting for event (no timeout)...\n");
+		rc = sleep_main_thread_until_condition(otg,
+				check_event(otg, otg_mask,
+					user_mask, otg_events, user_events));
+	}
+	pm_runtime_get_sync(otg->dev);
+
+	/* Disable the events */
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, ADPEVTEN, 0);
+
+	otg_dbg(otg, "Woke up rc=%d\n", rc);
+
+	return rc;
+}
+
+
+/*
+ * Start host-mode operation: run the optional platform prepare hook,
+ * then hand the hcd to the registered start_host callback.
+ *
+ * Returns the start_host callback's result, or -ENODEV when no host
+ * bus has been bound yet. Note the prepare hook's return value is
+ * overwritten by the start_host call.
+ */
+static int start_host(struct dwc_otg2 *otg)
+{
+	int ret = 0;
+	struct usb_hcd *hcd = NULL;
+
+	otg_dbg(otg, "\n");
+
+	if (!otg->otg.host) {
+		otg_err(otg, "Haven't set host yet!\n");
+		return -ENODEV;
+	}
+
+	if (dwc3_otg_pdata->prepare_start_host)
+		ret = dwc3_otg_pdata->prepare_start_host(otg);
+
+	/* Start host driver */
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	ret = otg->start_host(hcd);
+
+	return ret;
+}
+
+/*
+ * Stop host-mode operation via the registered stop_host callback, then
+ * run the optional platform after_stop hook.
+ *
+ * Returns the last hook's result, or -1 when no host was bound and no
+ * after_stop hook exists (preserves the original convention).
+ *
+ * Fix vs. original: the hcd pointer was derived with container_of()
+ * before otg->otg.host was checked for NULL; derive it only once we
+ * know a host is actually bound.
+ */
+static int stop_host(struct dwc_otg2 *otg)
+{
+	int ret = -1;
+	struct usb_hcd *hcd;
+
+	otg_dbg(otg, "\n");
+
+	if (otg->otg.host) {
+		hcd = container_of(otg->otg.host, struct usb_hcd, self);
+		ret = otg->stop_host(hcd);
+	}
+
+	if (dwc3_otg_pdata->after_stop_host)
+		ret = dwc3_otg_pdata->after_stop_host(otg);
+
+	return ret;
+}
+
+/*
+ * Start peripheral (gadget) mode: run the optional platform prepare
+ * hook, then call the registered start_device callback.
+ * NOTE(review): the prepare hook's return value is captured in `ret`
+ * but never checked — confirm failures are safe to ignore here.
+ */
+static void start_peripheral(struct dwc_otg2 *otg)
+{
+	struct usb_gadget *gadget;
+	int ret;
+
+	if (dwc3_otg_pdata->prepare_start_peripheral)
+		ret = dwc3_otg_pdata->prepare_start_peripheral(otg);
+
+	gadget = otg->otg.gadget;
+	if (!gadget) {
+		otg_err(otg, "Haven't set gadget yet!\n");
+		return;
+	}
+
+	otg->start_device(gadget);
+}
+
+/*
+ * Stop peripheral (gadget) mode and run the optional platform
+ * after_stop hook. No-op when no gadget is bound.
+ * NOTE(review): the after-hook's return value is ignored as well.
+ */
+static void stop_peripheral(struct dwc_otg2 *otg)
+{
+	struct usb_gadget *gadget = otg->otg.gadget;
+	int ret;
+
+	if (!gadget)
+		return;
+
+	otg->stop_device(gadget);
+
+	if (dwc3_otg_pdata->after_stop_peripheral)
+		ret = dwc3_otg_pdata->after_stop_peripheral(otg);
+}
+
+/* Read the USB ID pin state via the platform hook; RID_UNKNOWN if the
+ * platform provides none. */
+static int get_id(struct dwc_otg2 *otg)
+{
+	if (dwc3_otg_pdata->get_id)
+		return dwc3_otg_pdata->get_id(otg);
+	return RID_UNKNOWN;
+}
+
+/* Forward a charger connect/disconnect event to the platform hook;
+ * 0 (success) when no hook is registered. */
+static int dwc_otg_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event)
+{
+	if (dwc3_otg_pdata->notify_charger_type)
+		return dwc3_otg_pdata->notify_charger_type(otg, event);
+
+	return 0;
+}
+
+/* Copy the cached charging capability (type/event/current in mA) into
+ * the caller's power_supply_cable_props under otg->lock. */
+static int dwc_otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+	unsigned long flags;
+	struct power_supply_cable_props *cap =
+		(struct power_supply_cable_props *)data;
+	struct dwc_otg2 *otg = the_transceiver;
+
+	if (!x)
+		return -ENODEV;
+
+	if (!data)
+		return -EINVAL;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	cap->chrg_type = otg->charging_cap.chrg_type;
+	cap->chrg_evt = otg->charging_cap.chrg_evt;
+	cap->ma = otg->charging_cap.ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	return 0;
+}
+
+/* Drive (enable != 0) or stop driving VBus via the platform hook;
+ * -EINVAL if the platform provides none. */
+static int dwc_otg_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+	if (dwc3_otg_pdata->enable_vbus)
+		return dwc3_otg_pdata->enable_vbus(otg, enable);
+
+	return -EINVAL;
+}
+
+/* A grounded ID pin indicates a self-powered B-device is attached. */
+static int is_self_powered_b_device(struct dwc_otg2 *otg)
+{
+	return get_id(otg) == RID_GND;
+}
+
+/*
+ * Wait (up to VBUS_TIMEOUT) for the B-session-valid event after VBus
+ * should have risen.
+ *
+ * Returns DWC_STATE_CHARGER_DETECTION when VBus became valid,
+ * DWC_STATE_A_HOST on timeout (caller treats that as "VBus drive may
+ * have failed"), DWC_STATE_EXIT when interrupted, DWC_STATE_B_IDLE
+ * otherwise.
+ */
+static enum dwc_otg_state do_wait_vbus_raise(struct dwc_otg2 *otg)
+{
+	int ret;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, VBUS_TIMEOUT);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+		otg_dbg(otg, "OEVT_B_SES_VLD_EVT\n");
+		return DWC_STATE_CHARGER_DETECTION;
+	}
+
+	/* timeout*/
+	if (!ret)
+		return DWC_STATE_A_HOST;
+
+	return DWC_STATE_B_IDLE;
+}
+
+/*
+ * Wait (up to VBUS_TIMEOUT) for the A-session-end event after VBus
+ * should have fallen. Notifies the charger layer of disconnect when
+ * an ACA dock was attached.
+ *
+ * Returns DWC_STATE_B_IDLE when VBus dropped (or on timeout, with an
+ * error log), DWC_STATE_EXIT when interrupted, DWC_STATE_INVALID
+ * otherwise.
+ */
+static enum dwc_otg_state do_wait_vbus_fall(struct dwc_otg2 *otg)
+{
+	int ret;
+
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, VBUS_TIMEOUT);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	/* timeout*/
+	if (!ret) {
+		otg_err(otg, "Haven't get VBus drop event! Maybe something wrong\n");
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+/*
+ * Dedicated-charger state: hand charging control to the platform hook
+ * and then sleep until the session ends (cable unplug), at which point
+ * the charger layer is told about the disconnect.
+ *
+ * Returns DWC_STATE_B_IDLE on unplug, DWC_STATE_EXIT when
+ * interrupted, DWC_STATE_INVALID otherwise.
+ */
+static enum dwc_otg_state do_charging(struct dwc_otg2 *otg)
+{
+	int ret;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	if (dwc3_otg_pdata->do_charging)
+		dwc3_otg_pdata->do_charging(otg);
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, 0);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+/* Query the attached charger type via the platform hook; TYPE_NONE if
+ * the platform provides none. */
+static enum power_supply_charger_cable_type
+		get_charger_type(struct dwc_otg2 *otg)
+{
+	if (dwc3_otg_pdata->get_charger_type)
+		return dwc3_otg_pdata->get_charger_type(otg);
+
+	return POWER_SUPPLY_CHARGER_TYPE_NONE;
+}
+
+/*
+ * Classify the attached charger, pick the next state, cache the
+ * charging capability, and notify the charger layer where appropriate.
+ *
+ * Returns the next state machine state, or DWC_STATE_INVALID when the
+ * detected charger type cannot be mapped to a current budget.
+ *
+ * Fix vs. original: the unmappable-type path returned -EINVAL from an
+ * enum dwc_otg_state function; it now returns DWC_STATE_INVALID like
+ * the sibling state handlers.
+ */
+static enum dwc_otg_state do_charger_detection(struct dwc_otg2 *otg)
+{
+	enum dwc_otg_state state = DWC_STATE_INVALID;
+	enum power_supply_charger_cable_type charger =
+			POWER_SUPPLY_CHARGER_TYPE_NONE;
+	unsigned long flags, ma = 0;
+
+	charger = get_charger_type(otg);
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+		otg_err(otg, "Ignore micro ACA charger.\n");
+		charger = POWER_SUPPLY_CHARGER_TYPE_NONE;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		state = DWC_STATE_B_PERIPHERAL;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		state = DWC_STATE_A_HOST;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		state = DWC_STATE_CHARGING;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+	default:
+		/* No charger detected: a grounded ID pin means a
+		 * self-powered B-device, so act as host. */
+		if (is_self_powered_b_device(otg)) {
+			state = DWC_STATE_A_HOST;
+			charger = POWER_SUPPLY_CHARGER_TYPE_B_DEVICE;
+			break;
+		}
+	};
+
+	/* Map the charger type to a current budget in mA. */
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		ma = 1500;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		/* Notify SDP current is 100ma before enumeration. */
+		ma = 100;
+		break;
+	default:
+		otg_err(otg, "Charger type is not valid to notify battery\n");
+		return DWC_STATE_INVALID;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.chrg_type = charger;
+	otg->charging_cap.ma = ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		if (dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_CONNECT) < 0)
+			otg_err(otg, "Notify battery type failed!\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		/* SDP is complicate, it will be handle in set_power */
+	default:
+		break;
+	}
+
+	return state;
+}
+
+/*
+ * B_IDLE state handler: clear the cached charging capability, then
+ * wait for an ID change, B-session-valid, or user-forced role change
+ * and translate it into the next state.
+ */
+static enum dwc_otg_state do_connector_id_status(struct dwc_otg2 *otg)
+{
+	int ret;
+	unsigned long flags;
+	u32 events = 0, user_events = 0;
+	u32 otg_mask = 0, user_mask = 0;
+
+	otg_dbg(otg, "\n");
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+	otg->charging_cap.ma = 0;
+	otg->charging_cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+stay_b_idle:
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+	user_mask = USER_ID_B_CHANGE_EVENT |
+		USER_ID_A_CHANGE_EVENT;
+
+	/* Platform-specific idle preparation, if provided. */
+	if (dwc3_otg_pdata->b_idle)
+		dwc3_otg_pdata->b_idle(otg);
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &events,
+			&user_events, 0);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_SES_VLD_DET_EVNT\n");
+		return DWC_STATE_CHARGER_DETECTION;
+	}
+
+	if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+
+		/* Prevent user fast plug out after plug in.
+		 * It will cause the first ID change event lost.
+		 * So need to check real ID currently.
+		 */
+		if (get_id(otg) == RID_FLOAT) {
+			otg_dbg(otg, "Stay DWC_STATE_INIT\n");
+			goto stay_b_idle;
+		}
+		return DWC_STATE_WAIT_VBUS_RAISE;
+	}
+
+	if (user_events & USER_ID_A_CHANGE_EVENT) {
+		otg_dbg(otg, "events is user id A change\n");
+		return DWC_STATE_A_HOST;
+	}
+
+	if (user_events & USER_ID_B_CHANGE_EVENT) {
+		otg_dbg(otg, "events is user id B change\n");
+		return DWC_STATE_B_PERIPHERAL;
+	}
+
+	return DWC_STATE_B_IDLE;
+}
+
+/*
+ * A_HOST state handler: drive VBus (unless an ACA dock powers the
+ * bus), start the host stack, then service session-end, ID-change and
+ * user events until the role must change.
+ */
+static enum dwc_otg_state do_a_host(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+	u32 otg_events, user_events, otg_mask, user_mask;
+	int id = RID_UNKNOWN;
+	unsigned long flags;
+
+	if (otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK) {
+		dwc_otg_enable_vbus(otg, 1);
+
+		/* meant receive vbus valid event*/
+		if (do_wait_vbus_raise(otg) == DWC_STATE_A_HOST)
+			otg_err(otg, "Drive VBUS maybe fail!\n");
+	}
+
+	rc = start_host(otg);
+	if (rc < 0) {
+		stop_host(otg);
+		otg_err(otg, "start_host failed!");
+		return DWC_STATE_INVALID;
+	}
+
+stay_host:
+	otg_events = 0;
+	user_events = 0;
+
+	user_mask = USER_A_BUS_DROP |
+		USER_ID_B_CHANGE_EVENT;
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	rc = sleep_until_event(otg,
+			otg_mask, user_mask,
+			&otg_events, &user_events, 0);
+	if (rc < 0) {
+		stop_host(otg);
+		return DWC_STATE_EXIT;
+	}
+
+	/* Higher priority first */
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+
+		/* ACA-Dock plug out */
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		else
+			dwc_otg_enable_vbus(otg, 0);
+
+		stop_host(otg);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (user_events & USER_A_BUS_DROP) {
+		/* Due to big consume by DUT, even ACA-Dock connected,
+		 * the battery capability still maybe decrease. For this
+		 * case, still save host mode. Because DUT haven't drive VBus.*/
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			goto stay_host;
+
+		dwc_otg_enable_vbus(otg, 0);
+		stop_host(otg);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		id = get_id(otg);
+
+		/* Plug out ACA_DOCK/USB device */
+		if (id == RID_FLOAT) {
+			if (otg->charging_cap.chrg_type ==
+					POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK) {
+				/* ACA_DOCK plug out, receive
+				 * id change prior to vBus change
+				 */
+				stop_host(otg);
+			} else {
+				/* Normal USB device plug out */
+				spin_lock_irqsave(&otg->lock, flags);
+				otg->charging_cap.chrg_type =
+					POWER_SUPPLY_CHARGER_TYPE_NONE;
+				spin_unlock_irqrestore(&otg->lock, flags);
+
+				stop_host(otg);
+				dwc_otg_enable_vbus(otg, 0);
+			}
+		} else if (id == RID_GND || id == RID_A) {
+			otg_dbg(otg, "Stay DWC_STATE_A_HOST!!\n");
+			/* Prevent user fast plug in after plug out.
+			 * It will cause the first ID change event lost.
+			 * So need to check real ID currently.
+			 */
+			goto stay_host;
+		} else {
+			otg_err(otg, "Meet invalid charger cases!");
+			spin_lock_irqsave(&otg->lock, flags);
+			otg->charging_cap.chrg_type =
+				POWER_SUPPLY_CHARGER_TYPE_NONE;
+			spin_unlock_irqrestore(&otg->lock, flags);
+
+			stop_host(otg);
+		}
+		return DWC_STATE_WAIT_VBUS_FALL;
+	}
+
+	/* Higher priority first */
+	if (user_events & USER_ID_B_CHANGE_EVENT) {
+		otg_dbg(otg, "USER_ID_B_CHANGE_EVENT\n");
+		stop_host(otg);
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		return DWC_STATE_B_IDLE;
+	}
+
+	/* Invalid state */
+	return DWC_STATE_INVALID;
+}
+
+/*
+ * B_PERIPHERAL state handler: sleep until the session ends or the user
+ * forces host mode.
+ *
+ * Returns DWC_STATE_B_IDLE on session end (after notifying the charger
+ * layer of disconnect) or user ID-A change, DWC_STATE_EXIT when
+ * interrupted, DWC_STATE_INVALID otherwise.
+ *
+ * Fix vs. original: declared `static int` while returning
+ * enum dwc_otg_state values; now uses the enum return type like every
+ * sibling state handler (callers assign the result to int, so this is
+ * fully compatible).
+ */
+static enum dwc_otg_state do_b_peripheral(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+	u32 otg_mask, user_mask, otg_events, user_events;
+
+	otg_mask = 0;
+	user_mask = 0;
+	otg_events = 0;
+	user_events = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+	user_mask = USER_ID_A_CHANGE_EVENT;
+
+	rc = sleep_until_event(otg,
+			otg_mask, user_mask,
+			&otg_events, &user_events, 0);
+	if (rc < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (user_events & USER_ID_A_CHANGE_EVENT) {
+		otg_dbg(otg, "USER_ID_A_CHANGE_EVENT\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+/* Charger driver may send ID change and VBus change event to OTG driver.
+ * This is like IRQ handler, just the event source is from charger driver.
+ * Because on Merrifield platform, the ID line and VBus line are connect to
+ * PMic which can make USB controller and PHY power off to save power.
+ */
+static int dwc_otg_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	/* Delegate entirely to the platform hook; NOTIFY_DONE otherwise. */
+	if (dwc3_otg_pdata->otg_notifier_handler)
+		return dwc3_otg_pdata->otg_notifier_handler(nb, event, data);
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * OTG state machine thread: dispatch on otg->state, run the matching
+ * do_* handler, and loop with its returned next state until
+ * DWC_STATE_TERMINATED. Holds a runtime-PM reference for the thread's
+ * lifetime (individual waits drop it temporarily).
+ */
+int otg_main_thread(void *data)
+{
+	struct dwc_otg2 *otg = (struct dwc_otg2 *)data;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	pm_runtime_get_sync(otg->dev);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	otg_dbg(otg, "Thread running\n");
+	while (otg->state != DWC_STATE_TERMINATED) {
+		int next = DWC_STATE_B_IDLE;
+		otg_dbg(otg, "\n\n\nMain thread entering state\n");
+
+		switch (otg->state) {
+		case DWC_STATE_B_IDLE:
+			otg_dbg(otg, "DWC_STATE_B_IDLE\n");
+			next = do_connector_id_status(otg);
+			break;
+		case DWC_STATE_CHARGER_DETECTION:
+			otg_dbg(otg, "DWC_STATE_CHARGER_DETECTION\n");
+			next = do_charger_detection(otg);
+			break;
+		case DWC_STATE_WAIT_VBUS_RAISE:
+			otg_dbg(otg, "DWC_STATE_WAIT_VBUS_RAISE\n");
+			next = do_wait_vbus_raise(otg);
+			break;
+		case DWC_STATE_WAIT_VBUS_FALL:
+			otg_dbg(otg, "DWC_STATE_WAIT_VBUS_FALL\n");
+			next = do_wait_vbus_fall(otg);
+			break;
+		case DWC_STATE_CHARGING:
+			otg_dbg(otg, "DWC_STATE_CHARGING\n");
+			next = do_charging(otg);
+			break;
+		case DWC_STATE_A_HOST:
+			otg_dbg(otg, "DWC_STATE_A_HOST\n");
+			next = do_a_host(otg);
+			break;
+		case DWC_STATE_B_PERIPHERAL:
+			otg_dbg(otg, "DWC_STATE_B_PERIPHERAL\n");
+			start_peripheral(otg);
+			next = do_b_peripheral(otg);
+
+			stop_peripheral(otg);
+			break;
+		case DWC_STATE_EXIT:
+			otg_dbg(otg, "DWC_STATE_EXIT\n");
+			next = DWC_STATE_TERMINATED;
+			break;
+		case DWC_STATE_INVALID:
+			otg_dbg(otg, "DWC_STATE_INVALID!!!\n");
+			/* fallthrough: invalid state sleeps like unknown */
+		default:
+			otg_dbg(otg, "Unknown State %d, sleeping...\n",
+					otg->state);
+			sleep_main_thread(otg);
+			break;
+		}
+
+		otg->prev = otg->state;
+		otg->state = next;
+	}
+
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(otg->dev);
+	otg->main_thread = NULL;
+	otg_dbg(otg, "OTG main thread exiting....\n");
+
+	return 0;
+}
+
+/*
+ * Spawn the OTG main thread once all children required by the
+ * configured mode (device-only, host-only, or dual-role) are bound.
+ * Serialized against stop_main_thread() by the module mutex.
+ *
+ * Fix vs. original: kthread_create() returns ERR_PTR() on failure,
+ * which was passed unchecked to wake_up_process(); check with IS_ERR()
+ * and reset main_thread so a later bind can retry.
+ */
+static void start_main_thread(struct dwc_otg2 *otg)
+{
+	enum dwc3_otg_mode mode = dwc3_otg_pdata->mode;
+	bool children_ready = false;
+
+	mutex_lock(&lock);
+
+	if ((mode == DWC3_DEVICE_ONLY) &&
+			otg->otg.gadget)
+		children_ready = true;
+
+	if ((mode == DWC3_HOST_ONLY) &&
+			otg->otg.host)
+		children_ready = true;
+
+	if ((mode == DWC3_DRD) &&
+			otg->otg.host && otg->otg.gadget)
+		children_ready = true;
+
+	if (!otg->main_thread && children_ready) {
+		otg_dbg(otg, "Starting OTG main thread\n");
+		otg->main_thread = kthread_create(otg_main_thread, otg, "otg");
+		if (IS_ERR(otg->main_thread)) {
+			otg_err(otg, "Failed to create OTG main thread\n");
+			otg->main_thread = NULL;
+		} else {
+			wake_up_process(otg->main_thread);
+		}
+	}
+	mutex_unlock(&lock);
+}
+
+/*
+ * stop_main_thread - ask the OTG state-machine thread to exit.
+ *
+ * Only sets DWC_STATE_EXIT and wakes the thread; the thread clears
+ * otg->main_thread itself when it leaves its loop, so this function
+ * does NOT wait for the thread to actually terminate before returning.
+ */
+static void stop_main_thread(struct dwc_otg2 *otg)
+{
+	mutex_lock(&lock);
+	if (otg->main_thread) {
+		otg_dbg(otg, "Stopping OTG main thread\n");
+		otg->state = DWC_STATE_EXIT;
+		dwc3_wakeup_otg_thread(otg);
+	}
+	mutex_unlock(&lock);
+}
+
+/*
+ * dwc_otg2_set_peripheral - usb_otg callback to bind/unbind a gadget.
+ * @x:      the OTG transceiver (may be NULL on broken callers)
+ * @gadget: gadget to bind, or NULL to unbind and stop the state machine
+ *
+ * Returns 0 on bind, -ENODEV on NULL transceiver or unbind.
+ */
+static int dwc_otg2_set_peripheral(struct usb_otg *x,
+		struct usb_gadget *gadget)
+{
+	struct dwc_otg2 *otg;
+
+	if (!x) {
+		/*
+		 * 'otg' is still uninitialized here; passing it to
+		 * otg_err() would dereference a garbage pointer.
+		 */
+		pr_err("%s: otg is NULL!\n", __func__);
+		return -ENODEV;
+	}
+
+	otg = xceiv_to_dwc_otg2(x);
+	otg_dbg(otg, "\n");
+
+	if (!gadget) {
+		otg->otg.gadget = NULL;
+		stop_main_thread(otg);
+		return -ENODEV;
+	}
+
+	otg->otg.gadget = gadget;
+	otg->usb2_phy.state = OTG_STATE_B_IDLE;
+	start_main_thread(otg);
+	return 0;
+}
+
+/*
+ * dwc_otg2_set_host - usb_otg callback to bind/unbind a host controller.
+ * @x:    the OTG transceiver (may be NULL on broken callers)
+ * @host: host bus to bind, or NULL to unbind and stop the state machine
+ *
+ * Returns 0 on bind, -ENODEV on NULL transceiver or unbind.
+ */
+static int dwc_otg2_set_host(struct usb_otg *x, struct usb_bus *host)
+{
+	struct dwc_otg2 *otg;
+
+	if (!x) {
+		/*
+		 * 'otg' is still uninitialized here; the original
+		 * otg_dbg(otg, ...) dereferenced a garbage pointer.
+		 * Report at error severity like set_peripheral does.
+		 */
+		pr_err("%s: otg is NULL!\n", __func__);
+		return -ENODEV;
+	}
+
+	otg = xceiv_to_dwc_otg2(x);
+	otg_dbg(otg, "\n");
+
+	if (!host) {
+		otg->otg.host = NULL;
+		stop_main_thread(otg);
+		return -ENODEV;
+	}
+
+	otg->otg.host = host;
+	start_main_thread(otg);
+	return 0;
+}
+
+/*
+ * ulpi_read - read a ULPI PHY register through the GUSB2PHYACC0
+ * indirect-access window of the DWC3 controller.
+ * @phy: usb2 phy embedded in struct dwc_otg2
+ * @reg: ULPI register address; values with the
+ *       EXTEND_ULPI_REGISTER_ACCESS_MASK bits set use the
+ *       ULPI_ACCESS_EXTENDED encoding
+ *
+ * Returns the 8-bit register value (>= 0) on success, -EBUSY if the
+ * PHY never becomes free, -ETIMEDOUT if the access never completes.
+ */
+static int ulpi_read(struct usb_phy *phy, u32 reg)
+{
+	struct dwc_otg2 *otg = container_of(phy, struct dwc_otg2, usb2_phy);
+	u32 val32 = 0, count = 200;
+	u8 val, tmp;
+
+	reg &= 0xFF;
+
+	/* Wait (bounded) for any in-flight PHY access to finish. */
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSBSY)
+			udelay(5);
+		else
+			break;
+
+		count--;
+	}
+
+	if (!count) {
+		otg_err(otg, "USB2 PHY always busy!!\n");
+		return -EBUSY;
+	}
+
+	count = 200;
+	/* Determine if use extend registers access */
+	if (reg & EXTEND_ULPI_REGISTER_ACCESS_MASK) {
+		otg_dbg(otg, "Access extend registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(ULPI_ACCESS_EXTENDED)
+			| GUSB2PHYACC0_VCTRL(reg);
+	} else {
+		otg_dbg(otg, "Access normal registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ | GUSB2PHYACC0_REGADDR(reg)
+			| GUSB2PHYACC0_VCTRL(0x00);
+	}
+	otg_write(otg, GUSB2PHYACC0, val32);
+
+	/* Busy-poll for completion; no udelay here, so the window is
+	 * roughly 200 back-to-back register reads. */
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSDONE) {
+			val = otg_read(otg, GUSB2PHYACC0) &
+				GUSB2PHYACC0_REGDATA_MASK;
+			otg_dbg(otg, "%s - reg 0x%x data 0x%x\n",
+				__func__, reg, val);
+			goto cleanup;
+		}
+
+		count--;
+	}
+
+	otg_err(otg, "%s read PHY data failed.\n", __func__);
+
+	return -ETIMEDOUT;
+
+cleanup:
+	/* Clear GUSB2PHYACC0[16:21] before return.
+	 * Otherwise, it will cause PHY can't in workable
+	 * state. This is one dwc3 controller silicon bug. */
+	tmp = otg_read(otg, GUSB2PHYACC0);
+	otg_write(otg, GUSB2PHYACC0, tmp &
+			~GUSB2PHYACC0_REGADDR(0x3F));
+	return val;
+}
+
+/*
+ * ulpi_write - write a ULPI PHY register through the GUSB2PHYACC0
+ * indirect-access window of the DWC3 controller.
+ * @phy: usb2 phy embedded in struct dwc_otg2
+ * @val: 8-bit value to write
+ * @reg: ULPI register address; values with the
+ *       EXTEND_ULPI_REGISTER_ACCESS_MASK bits set use the
+ *       ULPI_ACCESS_EXTENDED encoding
+ *
+ * Returns 0 on success, -EBUSY if the PHY never becomes free,
+ * -ETIMEDOUT if the write never completes.
+ */
+static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+{
+	struct dwc_otg2 *otg = container_of(phy, struct dwc_otg2, usb2_phy);
+	u32 val32 = 0, count = 200;
+	u8 tmp;
+
+	val &= 0xFF;
+	reg &= 0xFF;
+
+	/* Wait (bounded) for any in-flight PHY access to finish. */
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSBSY)
+			udelay(5);
+		else
+			break;
+
+		count--;
+	}
+
+	if (!count) {
+		otg_err(otg, "USB2 PHY always busy!!\n");
+		return -EBUSY;
+	}
+
+	count = 200;
+
+	if (reg & EXTEND_ULPI_REGISTER_ACCESS_MASK) {
+		otg_dbg(otg, "Access extend registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(ULPI_ACCESS_EXTENDED)
+			| GUSB2PHYACC0_VCTRL(reg)
+			| GUSB2PHYACC0_REGWR | GUSB2PHYACC0_REGDATA(val);
+	} else {
+		otg_dbg(otg, "Access normal registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(reg)
+			| GUSB2PHYACC0_REGWR
+			| GUSB2PHYACC0_REGDATA(val);
+	}
+	otg_write(otg, GUSB2PHYACC0, val32);
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSDONE) {
+			otg_dbg(otg, "%s - reg 0x%x data 0x%x write done\n",
+				__func__, reg, val);
+			goto cleanup;
+		}
+
+		count--;
+	}
+
+	/* Was "read PHY data failed" - copy-paste from ulpi_read(). */
+	otg_err(otg, "%s write PHY data failed.\n", __func__);
+
+	return -ETIMEDOUT;
+
+cleanup:
+	/* Clear GUSB2PHYACC0[16:21] before return.
+	 * Otherwise, it will cause PHY can't in workable
+	 * state. This is one dwc3 controller silicon bug. */
+	tmp = otg_read(otg, GUSB2PHYACC0);
+	otg_write(otg, GUSB2PHYACC0, tmp &
+			~GUSB2PHYACC0_REGADDR(0x3F));
+	return 0;
+}
+
+/* ULPI register accessors exported to the generic USB PHY layer. */
+static struct usb_phy_io_ops dwc_otg_io_ops = {
+	.read = ulpi_read,
+	.write = ulpi_write,
+};
+
+/*
+ * dwc3_otg_alloc - allocate and register the OTG transceiver object.
+ * @dev: parent (PCI) device
+ *
+ * Allocates the dwc_otg2 state, initializes both the USB2 and USB3
+ * usb_phy structures, registers the OTG event notifier and adds both
+ * PHYs to the PHY framework.
+ *
+ * Returns the new object (also stored in the_transceiver), or NULL on
+ * failure.  On failure the_transceiver is reset to NULL so callers
+ * cannot observe a dangling pointer to the freed allocation.
+ */
+static struct dwc_otg2 *dwc3_otg_alloc(struct device *dev)
+{
+	struct dwc_otg2 *otg = NULL;
+	struct usb_phy *usb_phy;
+	int retval;
+
+	otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+	if (!otg) {
+		otg_err(otg, "Alloc otg failed\n");
+		return NULL;
+	}
+
+	the_transceiver = otg;
+	otg->otg_data = dev->platform_data;
+
+	usb_phy = &otg->usb2_phy;
+	otg->otg.phy = usb_phy;
+	otg->usb2_phy.otg = &otg->otg;
+
+	otg->dev = dev;
+	otg->usb3_phy.dev = otg->dev;
+	otg->usb3_phy.label = "dwc-usb3-phy";
+	otg->usb3_phy.state = OTG_STATE_UNDEFINED;
+	otg->usb3_phy.otg = &otg->otg;
+	otg->usb2_phy.dev = otg->dev;
+	otg->usb2_phy.label = "dwc-usb2-phy";
+	otg->usb2_phy.state = OTG_STATE_UNDEFINED;
+	otg->usb2_phy.set_power = dwc3_otg_pdata->set_power;
+	otg->usb2_phy.get_chrg_status = dwc_otg_get_chrg_status;
+	otg->usb2_phy.io_ops = &dwc_otg_io_ops;
+	otg->usb2_phy.otg = &otg->otg;
+	otg->otg.set_host = dwc_otg2_set_host;
+	otg->otg.set_peripheral = dwc_otg2_set_peripheral;
+	ATOMIC_INIT_NOTIFIER_HEAD(&otg->usb2_phy.notifier);
+	ATOMIC_INIT_NOTIFIER_HEAD(&otg->usb3_phy.notifier);
+
+	otg->state = DWC_STATE_B_IDLE;
+	spin_lock_init(&otg->lock);
+	init_waitqueue_head(&otg->main_wq);
+
+	/* Register otg notifier to monitor ID and VBus change events */
+	otg->nb.notifier_call = dwc_otg_handle_notification;
+	usb_register_notifier(&otg->usb2_phy, &otg->nb);
+
+	otg_dbg(otg, "Version: %s\n", VERSION);
+	retval = usb_add_phy(&otg->usb2_phy, USB_PHY_TYPE_USB2);
+	if (retval) {
+		otg_err(otg, "can't register transceiver, err: %d\n",
+			retval);
+		goto err1;
+	}
+
+	retval = usb_add_phy(&otg->usb3_phy, USB_PHY_TYPE_USB3);
+	if (retval) {
+		otg_err(otg, "can't register transceiver, err: %d\n",
+			retval);
+		goto err2;
+	}
+
+	return otg;
+
+err2:
+	usb_remove_phy(&otg->usb2_phy);
+
+err1:
+	/* Undo the notifier registration made above. */
+	usb_unregister_notifier(&otg->usb2_phy, &otg->nb);
+	/*
+	 * Don't leave the global pointing at freed memory: the probe
+	 * error path checks the_transceiver to decide whether to call
+	 * dwc_otg_remove(), which would be a use-after-free.
+	 */
+	the_transceiver = NULL;
+	kfree(otg);
+
+	return NULL;
+}
+
+/*
+ * dwc3_otg_create_children - create the dwc3 host and/or gadget
+ * platform child devices, depending on dwc3_otg_pdata->mode.
+ * @otg: OTG transceiver state
+ * @res: two resources: one IORESOURCE_MEM, one IORESOURCE_IRQ
+ * @num: must be 2
+ *
+ * Maps the controller MMIO window into otg->usb2_phy.io_priv and hands
+ * it to the children via 'platform_par'.
+ *
+ * Returns 0 on success or a negative errno; every failure path now
+ * sets a real error code (the original returned 0 when kzalloc or
+ * platform_device_alloc failed).
+ */
+static int dwc3_otg_create_children(struct dwc_otg2 *otg,
+		struct resource *res, int num)
+{
+	struct platform_device *dwc_host, *dwc_gadget;
+	enum dwc3_otg_mode mode = dwc3_otg_pdata->mode;
+	int retval = 0, i;
+
+	if (!otg || !res)
+		return -EINVAL;
+
+	if (num != 2)
+		return -EINVAL;
+
+	dwc_host = dwc_gadget = NULL;
+
+	for (i = 0; i < 2; i++) {
+		if (res[i].flags == IORESOURCE_MEM) {
+			/* Resource ranges are inclusive: size is
+			 * end - start + 1, not end - start. */
+			otg->usb2_phy.io_priv = ioremap_nocache(
+				res[i].start, res[i].end - res[i].start + 1);
+			if (!otg->usb2_phy.io_priv) {
+				otg_err(otg, "dwc3 otg ioremap failed\n");
+				return -ENOMEM;
+			}
+			break;
+		}
+	}
+
+	/* resource have no mem io resource */
+	if (!otg->usb2_phy.io_priv)
+		return -EINVAL;
+
+	platform_par = kzalloc(sizeof(*platform_par), GFP_KERNEL);
+	if (!platform_par) {
+		otg_err(otg, "alloc dwc_device_par failed\n");
+		retval = -ENOMEM;
+		goto err1;
+	}
+
+	platform_par->io_addr = otg->usb2_phy.io_priv;
+	platform_par->len = res[i].end - res[i].start + 1;
+
+	if (mode == DWC3_DEVICE_ONLY)
+		goto device_only;
+
+	dwc_host = platform_device_alloc(DWC3_HOST_NAME,
+			HOST_DEVID);
+	if (!dwc_host) {
+		otg_err(otg, "couldn't allocate dwc3 host device\n");
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	retval = platform_device_add_resources(dwc_host, res, num);
+	if (retval) {
+		otg_err(otg, "couldn't add resources to dwc3 device\n");
+		/* Host was never added; put it instead of unregister. */
+		platform_device_put(dwc_host);
+		goto err2;
+	}
+
+	platform_device_add_data(dwc_host, platform_par,
+			sizeof(struct dwc_device_par));
+
+	dwc_host->dev.dma_mask = otg->dev->dma_mask;
+	dwc_host->dev.dma_parms = otg->dev->dma_parms;
+	dwc_host->dev.parent = otg->dev;
+
+	retval = platform_device_add(dwc_host);
+	if (retval) {
+		otg_err(otg, "failed to register dwc3 host\n");
+		platform_device_put(dwc_host);
+		goto err2;
+	}
+
+	otg->host = dwc_host;
+
+	if (mode != DWC3_DRD)
+		return 0;
+
+device_only:
+	dwc_gadget = platform_device_alloc(DWC3_DEVICE_NAME,
+			GADGET_DEVID);
+	if (!dwc_gadget) {
+		otg_err(otg, "couldn't allocate dwc3 device\n");
+		retval = -ENOMEM;
+		goto err3;
+	}
+
+	retval = platform_device_add_resources(dwc_gadget,
+			res, num);
+	if (retval) {
+		otg_err(otg, "couldn't add resources to dwc3 device\n");
+		platform_device_put(dwc_gadget);
+		goto err3;
+	}
+
+	dwc_gadget->dev.dma_mask = otg->dev->dma_mask;
+	dwc_gadget->dev.dma_parms = otg->dev->dma_parms;
+	dwc_gadget->dev.parent = otg->dev;
+
+	platform_device_add_data(dwc_gadget, platform_par,
+			sizeof(struct dwc_device_par));
+	retval = platform_device_add(dwc_gadget);
+	if (retval) {
+		otg_err(otg, "failed to register dwc3 gadget\n");
+		platform_device_put(dwc_gadget);
+		goto err3;
+	}
+	otg->gadget = dwc_gadget;
+
+	return 0;
+
+err3:
+	/* The host child was added only in DRD mode on this path. */
+	if (mode == DWC3_DRD)
+		platform_device_unregister(dwc_host);
+
+err2:
+	kfree(platform_par);
+	platform_par = NULL;
+
+err1:
+	iounmap(otg->usb2_phy.io_priv);
+	otg->usb2_phy.io_priv = NULL;
+
+	return retval;
+}
+
+#ifdef CONFIG_PCI
+
+/*
+ * dwc_otg_probe - PCI probe for the DWC3 OTG controller.
+ *
+ * Enables the PCI device, allocates the transceiver state, claims the
+ * BAR0 MMIO region, creates the host/gadget child devices and arms
+ * runtime PM autosuspend.
+ *
+ * Returns 0 on success or a negative errno; the original returned 0
+ * (success) when dwc3_otg_alloc() failed because retval was never set.
+ */
+static int dwc_otg_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	int retval = 0;
+	struct resource res[2];
+	struct dwc_otg2 *otg = NULL;
+	unsigned long resource, len;
+
+	if (!dwc3_otg_pdata)
+		return -ENODEV;
+
+	if (pci_enable_device(pdev) < 0) {
+		dev_err(&pdev->dev, "pci device enable failed\n");
+		return -ENODEV;
+	}
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_set_master(pdev);
+
+	otg = dwc3_otg_alloc(&pdev->dev);
+	if (!otg) {
+		/* otg is NULL: otg_err() only prints via (d)->dev when
+		 * debug is on; report an error code instead of 0. */
+		dev_err(&pdev->dev, "dwc3 otg init failed\n");
+		retval = -ENOMEM;
+		goto err;
+	}
+
+	/* control register: BAR 0 */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		otg_err(otg, "Request memory region failed\n");
+		retval = -EBUSY;
+		goto err;
+	}
+
+	otg_dbg(otg, "dwc otg pci resouce: 0x%lu, len: 0x%lu\n",
+			resource, len);
+	otg_dbg(otg, "vendor: 0x%x, device: 0x%x\n",
+			pdev->vendor, pdev->device);
+
+	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
+	res[0].start = pci_resource_start(pdev, 0);
+	res[0].end = pci_resource_end(pdev, 0);
+	res[0].name = "dwc_usb3_io";
+	res[0].flags = IORESOURCE_MEM;
+
+	res[1].start = pdev->irq;
+	res[1].name = "dwc_usb3_irq";
+	res[1].flags = IORESOURCE_IRQ;
+
+	retval = dwc3_otg_create_children(otg, res, ARRAY_SIZE(res));
+	if (retval) {
+		otg_err(otg, "dwc3 otg create alloc children failed\n");
+		goto err;
+	}
+
+	otg->irqnum = pdev->irq;
+
+	if (dwc3_otg_pdata->platform_init) {
+		retval = dwc3_otg_pdata->platform_init(otg);
+		if (retval)
+			goto err;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
+	return 0;
+
+err:
+	/*
+	 * NOTE(review): dwc_otg_remove() unconditionally releases the
+	 * BAR0 region and unmaps io_priv; on early failures (before
+	 * request_mem_region/ioremap) that tears down resources that
+	 * were never claimed - confirm against dwc_otg_remove().
+	 */
+	if (the_transceiver)
+		dwc_otg_remove(pdev);
+
+	return retval;
+}
+
+/*
+ * dwc_otg_remove - PCI remove: tear down children, PM state, PHYs and
+ * the BAR0 MMIO claim, then free the transceiver object.
+ *
+ * Also used by the probe error path (guarded by the_transceiver).
+ */
+static void dwc_otg_remove(struct pci_dev *pdev)
+{
+	struct dwc_otg2 *otg = the_transceiver;
+	/* NOTE(review): pci_resource_start/len return resource_size_t;
+	 * storing them in 'int' may truncate on 64-bit - confirm. */
+	int resource, len;
+
+	if (otg->gadget)
+		platform_device_unregister(otg->gadget);
+	if (otg->host)
+		platform_device_unregister(otg->host);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	kfree(platform_par);
+	iounmap(otg->usb2_phy.io_priv);
+
+	usb_remove_phy(&otg->usb2_phy);
+	usb_remove_phy(&otg->usb3_phy);
+	kfree(otg);
+	/* Local assignment only; the_transceiver is cleared below. */
+	otg = NULL;
+
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	release_mem_region(resource, len);
+
+	pci_disable_device(pdev);
+
+	the_transceiver = NULL;
+}
+
+/*
+ * dwc_otg_shutdown - PCI shutdown hook: stop the state machine and
+ * quiesce the device for reboot/poweroff.
+ *
+ * NOTE(review): stop_main_thread() does not wait for the thread to
+ * exit, so the thread may still touch registers briefly after
+ * pci_disable_device() - confirm this is acceptable at shutdown.
+ */
+static void dwc_otg_shutdown(struct pci_dev *pdev)
+{
+	struct dwc_otg2 *otg = the_transceiver;
+
+	/* stop main thread, ignore notification events */
+	stop_main_thread(otg);
+
+	pci_disable_device(pdev);
+}
+
+/* Runtime-PM idle: delegate to the vendor hook when one is provided. */
+static int dwc_otg_runtime_idle(struct device *dev)
+{
+	struct dwc3_otg_hw_ops *ops = dwc3_otg_pdata;
+
+	if (!ops->idle)
+		return 0;
+
+	return ops->idle(the_transceiver);
+}
+
+/* Runtime-PM suspend: delegate to the vendor hook when provided. */
+static int dwc_otg_runtime_suspend(struct device *dev)
+{
+	struct dwc3_otg_hw_ops *ops = dwc3_otg_pdata;
+
+	if (!ops->suspend)
+		return 0;
+
+	return ops->suspend(the_transceiver);
+}
+
+/* Runtime-PM resume: delegate to the vendor hook when provided. */
+static int dwc_otg_runtime_resume(struct device *dev)
+{
+	struct dwc3_otg_hw_ops *ops = dwc3_otg_pdata;
+
+	if (!ops->resume)
+		return 0;
+
+	return ops->resume(the_transceiver);
+}
+
+/* System suspend: reuses the vendor suspend hook, like runtime PM. */
+static int dwc_otg_suspend(struct device *dev)
+{
+	struct dwc3_otg_hw_ops *ops = dwc3_otg_pdata;
+
+	if (!ops->suspend)
+		return 0;
+
+	return ops->suspend(the_transceiver);
+}
+
+/* System resume: reuses the vendor resume hook, like runtime PM. */
+static int dwc_otg_resume(struct device *dev)
+{
+	struct dwc3_otg_hw_ops *ops = dwc3_otg_pdata;
+
+	if (!ops->resume)
+		return 0;
+
+	return ops->resume(the_transceiver);
+}
+
+/* System and runtime PM callbacks; all delegate to vendor hooks. */
+static const struct dev_pm_ops dwc_usb_otg_pm_ops = {
+	.runtime_suspend = dwc_otg_runtime_suspend,
+	.runtime_resume = dwc_otg_runtime_resume,
+	.runtime_idle = dwc_otg_runtime_idle,
+	.suspend = dwc_otg_suspend,
+	.resume = dwc_otg_resume
+};
+
+/*
+ * Match the Intel DWC3 controller (device 0x119E) whether it is
+ * exposed with USB class programming interface 0x20 (EHCI-style) or
+ * 0x80 (other).
+ */
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{ PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x20), ~0),
+		.vendor =	PCI_VENDOR_ID_INTEL,
+		.device =	PCI_DEVICE_ID_DWC,
+	},
+	{ PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x80), ~0),
+		.vendor =	PCI_VENDOR_ID_INTEL,
+		.device =	PCI_DEVICE_ID_DWC,
+	},
+	{ /* end: all zeroes */ }
+};
+
+/* Registered from dwc3_otg_register(), not at module init. */
+static struct pci_driver dwc_otg_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+	.probe =	dwc_otg_probe,
+	.remove =	dwc_otg_remove,
+	.shutdown = dwc_otg_shutdown,
+	.driver = {
+		.name = (char *) driver_name,
+		.pm = &dwc_usb_otg_pm_ops,
+		.owner = THIS_MODULE,
+	},
+};
+#endif
+
+/*
+ * dwc3_otg_register - called by a vendor/platform driver to provide
+ * its hardware ops and start the PCI driver.
+ * @pdata: vendor hardware operations; must stay valid until unregister
+ *
+ * Returns 0 on success, -EINVAL on NULL pdata, -EBUSY if ops are
+ * already registered, or the pci_register_driver() error.
+ */
+int dwc3_otg_register(struct dwc3_otg_hw_ops *pdata)
+{
+	/* Initialized: without CONFIG_PCI the original returned an
+	 * uninitialized value here. */
+	int retval = 0;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (dwc3_otg_pdata)
+		return -EBUSY;
+
+	dwc3_otg_pdata = pdata;
+
+	/*
+	 * The mutex must be usable before pci_register_driver(): probe
+	 * can run synchronously and start_main_thread() takes 'lock'.
+	 */
+	mutex_init(&lock);
+
+#ifdef CONFIG_PCI
+	retval = pci_register_driver(&dwc_otg_pci_driver);
+#endif
+	if (retval) {
+		mutex_destroy(&lock);
+		dwc3_otg_pdata = NULL;
+	}
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dwc3_otg_register);
+
+/*
+ * dwc3_otg_unregister - counterpart of dwc3_otg_register().
+ * @pdata: must match the currently registered ops
+ *
+ * Returns 0 on success, -EINVAL on mismatch or NULL pdata.
+ */
+int dwc3_otg_unregister(struct dwc3_otg_hw_ops *pdata)
+{
+	if (!pdata)
+		return -EINVAL;
+
+	if (dwc3_otg_pdata != pdata)
+		return -EINVAL;
+
+#ifdef CONFIG_PCI
+	pci_unregister_driver(&dwc_otg_pci_driver);
+#endif
+	/*
+	 * Clear the ops only after the driver is unregistered: the
+	 * remove/PM callbacks invoked during unregistration dereference
+	 * dwc3_otg_pdata and would crash on NULL.
+	 */
+	dwc3_otg_pdata = NULL;
+
+	mutex_destroy(&lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dwc3_otg_unregister);
+
+/*
+ * Module init/exit are intentionally empty: the PCI driver is
+ * registered/unregistered by the vendor driver through
+ * dwc3_otg_register()/dwc3_otg_unregister().
+ */
+static int __init dwc_otg_init(void)
+{
+	return 0;
+}
+module_init(dwc_otg_init);
+
+static void __exit dwc_otg_exit(void)
+{
+}
+module_exit(dwc_otg_exit);
+
+MODULE_AUTHOR("Synopsys, Inc and Wang Yu <yu.y.wang@intel.com>");
+MODULE_DESCRIPTION("DWC3 OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0");
--- /dev/null
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_OTG_H
+#define __DWC3_OTG_H
+
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/compiler.h>
+#include <linux/power_supply.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ulpi.h>
+
+
+/* MMIO window shared with the dwc3 host/gadget child devices. */
+struct dwc_device_par {
+	void __iomem *io_addr;	/* mapped controller register base */
+	int len;		/* length of the mapped window in bytes */
+};
+
+#define DWC3_DEVICE_NAME "dwc3-device"
+#define DWC3_HOST_NAME "dwc3-host"
+#define GADGET_DEVID 1
+#define HOST_DEVID 2
+#define DRIVER_VERSION "0.1"
+
+#ifdef CONFIG_USB_DWC3_OTG_DEBUG
+#define DWC_OTG_DEBUG 1
+#else
+#define DWC_OTG_DEBUG 0
+#endif
+
+#define otg_dbg(d, fmt, args...) \
+ do { if (DWC_OTG_DEBUG) dev_dbg((d)->dev, \
+ "%s(): " fmt , __func__, ## args); } while (0)
+#define otg_vdbg(d, fmt, args...) \
+ do { if (DWC_OTG_DEBUG) dev_dbg((d)->dev, \
+ "%s(): " fmt , __func__, ## args); } while (0)
+#define otg_err(d, fmt, args...) \
+ do { if (DWC_OTG_DEBUG) dev_err((d)->dev, \
+ "%s(): " fmt , __func__, ## args); } while (0)
+#define otg_warn(d, fmt, args...) \
+ do { if (DWC_OTG_DEBUG) dev_warn((d)->dev, \
+ "%s(): " fmt , __func__, ## args); } while (0)
+#define otg_info(d, fmt, args...) \
+ do { if (DWC_OTG_DEBUG) dev_info((d)->dev, \
+ "%s(): " fmt , __func__, ## args); } while (0)
+
+#ifdef DEBUG
+#define otg_write(o, reg, val) do { \
+ otg_dbg(o, "OTG_WRITE: reg=0x%05x, val=0x%08x\n", reg, val); \
+ writel(val, ((void *)((o)->usb2_phy.io_priv)) + reg); \
+ } while (0)
+
+#define otg_read(o, reg) ({ \
+ u32 __r; \
+ __r = readl(((void *)((o)->usb2_phy.io_priv)) + reg); \
+ otg_dbg(o, "OTG_READ: reg=0x%05x, val=0x%08x\n", reg, __r); \
+ __r; \
+ })
+#else
+#define otg_write(o, reg, val) \
+ writel(val, ((void *)((o)->usb2_phy.io_priv)) + reg);
+
+#define otg_read(o, reg) ({ \
+ readl(((void *)((o)->usb2_phy.io_priv)) + reg); \
+ })
+#endif
+
+#define GUSB2PHYCFG0 0xc200
+#define GUSB2PHYCFG_SUS_PHY 0x40
+#define GUSB2PHYCFG_PHYSOFTRST (1 << 31)
+#define GUSB2PHYCFG_ULPI_AUTO_RESUME (1 << 15)
+#define GUSB2PHYCFG_ULPI_EXT_VBUS_DRV (1 << 17)
+
+#define EXTEND_ULPI_REGISTER_ACCESS_MASK 0xC0
+#define GUSB2PHYACC0 0xc280
+#define GUSB2PHYACC0_DISULPIDRVR (1 << 26)
+#define GUSB2PHYACC0_NEWREGREQ (1 << 25)
+#define GUSB2PHYACC0_VSTSDONE (1 << 24)
+#define GUSB2PHYACC0_VSTSBSY (1 << 23)
+#define GUSB2PHYACC0_REGWR (1 << 22)
+#define GUSB2PHYACC0_REGADDR(v) ((v & 0x3F) << 16)
+#define GUSB2PHYACC0_EXTREGADDR(v) ((v & 0x3F) << 8)
+#define GUSB2PHYACC0_VCTRL(v) ((v & 0xFF) << 8)
+#define GUSB2PHYACC0_REGDATA(v) (v & 0xFF)
+#define GUSB2PHYACC0_REGDATA_MASK 0xFF
+
+#define GUSB3PIPECTL0 0xc2c0
+#define GUSB3PIPECTL_SUS_EN 0x20000
+#define GUSB3PIPE_DISRXDETP3 (1 << 28)
+#define GUSB3PIPECTL_PHYSOFTRST (1 << 31)
+
+#define GHWPARAMS6 0xc158
+#define GHWPARAMS6_SRP_SUPPORT_ENABLED 0x0400
+#define GHWPARAMS6_HNP_SUPPORT_ENABLED 0x0800
+#define GHWPARAMS6_ADP_SUPPORT_ENABLED 0x1000
+
+#define GUCTL 0xC12C
+#define GUCTL_CMDEVADDR (1 << 15)
+
+#define GCTL 0xc110
+#define GCTL_PRT_CAP_DIR 0x3000
+#define GCTL_PRT_CAP_DIR_SHIFT 12
+#define GCTL_PRT_CAP_DIR_HOST 1
+#define GCTL_PRT_CAP_DIR_DEV 2
+#define GCTL_PRT_CAP_DIR_OTG 3
+#define GCTL_GBL_HIBERNATION_EN 0x2
+#define GCTL_CORESOFTRESET (1 << 11)
+#define GCTL_PWRDNSCALE(x) (x << 19)
+#define GCTL_PWRDNSCALE_MASK (0x1fff << 19)
+
+#define OCFG 0xcc00
+#define OCFG_SRP_CAP 0x01
+#define OCFG_SRP_CAP_SHIFT 0
+#define OCFG_HNP_CAP 0x02
+#define OCFG_HNP_CAP_SHIFT 1
+#define OCFG_OTG_VERSION 0x04
+#define OCFG_OTG_VERSION_SHIFT 2
+
+#define GCTL 0xc110
+#define OCTL 0xcc04
+#define OCTL_HST_SET_HNP_EN 0x01
+#define OCTL_HST_SET_HNP_EN_SHIFT 0
+#define OCTL_DEV_SET_HNP_EN 0x02
+#define OCTL_DEV_SET_HNP_EN_SHIFT 1
+#define OCTL_TERM_SEL_DL_PULSE 0x04
+#define OCTL_TERM_SEL_DL_PULSE_SHIFT 2
+#define OCTL_SES_REQ 0x08
+#define OCTL_SES_REQ_SHIFT 3
+#define OCTL_HNP_REQ 0x10
+#define OCTL_HNP_REQ_SHIFT 4
+#define OCTL_PRT_PWR_CTL 0x20
+#define OCTL_PRT_PWR_CTL_SHIFT 5
+#define OCTL_PERI_MODE 0x40
+#define OCTL_PERI_MODE_SHIFT 6
+
+#define OEVT 0xcc08
+#define OEVT_ERR 0x00000001
+#define OEVT_ERR_SHIFT 0
+#define OEVT_SES_REQ_SCS 0x00000002
+#define OEVT_SES_REQ_SCS_SHIFT 1
+#define OEVT_HST_NEG_SCS 0x00000004
+#define OEVT_HST_NEG_SCS_SHIFT 2
+#define OEVT_B_SES_VLD_EVT 0x00000008
+#define OEVT_B_SES_VLD_EVT_SHIFT 3
+#define OEVT_B_DEV_VBUS_CHNG_EVNT 0x00000100
+#define OEVT_B_DEV_VBUS_CHNG_EVNT_SHIFT 8
+#define OEVT_B_DEV_SES_VLD_DET_EVNT 0x00000200
+#define OEVT_B_DEV_SES_VLD_DET_EVNT_SHIFT 9
+#define OEVT_B_DEV_HNP_CHNG_EVNT 0x00000400
+#define OEVT_B_DEV_HNP_CHNG_EVNT_SHIFT 10
+#define OEVT_B_DEV_B_HOST_END_EVNT 0x00000800
+#define OEVT_B_DEV_B_HOST_END_EVNT_SHIFT 11
+#define OEVT_A_DEV_SESS_END_DET_EVNT 0x00010000
+#define OEVT_A_DEV_SESS_END_DET_EVNT_SHIFT 16
+#define OEVT_A_DEV_SRP_DET_EVNT 0x00020000
+#define OEVT_A_DEV_SRP_DET_EVNT_SHIFT 17
+#define OEVT_A_DEV_HNP_CHNG_EVNT 0x00040000
+#define OEVT_A_DEV_HNP_CHNG_EVNT_SHIFT 18
+#define OEVT_A_DEV_HOST_EVNT 0x00080000
+#define OEVT_A_DEV_HOST_EVNT_SHIFT 19
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT 0x00100000
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT_SHIFT 20
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT 0x00400000
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT_SHIFT 22
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT 0x00800000
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT_SHIFT 23
+#define OEVT_CONN_ID_STS_CHNG_EVNT 0x01000000
+#define OEVT_CONN_ID_STS_CHNG_EVNT_SHIFT 24
+#define OEVT_DEV_MOD_EVNT 0x80000000
+#define OEVT_DEV_MOD_EVNT_SHIFT 31
+
+#define OEVTEN 0xcc0c
+
+#define OEVT_ALL (OEVT_CONN_ID_STS_CHNG_EVNT | \
+ OEVT_HOST_ROLE_REQ_INIT_EVNT | \
+ OEVT_HOST_ROLE_REQ_CONFIRM_EVNT | \
+ OEVT_A_DEV_B_DEV_HOST_END_EVNT | \
+ OEVT_A_DEV_HOST_EVNT | \
+ OEVT_A_DEV_HNP_CHNG_EVNT | \
+ OEVT_A_DEV_SRP_DET_EVNT | \
+ OEVT_A_DEV_SESS_END_DET_EVNT | \
+ OEVT_B_DEV_B_HOST_END_EVNT | \
+ OEVT_B_DEV_HNP_CHNG_EVNT | \
+ OEVT_B_DEV_SES_VLD_DET_EVNT | \
+ OEVT_B_DEV_VBUS_CHNG_EVNT)
+
+#define OSTS 0xcc10
+#define OSTS_CONN_ID_STS 0x0001
+#define OSTS_CONN_ID_STS_SHIFT 0
+#define OSTS_A_SES_VLD 0x0002
+#define OSTS_A_SES_VLD_SHIFT 1
+#define OSTS_B_SES_VLD 0x0004
+#define OSTS_B_SES_VLD_SHIFT 2
+#define OSTS_XHCI_PRT_PWR 0x0008
+#define OSTS_XHCI_PRT_PWR_SHIFT 3
+#define OSTS_PERIP_MODE 0x0010
+#define OSTS_PERIP_MODE_SHIFT 4
+#define OSTS_OTG_STATES 0x0f00
+#define OSTS_OTG_STATE_SHIFT 8
+
+#define ADPCFG 0xcc20
+#define ADPCFG_PRB_DSCHGS 0x0c000000
+#define ADPCFG_PRB_DSCHG_SHIFT 26
+#define ADPCFG_PRB_DELTAS 0x30000000
+#define ADPCFG_PRB_DELTA_SHIFT 28
+#define ADPCFG_PRB_PERS 0xc0000000
+#define ADPCFG_PRB_PER_SHIFT 30
+
+#define ADPCTL 0xcc24
+#define ADPCTL_WB 0x01000000
+#define ADPCTL_WB_SHIFT 24
+#define ADPCTL_ADP_RES 0x02000000
+#define ADPCTL_ADP_RES_SHIFT 25
+#define ADPCTL_ADP_EN 0x04000000
+#define ADPCTL_ADP_EN_SHIFT 26
+#define ADPCTL_ENA_SNS 0x08000000
+#define ADPCTL_ENA_SNS_SHIFT 27
+#define ADPCTL_ENA_PRB 0x10000000
+#define ADPCTL_ENA_PRB_SHIFT 28
+
+#define ADPEVT 0xcc28
+#define ADPEVT_RTIM_EVNTS 0x000007ff
+#define ADPEVT_RTIM_EVNT_SHIFT 0
+#define ADPEVT_ADP_RST_CMPLT_EVNT 0x02000000
+#define ADPEVT_ADP_RST_CMPLT_EVNT_SHIFT 25
+#define ADPEVT_ADP_TMOUT_EVNT 0x04000000
+#define ADPEVT_ADP_TMOUT_EVNT_SHIFT 26
+#define ADPEVT_ADP_SNS_EVNT 0x08000000
+#define ADPEVT_ADP_SNS_EVNT_SHIFT 27
+#define ADPEVT_ADP_PRB_EVNT 0x10000000
+#define ADPEVT_ADP_PRB_EVNT_SHIFT 28
+
+#define ADPEVTEN 0xcc2c
+#define ADPEVTEN_ACC_DONE_EN 0x01000000
+#define ADPEVTEN_ACC_DONE_EN_SHIFT 24
+#define ADPEVTEN_ADP_RST_CMPLT_EVNT_EN 0x02000000
+#define ADPEVTEN_ADP_RST_CMPLT_EVNT_EN_SHIFT 25
+#define ADPEVTEN_ADP_TMOUT_EVNT_EN 0x04000000
+#define ADPEVTEN_ADP_TMOUT_EVNT_EN_SHIFT 26
+#define ADPEVTEN_ADP_SNS_EVNT_EN 0x08000000
+#define ADPEVTEN_ADP_SNS_EVNT_EN_SHIFT 27
+#define ADPEVTEN_ADP_PRB_EVNT_EN 0x10000000
+#define ADPEVTEN_ADP_PRB_EVNT_EN_SHIFT 28
+
+#define RID_A 0x01
+#define RID_B 0x02
+#define RID_C 0x03
+#define RID_FLOAT 0x04
+#define RID_GND 0x05
+#define RID_UNKNOWN 0x00
+
+/** The states for the OTG driver */
+enum dwc_otg_state {
+	DWC_STATE_INVALID = -1,
+
+	/** The initial state, check the connector
+	 * id status and determine what mode
+	 * (A-device or B-device) to operate in. */
+	DWC_STATE_B_IDLE = 0,
+
+	/* A-Host states */
+	DWC_STATE_A_PROBE,
+	DWC_STATE_A_HOST,
+	DWC_STATE_A_HNP_INIT,
+
+	/* A-Peripheral states */
+	DWC_STATE_A_PERIPHERAL,
+
+	/* B-Peripheral states */
+	DWC_STATE_B_SENSE,
+	DWC_STATE_B_PROBE,
+	DWC_STATE_B_PERIPHERAL,
+	DWC_STATE_B_HNP_INIT,
+
+	/* B-Host states */
+	DWC_STATE_B_HOST,
+
+	/* RSP */
+	DWC_STATE_B_RSP_INIT,
+
+	/* USB charger detection */
+	DWC_STATE_CHARGER_DETECTION,
+
+	/* VBUS: wait for VBUS to rise after enabling it, or to fall
+	 * after a disconnect, before moving on */
+	DWC_STATE_WAIT_VBUS_RAISE,
+	DWC_STATE_WAIT_VBUS_FALL,
+
+	/* Charging*/
+	DWC_STATE_CHARGING,
+
+	/* Exit: EXIT is requested (e.g. by stop_main_thread());
+	 * TERMINATED ends the main-thread loop */
+	DWC_STATE_EXIT,
+	DWC_STATE_TERMINATED
+};
+
+/** The main structure to keep track of OTG driver state. */
+struct dwc_otg2 {
+	/** OTG transceiver */
+	struct usb_otg otg;
+	struct usb_phy usb2_phy;
+	struct usb_phy usb3_phy;
+	struct device *dev;
+	int irqnum;		/* PCI IRQ line from probe */
+
+	/* Set by the wakeup helpers, consumed by the main thread. */
+	int main_wakeup_needed;
+	struct task_struct *main_thread;	/* state-machine kthread */
+	wait_queue_head_t main_wq;		/* main thread sleeps here */
+
+	spinlock_t lock;	/* protects otg_events/user_events */
+
+	/* Events */
+	u32 otg_events;		/* pending hardware OEVT bits */
+	u32 user_events;	/* pending userspace-triggered events */
+
+	/** User space ID switch event */
+#define USER_ID_A_CHANGE_EVENT 0x01
+#define USER_ID_B_CHANGE_EVENT 0x02
+	/** a_bus_drop event from userspace */
+#define USER_A_BUS_DROP 0x40
+
+	/* States */
+	enum dwc_otg_state prev;	/* previous state (for debug) */
+	enum dwc_otg_state state;	/* current state-machine state */
+	struct platform_device *host;	/* dwc3-host child, if created */
+	struct platform_device *gadget;	/* dwc3-device child, if created */
+
+	/* Charger detection */
+	struct power_supply_cable_props charging_cap;
+	struct notifier_block nb;	/* ID/VBUS change notifier */
+
+	/* Interfaces between host/device driver */
+	int (*start_host) (struct usb_hcd *hcd);
+	int (*stop_host) (struct usb_hcd *hcd);
+	int (*start_device)(struct usb_gadget *);
+	int (*stop_device)(struct usb_gadget *);
+	int (*vbus_draw) (struct usb_gadget *, unsigned ma);
+
+	/* Vendor driver private date */
+	void *otg_data;
+};
+
+/*
+ * Sleep the OTG main thread until 'condition' becomes true or 'msecs'
+ * elapse.  Evaluates to the remaining timeout (<= 0 on expiry).
+ * NOTE: 'condition' is re-evaluated on every wakeup (multiple
+ * evaluation) - keep it side-effect free.
+ */
+#define sleep_main_thread_until_condition_timeout(otg, condition, msecs) ({ \
+		int __timeout = msecs; \
+		while (!(condition)) { \
+			otg_dbg(otg, "  ... sleeping for %d\n", __timeout); \
+			__timeout = sleep_main_thread_timeout(otg, __timeout); \
+			if (__timeout <= 0) { \
+				break; \
+			} \
+		} \
+		__timeout; \
+	})
+
+/*
+ * Sleep the OTG main thread until 'condition' becomes true, retrying
+ * in 50 s slices indefinitely.  Evaluates to the final (non-zero)
+ * remaining-timeout value.
+ */
+#define sleep_main_thread_until_condition(otg, condition) ({ \
+		int __rc = 0; \
+		do { \
+			__rc = sleep_main_thread_until_condition_timeout(otg, \
+					condition, 50000); \
+		} while (__rc == 0); \
+		__rc; \
+	})
+
+#define VBUS_TIMEOUT 300
+#define PCI_DEVICE_ID_DWC 0x119E
+
+enum dwc3_otg_mode {
+ DWC3_DEVICE_ONLY,
+ DWC3_HOST_ONLY,
+ DWC3_DRD,
+};
+
+enum driver_bus_type {
+ DWC3_PLAT,
+ DWC3_PCI,
+};
+
+/*
+ * Vendor/platform hardware operations, supplied via
+ * dwc3_otg_register().  Only 'mode' and 'bus' are mandatory; every
+ * function pointer is optional and checked for NULL before use by the
+ * core driver.
+ */
+struct dwc3_otg_hw_ops {
+	enum dwc3_otg_mode mode;	/* host-only / device-only / DRD */
+	enum driver_bus_type bus;	/* platform vs PCI enumeration */
+
+	int (*set_power)(struct usb_phy *_otg, unsigned ma);
+	int (*platform_init)(struct dwc_otg2 *otg);
+	int (*otg_notifier_handler)(struct notifier_block *nb,
+			unsigned long event, void *data);
+	/* Role-switch hooks around starting/stopping host or gadget. */
+	int (*prepare_start_peripheral)(struct dwc_otg2 *otg);
+	int (*prepare_start_host)(struct dwc_otg2 *otg);
+	int (*after_stop_peripheral)(struct dwc_otg2 *otg);
+	int (*after_stop_host)(struct dwc_otg2 *otg);
+	int (*b_idle)(struct dwc_otg2 *otg);
+	/* Charger detection/notification hooks. */
+	int (*do_charging)(struct dwc_otg2 *otg);
+	int (*notify_charger_type)(struct dwc_otg2 *otg,
+			enum power_supply_charger_event event);
+	enum power_supply_charger_cable_type
+		(*get_charger_type)(struct dwc_otg2 *otg);
+	int (*enable_vbus)(struct dwc_otg2 *otg, int enable);
+	int (*get_id)(struct dwc_otg2 *otg);
+
+	/* PM hooks used by the runtime/system PM callbacks. */
+	int (*idle)(struct dwc_otg2 *otg);
+	int (*suspend)(struct dwc_otg2 *otg);
+	int (*resume)(struct dwc_otg2 *otg);
+};
+
+#define OTG_USB2_100MA 0xfff1
+#define OTG_USB3_150MA 0xfff2
+#define OTG_USB2_500MA 0xfff3
+#define OTG_USB3_900MA 0xfff4
+#define OTG_DEVICE_SUSPEND 0xfffe
+#define OTG_DEVICE_RESUME 0xffff
+
+void dwc3_wakeup_otg_thread(struct dwc_otg2 *otg);
+struct dwc_otg2 *dwc3_get_otg(void);
+int dwc3_otg_register(struct dwc3_otg_hw_ops *pdata);
+int dwc3_otg_unregister(struct dwc3_otg_hw_ops *pdata);
+#endif /* __DWC3_OTG_H */
ep->address = desc->bEndpointAddress;
return 1;
}
+EXPORT_SYMBOL_GPL(ep_matches);
+
static struct usb_ep *
find_ep (struct usb_gadget *gadget, const char *name)
}
return NULL;
}
+EXPORT_SYMBOL_GPL(find_ep);
/**
* usb_ep_autoconfig_ss() - choose an endpoint matching the ep
{
struct usb_ep *ep;
u8 type;
+#ifdef CONFIG_USB_DWC3_GADGET
+ u8 addr;
+ addr = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+#endif
type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
/* First, apply chip-specific "best usage" knowledge.
if (ep && ep_matches(gadget, ep, desc, ep_comp))
goto found_ep;
#endif
+
+#ifdef CONFIG_USB_DWC3_GADGET
+ } else if (gadget_is_middwc3tng(gadget)) {
+ if (addr == 0x1) {
+ /* statically assigned ebc-ep1 in/out */
+ if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ & USB_DIR_IN)
+ ep = find_ep(gadget, "ep1in");
+ else
+ ep = NULL;
+ } else if (addr == 0x8) {
+ /* statically assigned ebc-ep8 in/out */
+ if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ & USB_DIR_IN)
+ ep = find_ep (gadget, "ep8in");
+ else
+ ep = find_ep (gadget, "ep8out");
+ } else
+ ep = NULL;
+ if (ep && ep_matches(gadget, ep, desc, ep_comp))
+ goto found_ep;
+#endif
+
}
/* Second, look at endpoints until an unclaimed one looks usable */
list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+
+#ifdef CONFIG_USB_DWC3_GADGET
+ /* ep1in and ep8in are reserved for DWC3 device controller */
+ if (!strncmp(ep->name, "ep1in", 5) ||
+ !strncmp(ep->name, "ep8in", 5))
+ continue;
+ if (gadget_is_middwc3tng(gadget))
+ /* ep1out and ep8out are also reserved */
+ if (!strncmp(ep->name, "ep1out", 6) ||
+ !strncmp(ep->name, "ep8out", 6))
+ continue;
+#endif
if (ep_matches(gadget, ep, desc, ep_comp))
goto found_ep;
}
--- /dev/null
+/*
+ * Gadget Driver for Android DvC.Dfx Debug Capability
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/usb/debug.h>
+#include <linux/sdm.h>
+#include <asm/intel_soc_debug.h>
+
+/* One endless RX (OUT) request; two TX (IN) requests double-buffering
+ * the fixed DMA bounce areas; 64-byte bulk request size. */
+#define DFX_RX_REQ_MAX 1
+#define DFX_TX_REQ_MAX 2
+#define DFX_BULK_REQ_SIZE 64
+
+/* NOTE(review): defining a CONFIG_* symbol inside a source file bypasses
+ * Kconfig — this should come from the build configuration; confirm. */
+#define CONFIG_BOARD_MRFLD_VV
+
+/*
+ * Driver state for the DvC.DFx USB function.  A single instance is
+ * shared through the file-scope _dvc_dfx_dev pointer.
+ */
+struct dvc_dfx_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock; /* protects tx_idle / tx_xfer lists */
+ u8 ctrl_id, data_id; /* interface numbers from usb_interface_id() */
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+
+ int transfering; /* endless IN/OUT requests currently queued */
+ int online; /* both data and control interfaces configured */
+ int online_ctrl;
+ int online_data;
+ int error;
+
+ /* single-holder try-locks implemented as atomic counters */
+ atomic_t read_excl;
+ atomic_t write_excl;
+ atomic_t open_excl;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+
+ struct usb_request *rx_req[DFX_RX_REQ_MAX];
+
+ struct list_head tx_idle; /* free TX requests */
+ struct list_head tx_xfer; /* TX requests queued to the UDC */
+};
+
+/* Interface association grouping the DvC.DFx data + control interfaces. */
+static struct usb_interface_assoc_descriptor dfx_iad_desc = {
+ .bLength = sizeof(dfx_iad_desc),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* debug control + data */
+ .bFunctionClass = USB_CLASS_DEBUG,
+ .bFunctionSubClass = USB_SUBCLASS_DVC_DFX,
+ /* .bFunctionProtocol = DC_PROTOCOL_VENDOR, */
+ /* .iFunction = 0, */
+};
+
+/* Debug-control interface: carries no endpoints of its own. */
+static struct usb_interface_descriptor dfx_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_DEBUG,
+ .bInterfaceSubClass = USB_SUBCLASS_DEBUG_CONTROL,
+ /* .bInterfaceProtocol = DC_PROTOCOL_VENDOR, */
+};
+
+#define DC_DBG_ATTRI_LENGTH DC_DBG_ATTRI_SIZE(2, 32)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define DC_DBG_TOTAL_LENGTH (DC_DBG_ATTRI_LENGTH)
+
+DECLARE_DC_DEBUG_ATTR_DESCR(DVCD, 2, 32);
+
+/* Class-specific debug-attributes descriptor; the magic values are
+ * taken from the platform SAS (see inline notes). */
+static struct DC_DEBUG_ATTR_DESCR(DVCD) dfx_debug_attri_desc = {
+ .bLength = DC_DBG_ATTRI_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = DC_DEBUG_ATTRIBUTES,
+ .bcdDC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(DC_DBG_TOTAL_LENGTH),
+ .bmSupportedFeatures = 0, /* Debug Event Supported, per SAS */
+ .bControlSize = 2,
+ .bmControl = { /* per SAS */
+ [0] = 0xFF,
+ [1] = 0x3F,
+ },
+ .wAuxDataSize = __constant_cpu_to_le16(0x20),
+/* per SAS v0.3*/
+ .dInputBufferSize = __constant_cpu_to_le32(0x40),
+ .dOutputBufferSize = __constant_cpu_to_le32(0x80),
+ .qBaseAddress = 0, /* revision */
+ .hGlobalID = { /* revision */
+ [0] = 0,
+ [1] = 0,
+ }
+};
+
+/* Data interface carrying the two bulk endpoints. */
+static struct usb_interface_descriptor dfx_data_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_DEBUG,
+ .bInterfaceSubClass = USB_SUBCLASS_DVC_DFX,
+ /* .bInterfaceProtocol = DC_PROTOCOL_VENDOR, */
+};
+
+/* Bulk endpoint descriptors for all three speeds.  The FS descriptors
+ * leave wMaxPacketSize to the gadget core; the endpoint numbers are
+ * filled in by usb_ep_autoconfig_ss() at bind time. */
+static struct usb_endpoint_descriptor dfx_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor dfx_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor dfx_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dfx_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dfx_superspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+/* SuperSpeed companion: no bursting, no streams. */
+static struct usb_ss_ep_comp_descriptor dfx_superspeed_in_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+};
+
+static struct usb_endpoint_descriptor dfx_superspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dfx_superspeed_out_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+};
+
+/* no INPUT/OUTPUT CONNECTION and UNIT descriptors for DvC.DFx */
+static struct usb_descriptor_header *fs_dfx_descs[] = {
+ (struct usb_descriptor_header *) &dfx_iad_desc,
+ (struct usb_descriptor_header *) &dfx_data_interface_desc,
+ (struct usb_descriptor_header *) &dfx_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &dfx_fullspeed_out_desc,
+
+ (struct usb_descriptor_header *) &dfx_interface_desc,
+ (struct usb_descriptor_header *) &dfx_debug_attri_desc,
+ NULL,
+};
+
+/* High-speed variant: same layout with the HS endpoint descriptors. */
+static struct usb_descriptor_header *hs_dfx_descs[] = {
+ (struct usb_descriptor_header *) &dfx_iad_desc,
+ (struct usb_descriptor_header *) &dfx_data_interface_desc,
+ (struct usb_descriptor_header *) &dfx_highspeed_in_desc,
+ (struct usb_descriptor_header *) &dfx_highspeed_out_desc,
+
+ (struct usb_descriptor_header *) &dfx_interface_desc,
+ (struct usb_descriptor_header *) &dfx_debug_attri_desc,
+ NULL,
+};
+
+/* SuperSpeed variant adds the endpoint companion descriptors. */
+static struct usb_descriptor_header *ss_dfx_descs[] = {
+ (struct usb_descriptor_header *) &dfx_iad_desc,
+ (struct usb_descriptor_header *) &dfx_data_interface_desc,
+ (struct usb_descriptor_header *) &dfx_superspeed_in_desc,
+ (struct usb_descriptor_header *) &dfx_superspeed_in_comp_desc,
+ (struct usb_descriptor_header *) &dfx_superspeed_out_desc,
+ (struct usb_descriptor_header *) &dfx_superspeed_out_comp_desc,
+
+ (struct usb_descriptor_header *) &dfx_interface_desc,
+ (struct usb_descriptor_header *) &dfx_debug_attri_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+#define DVCDFX_CTRL_IDX 0
+#define DVCDFX_DATA_IDX 1
+#define DVCDFX_IAD_IDX 2
+
+/* static strings, in UTF-8 */
+static struct usb_string dfx_string_defs[] = {
+ [DVCDFX_CTRL_IDX].s = "Debug Sub-Class DvC.DFx (Control)",
+ [DVCDFX_DATA_IDX].s = "Debug Sub-Class DvC.DFx (Data)",
+ [DVCDFX_IAD_IDX].s = "Debug Sub-Class DvC.DFx",
+ { /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings dfx_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = dfx_string_defs,
+};
+
+static struct usb_gadget_strings *dfx_strings[] = {
+ &dfx_string_table,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* temporary variable used between dvc_dfx_open() and dvc_dfx_gadget_bind() */
+static struct dvc_dfx_dev *_dvc_dfx_dev;
+
+/* Map a struct usb_function back to its enclosing dvc_dfx_dev. */
+static inline struct dvc_dfx_dev *func_to_dvc_dfx(struct usb_function *f)
+{
+ return container_of(f, struct dvc_dfx_dev, function);
+}
+
+/* Return 1 when both the USB3DFX SoC debug feature and STM tracing are
+ * enabled, 0 otherwise (logged at info level). */
+static int dvc_dfx_is_enabled(void)
+{
+ if ((!cpu_has_debug_feature(DEBUG_FEATURE_USB3DFX)) ||
+ (!stm_is_enabled())) {
+ pr_info("%s STM and/or USB3DFX is not enabled\n", __func__);
+ return 0;
+ }
+ return 1;
+}
+
+/* Allocate a usb_request on @ep with a kmalloc'd buffer of
+ * @buffer_size bytes; @dma pre-sets the request's DMA address.
+ * NOTE(review): req->dma points at a fixed bounce area while req->buf
+ * is separately kmalloc'd — confirm which one the UDC actually uses.
+ * Returns NULL on any allocation failure (nothing leaks). */
+static struct usb_request *dvc_dfx_request_new(struct usb_ep *ep,
+ int buffer_size, dma_addr_t dma)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+
+ req->dma = dma;
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+/* Free @req and its bounce buffer; tolerates req == NULL. */
+static void dvc_dfx_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+/* add a request to the tail of a list (dev->lock taken internally,
+ * safe from completion/IRQ context) */
+static void dvc_dfx_req_put(struct dvc_dfx_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list; returns NULL when the
+ * list is empty (dev->lock taken internally) */
+static struct usb_request *dvc_dfx_req_get(struct dvc_dfx_dev *dev,
+ struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+/* Mark the endless transfer as stopped (called on request errors). */
+static void dvc_dfx_set_disconnected(struct dvc_dfx_dev *dev)
+{
+ dev->transfering = 0;
+}
+
+/* IN (TX) completion: recycle the request onto tx_idle and wake any
+ * writer; a non-zero status marks the transfer as disconnected. */
+static void dvc_dfx_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+
+ if (req->status != 0)
+ dvc_dfx_set_disconnected(dev);
+
+ dvc_dfx_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+/* OUT (RX) completion: wake any reader; a non-zero status marks the
+ * transfer as disconnected (the request is not re-queued here). */
+static void dvc_dfx_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+
+ if (req->status != 0)
+ dvc_dfx_set_disconnected(dev);
+ wake_up(&dev->read_wq);
+}
+
+
+/* Try-lock built on an atomic counter: returns 0 on success, -1 if
+ * already held (the counter is restored on failure). */
+static inline int dvc_dfx_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
+/* Release a lock taken with dvc_dfx_lock(). */
+static inline void dvc_dfx_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/*
+ * Allocate the IN/OUT bulk endpoints and their usb_requests.
+ * The endpoint number is forced to 8 (|= 0x8) so autoconfig picks the
+ * statically reserved ep8in/ep8out pair of the DWC3 controller.
+ * Returns 0 on success, -ENODEV / -1 on failure (all requests freed).
+ */
+static int dfx_create_bulk_endpoints(struct dvc_dfx_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc,
+ struct usb_ss_ep_comp_descriptor *in_comp_desc,
+ struct usb_ss_ep_comp_descriptor *out_comp_desc
+ )
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ pr_debug("%s dev: %p\n", __func__, dev);
+
+ in_desc->bEndpointAddress |= 0x8;
+ ep = usb_ep_autoconfig_ss(cdev->gadget, in_desc, in_comp_desc);
+ if (!ep) {
+ pr_debug("%s for ep_in failed\n", __func__);
+ return -ENODEV;
+ }
+ pr_debug("%s for ep_in got %s\n", __func__, ep->name);
+
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ out_desc->bEndpointAddress |= 0x8;
+ ep = usb_ep_autoconfig_ss(cdev->gadget, out_desc, out_comp_desc);
+ if (!ep) {
+ pr_debug("%s for ep_out failed\n", __func__);
+ return -ENODEV;
+ }
+ pr_debug("%s for ep_out got %s\n", __func__, ep->name);
+
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ /* TX requests alternate between the two fixed IN bounce buffers */
+ for (i = 0; i < DFX_TX_REQ_MAX; i++) {
+ if (!(i % 2))
+ req = dvc_dfx_request_new(dev->ep_in,
+ DFX_BULK_BUFFER_SIZE,
+ (dma_addr_t)DFX_BULK_IN_BUFFER_ADDR);
+ else
+ req = dvc_dfx_request_new(dev->ep_in,
+ DFX_BULK_BUFFER_SIZE,
+ (dma_addr_t)DFX_BULK_IN_BUFFER_ADDR_2);
+ if (!req)
+ goto fail;
+ req->complete = dvc_dfx_complete_in;
+ dvc_dfx_req_put(dev, &dev->tx_idle, req);
+ }
+ for (i = 0; i < DFX_RX_REQ_MAX; i++) {
+ req = dvc_dfx_request_new(dev->ep_out, DFX_BULK_BUFFER_SIZE,
+ (dma_addr_t)DFX_BULK_OUT_BUFFER_ADDR);
+ if (!req)
+ goto fail;
+ req->complete = dvc_dfx_complete_out;
+ dev->rx_req[i] = req;
+ }
+
+ return 0;
+
+fail:
+ pr_err("%s could not allocate requests\n", __func__);
+ /* tx_idle requests were allocated on ep_in, so free them against
+ * ep_in (previously they were wrongly freed against ep_out) */
+ while ((req = dvc_dfx_req_get(dev, &dev->tx_idle)))
+ dvc_dfx_request_free(req, dev->ep_in);
+ for (i = 0; i < DFX_RX_REQ_MAX; i++)
+ dvc_dfx_request_free(dev->rx_req[i], dev->ep_out);
+ return -1;
+}
+
+/*
+ * Queue the endless IN requests covering @count bytes, then the single
+ * endless OUT request.  Blocks until the function is online (or an
+ * error is latched).  Returns the number of bytes left unqueued
+ * (0 on full success) or a negative errno.
+ */
+static ssize_t dvc_dfx_start_transfer(size_t count)
+{
+ struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret = -ENODEV;
+
+
+ pr_info("%s start\n", __func__);
+ if (!_dvc_dfx_dev)
+ return ret;
+
+ /* take BOTH exclusion locks.  The original code used
+ * 'if (lock(read) && lock(write))', which short-circuits: when the
+ * read lock succeeded the write lock was never taken, yet both were
+ * unlocked on exit, underflowing write_excl and poisoning it. */
+ if (dvc_dfx_lock(&dev->read_excl))
+ return -EBUSY;
+ if (dvc_dfx_lock(&dev->write_excl)) {
+ dvc_dfx_unlock(&dev->read_excl);
+ return -EBUSY;
+ }
+
+ /* we will block until enumeration completes */
+ while (!(dev->online || dev->error)) {
+ pr_debug("%s waiting for online state\n", __func__);
+ ret = wait_event_interruptible(dev->read_wq,
+ (dev->online || dev->error));
+
+ if (ret < 0) {
+ /* not at CONFIGURED state */
+ pr_info("%s USB not at CONFIGURED\n", __func__);
+ dvc_dfx_unlock(&dev->read_excl);
+ dvc_dfx_unlock(&dev->write_excl);
+ return ret;
+ }
+ }
+
+ /* queue a ep_in endless request */
+ while (r > 0) {
+ if (dev->error) {
+ pr_debug("%s dev->error\n", __func__);
+ r = -EIO;
+ break;
+ }
+
+ if (!dev->online) {
+ pr_debug("%s !dev->online issue\n", __func__);
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ dev->error || !dev->online ||
+ (req = dvc_dfx_req_get(dev, &dev->tx_idle)));
+
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+
+ if (req != 0) {
+ if (count > DFX_BULK_BUFFER_SIZE)
+ xfer = DFX_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+
+ req->no_interrupt = 1;
+ req->context = &dev->function;
+ req->length = xfer;
+ pr_debug("%s queue tx_idle list req to dev->ep_in\n",
+ __func__);
+ ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("%s xfer error %d\n", __func__, ret);
+ dev->error = 1;
+ r = -EIO;
+ break;
+ }
+ /* %p instead of casting the pointer to (uint),
+ * which truncates on 64-bit */
+ pr_debug("%s xfer=%d/%d queued req/%p\n", __func__,
+ xfer, r, req);
+ dvc_dfx_req_put(dev, &dev->tx_xfer, req);
+ r -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+ }
+ if (req) {
+ pr_debug("%s req re-added to tx_idle on error\n", __func__);
+ dvc_dfx_req_put(dev, &dev->tx_idle, req);
+ }
+
+ pr_debug("%s rx_req to dev->ep_out\n", __func__);
+ /* queue a ep_out endless request */
+ req = dev->rx_req[0];
+ req->length = DFX_BULK_BUFFER_SIZE;
+ req->no_interrupt = 1;
+ req->context = &dev->function;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("%s failed to queue out req %p (%d)\n",
+ __func__, req, req->length);
+ r = -EIO;
+ } else {
+ dev->transfering = 1;
+ }
+
+ dvc_dfx_unlock(&dev->read_excl);
+ dvc_dfx_unlock(&dev->write_excl);
+ pr_debug("%s returning\n", __func__);
+ return r;
+}
+
+/*
+ * Dequeue every in-flight TX request and the endless RX request,
+ * stopping an active transfer (DC_REQUEST_SET_RESET / disable path).
+ * Returns 1 on success, a negative errno on failure.
+ */
+static int dvc_dfx_disable_transfer(void)
+{
+ struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+ struct usb_request *req;
+ int r = 1;
+ int ret;
+
+
+ pr_info("%s start\n", __func__);
+ if (!_dvc_dfx_dev)
+ return -ENODEV;
+
+ /* take BOTH exclusion locks; the original '&&' short-circuited and
+ * could unlock a write_excl that was never taken (see
+ * dvc_dfx_start_transfer for the same fix) */
+ if (dvc_dfx_lock(&dev->read_excl))
+ return -EBUSY;
+ if (dvc_dfx_lock(&dev->write_excl)) {
+ dvc_dfx_unlock(&dev->read_excl);
+ return -EBUSY;
+ }
+
+ if (dev->error) {
+ pr_debug("%s dev->error\n", __func__);
+ r = -EIO;
+ goto end;
+ }
+
+ if ((!dev->online) || (!dev->transfering)) {
+ pr_debug("%s !dev->online OR !dev->transfering\n", __func__);
+ r = -EIO;
+ goto end;
+ }
+
+ /* get an xfer tx request to use */
+ while ((req = dvc_dfx_req_get(dev, &dev->tx_xfer))) {
+ ret = usb_ep_dequeue(dev->ep_in, req);
+ if (ret < 0) {
+ pr_err("%s dequeue error %d\n", __func__, ret);
+ dev->error = 1;
+ r = -EIO;
+ goto end;
+ }
+ /* %p instead of a truncating (uint) cast of the pointer */
+ pr_debug("%s dequeued tx req/%p\n", __func__, req);
+ }
+ ret = usb_ep_dequeue(dev->ep_out, dev->rx_req[0]);
+ if (ret < 0) {
+ pr_err("%s dequeue rx error %d\n", __func__, ret);
+ dev->error = 1;
+ r = -EIO;
+ goto end;
+ }
+
+end:
+ dvc_dfx_unlock(&dev->read_excl);
+ dvc_dfx_unlock(&dev->write_excl);
+ return r;
+}
+
+/* Open /dev/usb_dvc_dfx: single-open enforced via open_excl; clears
+ * the latched error and transfer state. */
+static int dvc_dfx_open(struct inode *ip, struct file *fp)
+{
+ pr_info("%s\n", __func__);
+ if (!_dvc_dfx_dev)
+ return -ENODEV;
+
+ if (dvc_dfx_lock(&_dvc_dfx_dev->open_excl))
+ return -EBUSY;
+
+ fp->private_data = _dvc_dfx_dev;
+
+ /* clear the error latch */
+ _dvc_dfx_dev->error = 0;
+ _dvc_dfx_dev->transfering = 0;
+
+ return 0;
+}
+
+/* Release /dev/usb_dvc_dfx: drop the single-open lock. */
+static int dvc_dfx_release(struct inode *ip, struct file *fp)
+{
+ pr_info("%s\n", __func__);
+
+ dvc_dfx_unlock(&_dvc_dfx_dev->open_excl);
+ return 0;
+}
+
+/* file operations for DvC.Dfx device /dev/usb_dvc_dfx
+ * (open/release only — data moves over the USB endpoints, not read/write) */
+static const struct file_operations dvc_dfx_fops = {
+ .owner = THIS_MODULE,
+ .open = dvc_dfx_open,
+ .release = dvc_dfx_release,
+};
+
+static struct miscdevice dvc_dfx_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usb_dvc_dfx",
+ .fops = &dvc_dfx_fops,
+};
+
+/* Handle class-specific ep0 requests for the DFx data interface.
+ * Only DC_REQUEST_SET_RESET is supported (stops the active transfer);
+ * anything else returns -EOPNOTSUPP so the composite core stalls. */
+static int dvc_dfx_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ pr_debug("%s %02x.%02x v%04x i%04x l%u\n", __func__,
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* DC_REQUEST_SET_RESET ... stop active transfer */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | DC_REQUEST_SET_RESET:
+ if (w_index != dev->data_id)
+ goto invalid;
+
+ pr_info("%s DC_REQUEST_SET_RESET v%04x i%04x l%u\n", __func__,
+ w_value, w_index, w_length);
+
+ dvc_dfx_disable_transfer();
+ value = 0;
+ break;
+
+ default:
+invalid:
+ pr_debug("unknown class-specific control req "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ cdev->req->zero = 0;
+ cdev->req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("%s setup response queue error\n", __func__);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/* Function bind: allocate the data + control interface numbers, create
+ * the bulk endpoints, and copy the autoconfigured FS endpoint addresses
+ * into the HS/SS descriptors.  Returns 0 or a negative errno. */
+static int
+dvc_dfx_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct dvc_dfx_dev *dev = func_to_dvc_dfx(f);
+ int id;
+ int ret;
+
+ dev->cdev = cdev;
+ pr_info("%s dev: %p\n", __func__, dev);
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ dev->data_id = id;
+ dfx_data_interface_desc.bInterfaceNumber = id;
+ dfx_iad_desc.bFirstInterface = id;
+
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ dev->ctrl_id = id;
+ dfx_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = dfx_create_bulk_endpoints(dev, &dfx_fullspeed_in_desc,
+ &dfx_fullspeed_out_desc,
+ &dfx_superspeed_in_comp_desc,
+ &dfx_superspeed_out_comp_desc
+ );
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ dfx_highspeed_in_desc.bEndpointAddress =
+ dfx_fullspeed_in_desc.bEndpointAddress;
+ dfx_highspeed_out_desc.bEndpointAddress =
+ dfx_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ dfx_superspeed_in_desc.bEndpointAddress =
+ dfx_fullspeed_in_desc.bEndpointAddress;
+
+ dfx_superspeed_out_desc.bEndpointAddress =
+ dfx_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ pr_info("%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+/* Function unbind: reset all state flags and free every allocated
+ * request.  NOTE(review): requests still sitting on tx_xfer are not
+ * freed here — verify they always migrate back to tx_idle first. */
+static void
+dvc_dfx_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct dvc_dfx_dev *dev = func_to_dvc_dfx(f);
+ struct usb_request *req;
+ int i;
+
+ dev->online = 0;
+ dev->online_ctrl = 0;
+ dev->online_data = 0;
+ dev->transfering = 0;
+ dev->error = 0;
+
+ dfx_string_defs[DVCDFX_CTRL_IDX].id = 0;
+
+ wake_up(&dev->read_wq);
+
+ for (i = 0; i < DFX_RX_REQ_MAX; i++)
+ dvc_dfx_request_free(dev->rx_req[i], dev->ep_out);
+ while ((req = dvc_dfx_req_get(dev, &dev->tx_idle)))
+ dvc_dfx_request_free(req, dev->ep_in);
+
+}
+
+/* SET_INTERFACE handler: enable both bulk endpoints when the data
+ * interface is selected; the function is 'online' only once both the
+ * data and control interfaces have been configured. */
+static int dvc_dfx_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct dvc_dfx_dev *dev = func_to_dvc_dfx(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ pr_info("%s intf: %d alt: %d\n", __func__, intf, alt);
+ if (intf == dfx_data_interface_desc.bInterfaceNumber) {
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret) {
+ pr_err("%s intf: %d alt: %d ep_by_speed in error %d\n",
+ __func__, intf, alt, ret);
+ return ret;
+ }
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret) {
+ pr_err("%s intf: %d alt: %d ep_enable in err %d\n",
+ __func__, intf, alt, ret);
+ return ret;
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret) {
+ pr_err("%s intf: %d alt: %d ep_enable out error %d\n",
+ __func__, intf, alt, ret);
+ return ret;
+ }
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ pr_err("%s intf: %d alt: %d ep_enable out err %d\n",
+ __func__, intf, alt, ret);
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->online_data = 1;
+ }
+ if (intf == dfx_interface_desc.bInterfaceNumber)
+ dev->online_ctrl = 1;
+
+ if (dev->online_data && dev->online_ctrl) {
+ dev->online = 1;
+ dev->error = 0;
+ }
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+/* Host deconfigured/reset the function: stop any active transfer,
+ * clear all state flags, disable both endpoints and wake waiters. */
+static void dvc_dfx_function_disable(struct usb_function *f)
+{
+ struct dvc_dfx_dev *dev = func_to_dvc_dfx(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ pr_info("%s cdev %p\n", __func__, cdev);
+
+ if (dev->transfering)
+ dvc_dfx_disable_transfer();
+
+ dev->online = 0;
+ dev->online_ctrl = 0;
+ dev->online_data = 0;
+ dev->error = 0;
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ pr_debug("%s disabled\n", dev->function.name);
+}
+
+/* Add the DvC.DFx function to configuration @c: allocate the string
+ * descriptor IDs once, fill in the usb_function callbacks and register
+ * with the composite core.  Returns 0 or a negative errno. */
+static int dvc_dfx_bind_config(struct usb_configuration *c)
+{
+ struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+ int status;
+
+ pr_info("%s\n", __func__);
+
+ if (dfx_string_defs[DVCDFX_CTRL_IDX].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ dfx_string_defs[DVCDFX_CTRL_IDX].id = status;
+
+ dfx_interface_desc.iInterface = status;
+
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ dfx_string_defs[DVCDFX_DATA_IDX].id = status;
+
+ dfx_data_interface_desc.iInterface = status;
+
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ dfx_string_defs[DVCDFX_IAD_IDX].id = status;
+
+ dfx_iad_desc.iFunction = status;
+ }
+
+ dev->cdev = c->cdev;
+ dev->function.name = "dvcdfx";
+ dev->function.fs_descriptors = fs_dfx_descs;
+ dev->function.hs_descriptors = hs_dfx_descs;
+ dev->function.ss_descriptors = ss_dfx_descs;
+ dev->function.strings = dfx_strings;
+ dev->function.bind = dvc_dfx_function_bind;
+ dev->function.unbind = dvc_dfx_function_unbind;
+ dev->function.set_alt = dvc_dfx_function_set_alt;
+ dev->function.disable = dvc_dfx_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+/*
+ * Module-level setup: allocate the singleton device state and register
+ * the /dev/usb_dvc_dfx misc device.  Returns 0 or a negative errno.
+ */
+static int dvc_dfx_setup(void)
+{
+ struct dvc_dfx_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->tx_xfer);
+
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->read_excl, 0);
+ atomic_set(&dev->write_excl, 0);
+
+ ret = misc_register(&dvc_dfx_device);
+ if (ret)
+ goto err;
+
+ /* publish the singleton only after registration succeeds; the
+ * original assigned it before misc_register() and left a dangling
+ * pointer behind (kfree'd but never cleared) on failure */
+ _dvc_dfx_dev = dev;
+
+ return 0;
+
+err:
+ kfree(dev);
+ pr_err("DvC.Dfx gadget driver failed to initialize\n");
+ return ret;
+}
+
+/* Module-level teardown: deregister the misc device and release the
+ * singleton state (mirror of dvc_dfx_setup()). */
+static void dvc_dfx_cleanup(void)
+{
+ misc_deregister(&dvc_dfx_device);
+
+ kfree(_dvc_dfx_dev);
+ _dvc_dfx_dev = NULL;
+}
--- /dev/null
+/*
+ * Gadget Driver for Android DvC.Trace Debug Capability
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/usb/debug.h>
+#include <linux/sdm.h>
+
+/* Three TX requests for the endless IN trace stream. */
+#define TRACE_TX_REQ_MAX 3
+
+/* NOTE(review): defining a CONFIG_* symbol inside a source file bypasses
+ * Kconfig — this should come from the build configuration; confirm. */
+#define CONFIG_BOARD_MRFLD_VV
+
+/*
+ * Driver state for the DvC.Trace USB function (IN-only trace stream).
+ */
+struct dvc_trace_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock; /* protects tx_idle / tx_xfer lists */
+ u8 ctrl_id, data_id; /* interface numbers from usb_interface_id() */
+ u8 class_id, subclass_id;
+
+ struct usb_ep *ep_in; /* single bulk IN endpoint */
+
+ int online;
+ int online_data;
+ int online_ctrl;
+ int transfering;
+ int error;
+
+ /* single-holder try-locks implemented as atomic counters */
+ atomic_t write_excl;
+ atomic_t open_excl;
+
+ wait_queue_head_t write_wq;
+
+ struct list_head tx_idle; /* free TX requests */
+ struct list_head tx_xfer; /* TX requests queued to the UDC */
+};
+
+/* Interface association grouping the DvC.Trace data + control interfaces. */
+static struct usb_interface_assoc_descriptor trace_iad_desc = {
+ .bLength = sizeof(trace_iad_desc),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* debug control + data */
+ .bFunctionClass = USB_CLASS_DEBUG,
+ .bFunctionSubClass = USB_SUBCLASS_DVC_TRACE,
+ /* .bFunctionProtocol = 0, */
+ /* .iFunction = DYNAMIC, */
+};
+
+/* Debug-control interface: no endpoints of its own. */
+static struct usb_interface_descriptor trace_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_DEBUG,
+ .bInterfaceSubClass = USB_SUBCLASS_DEBUG_CONTROL,
+ /* .bInterfaceProtocol = 0, */
+};
+
+#define DC_DVCTRACE_ATTRI_LENGTH DC_DBG_ATTRI_SIZE(2, 32)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define DC_DVCTRACE_TOTAL_LENGTH (DC_DVCTRACE_ATTRI_LENGTH \
+ + DC_OUTPUT_CONNECTION_SIZE \
+ + DC_OUTPUT_CONNECTION_SIZE \
+ + DC_DBG_UNIT_SIZE(STM_NB_IN_PINS, 2, 2, 24))
+
+DECLARE_DC_DEBUG_ATTR_DESCR(DVCT, 2, 32);
+
+/* Class-specific debug-attributes descriptor; values per platform SAS. */
+static struct DC_DEBUG_ATTR_DESCR(DVCT) trace_debug_attri_desc = {
+ .bLength = DC_DVCTRACE_ATTRI_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = DC_DEBUG_ATTRIBUTES,
+ .bcdDC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(DC_DVCTRACE_TOTAL_LENGTH),
+ .bmSupportedFeatures = 0, /* Debug Event Supported, per SAS */
+ .bControlSize = 2,
+ .bmControl = { /* per SAS */
+ [0] = 0xFF,
+ [1] = 0x3F,
+ },
+ .wAuxDataSize = __constant_cpu_to_le16(0x20),
+ .dInputBufferSize = __constant_cpu_to_le32(0x00), /* per SAS */
+ .dOutputBufferSize = __constant_cpu_to_le32(TRACE_BULK_BUFFER_SIZE),
+ .qBaseAddress = 0, /* revision */
+ .hGlobalID = { /* revision */
+ [0] = 0,
+ [1] = 0,
+ }
+};
+
+/* Output connection: trace routed to the USB device connection. */
+static struct dc_output_connection_descriptor trace_output_conn_usb_desc = {
+ .bLength = DC_OUTPUT_CONNECTION_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = DC_OUTPUT_CONNECTION,
+ .bConnectionID = 0x01, /* USB */
+ .bConnectionType = DC_CONNECTION_USB,
+ .bAssocConnection = 0, /* No related input-connection */
+ .wSourceID = __constant_cpu_to_le16(0x01),
+ /* .iConnection = DYNAMIC, */
+};
+
+/* Output connection: trace routed to the PTI debug port. */
+static struct dc_output_connection_descriptor trace_output_conn_pti_desc = {
+ .bLength = DC_OUTPUT_CONNECTION_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = DC_OUTPUT_CONNECTION,
+ .bConnectionID = 0, /* PTI */
+ .bConnectionType = DC_CONNECTION_DEBUG_DATA,
+ .bAssocConnection = 0, /* No related input-connection */
+ .wSourceID = __constant_cpu_to_le16(0x01),
+ /* .iConnection = DYNAMIC, */
+};
+
+#define DC_DVCTRACE_UNIT_LENGTH DC_DBG_UNIT_SIZE(STM_NB_IN_PINS, 2, 2, 24)
+
+DECLARE_DC_DEBUG_UNIT_DESCRIPTOR(STM_NB_IN_PINS, 2, 2, 24);
+
+/* Debug-unit descriptor describing the STM trace processor. */
+static struct DC_DEBUG_UNIT_DESCRIPTOR(STM_NB_IN_PINS, 2, 2, 24)
+ trace_debug_unit_stm_desc = {
+ .bLength = DC_DVCTRACE_UNIT_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = DC_DEBUG_UNIT,
+ .bUnitID = 0x01, /* per SAS */
+/* STM Trace Unit processor: revision */
+ .bDebugUnitType = DC_UNIT_TYPE_TRACE_PROC,
+ /* STM: Trace compressor controller */
+ .bDebugSubUnitType = DC_UNIT_SUBTYPE_TRACEZIP,
+ .bAliasUnitID = 0, /* no associated debug unit */
+ .bNrInPins = STM_NB_IN_PINS, /* p */
+/* wSourceID contains STM_NB_IN_PINS elements */
+/* .wSourceID = {0}, */
+ .bNrOutPins = 0x02, /* q */
+ .dTraceFormat = {
+ [0] = __constant_cpu_to_le32(DC_TRACE_MIPI_FORMATED_STPV1),
+ [1] = __constant_cpu_to_le32(DC_TRACE_MIPI_FORMATED_STPV1),
+ },
+ .dStreamID = __constant_cpu_to_le32(0xFFFFFFFF),
+ .bControlSize = 0x02, /* n */
+ .bmControl = {
+ [0] = 0xFF,
+ [1] = 0x3F,
+ },
+ .wAuxDataSize = __constant_cpu_to_le16(24), /* m */
+ .qBaseAddress = 0, /* revision */
+ .hIPID = {
+ [0] = 0,
+ [1] = 0,
+ },
+ /* .iDebugUnitType = DYNAMIC, */
+};
+
+/* Data interface carrying the single bulk IN endpoint. */
+static struct usb_interface_descriptor trace_data_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_DEBUG,
+ .bInterfaceSubClass = USB_SUBCLASS_DVC_TRACE,
+ /* .bInterfaceProtocol = 0, */
+};
+
+/* Bulk IN endpoint descriptors for all three speeds. */
+static struct usb_endpoint_descriptor trace_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor trace_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor trace_superspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+/* SuperSpeed companion: no bursting, no streams. */
+static struct usb_ss_ep_comp_descriptor trace_superspeed_in_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+};
+
+static struct usb_descriptor_header *fs_trace_descs[] = {
+ (struct usb_descriptor_header *) &trace_iad_desc,
+ (struct usb_descriptor_header *) &trace_data_interface_desc,
+ (struct usb_descriptor_header *) &trace_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &trace_interface_desc,
+ (struct usb_descriptor_header *) &trace_debug_attri_desc,
+ (struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+ (struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+ (struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+ NULL,
+};
+
+/* High-speed variant: same layout with the HS endpoint descriptor. */
+static struct usb_descriptor_header *hs_trace_descs[] = {
+ (struct usb_descriptor_header *) &trace_iad_desc,
+ (struct usb_descriptor_header *) &trace_data_interface_desc,
+ (struct usb_descriptor_header *) &trace_highspeed_in_desc,
+ (struct usb_descriptor_header *) &trace_interface_desc,
+ (struct usb_descriptor_header *) &trace_debug_attri_desc,
+ (struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+ (struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+ (struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+ NULL,
+};
+
+/* SuperSpeed variant adds the endpoint companion descriptor. */
+static struct usb_descriptor_header *ss_trace_descs[] = {
+ (struct usb_descriptor_header *) &trace_iad_desc,
+ (struct usb_descriptor_header *) &trace_data_interface_desc,
+ (struct usb_descriptor_header *) &trace_superspeed_in_desc,
+ (struct usb_descriptor_header *) &trace_superspeed_in_comp_desc,
+ (struct usb_descriptor_header *) &trace_interface_desc,
+ (struct usb_descriptor_header *) &trace_debug_attri_desc,
+ (struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+ (struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+ (struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+ NULL,
+};
+
+/* string descriptors: indices into trace_string_defs[] below */
+#define DVCTRACE_CTRL_IDX 0
+#define DVCTRACE_DATA_IDX 1
+#define DVCTRACE_IAD_IDX 2
+#define DVCTRACE_CONN_PTI_IDX 3
+#define DVCTRACE_CONN_USB_IDX 4
+#define DVCTRACE_UNIT_STM_IDX 5
+
+/* static strings, in UTF-8; .id fields are filled at bind time via usb_string_id() */
+static struct usb_string trace_string_defs[] = {
+ [DVCTRACE_CTRL_IDX].s = "Debug Sub-Class DvC.Trace (Control)",
+ [DVCTRACE_DATA_IDX].s = "Debug Sub-Class DvC.Trace (Data)",
+ [DVCTRACE_IAD_IDX].s = "Debug Sub-Class DvC.Trace",
+ [DVCTRACE_CONN_PTI_IDX].s = "MIPI PTIv1 output Connector ",
+ [DVCTRACE_CONN_USB_IDX].s = "USB Device output connector",
+ [DVCTRACE_UNIT_STM_IDX].s = "MIPI STM Debug Unit",
+ { /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings trace_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = trace_string_defs,
+};
+
+static struct usb_gadget_strings *trace_strings[] = {
+ &trace_string_table,
+ NULL,
+};
+
+/* temporary var used between dvc_trace_open() and dvc_trace_gadget_bind() */
+static struct dvc_trace_dev *_dvc_trace_dev;
+
+static inline struct dvc_trace_dev *func_to_dvc_trace(struct usb_function *f) /* usb_function -> owning dvc_trace_dev */
+{
+ return container_of(f, struct dvc_trace_dev, function);
+}
+
+static int dvc_trace_is_enabled(void) /* 1 when the STM/PTI trace hardware is usable, else 0 */
+{
+ if (!stm_is_enabled()) {
+ pr_info("%s STM/PTI block is not enabled\n", __func__);
+ return 0;
+ }
+ return 1;
+}
+
+static struct usb_request *dvc_trace_request_new(struct usb_ep *ep, /* alloc request + buffer; NULL on failure */
+ int buffer_size, dma_addr_t dma)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->dma = dma; /* NOTE(review): dma is a fixed platform address while buf is kmalloc'd — confirm the UDC uses req->dma (predefined TRB) and never maps req->buf */
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void dvc_trace_request_free(struct usb_request *req, struct usb_ep *ep) /* free buffer then the request; NULL-safe */
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+/* add a request to the tail of a list (dev->lock protects both request lists) */
+static void dvc_trace_req_put(struct dvc_trace_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list; NULL when the list is empty */
+static struct usb_request *dvc_trace_req_get(struct dvc_trace_dev *dev,
+ struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0; /* i.e. NULL */
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void dvc_trace_set_disconnected(struct dvc_trace_dev *dev) /* mark the trace stream as no longer active */
+{
+ dev->transfering = 0;
+}
+
+static void dvc_trace_complete_in(struct usb_ep *ep, struct usb_request *req) /* bulk-IN completion: recycle req, wake writer */
+{
+ struct dvc_trace_dev *dev = _dvc_trace_dev;
+
+ if (req->status != 0) /* non-zero status includes cancellation (-ECONNRESET) and disconnect */
+ dvc_trace_set_disconnected(dev);
+
+ dvc_trace_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static inline int dvc_trace_lock(atomic_t *excl) /* try-lock: 0 on success, -1 when already held */
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
+static inline void dvc_trace_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+static int trace_create_bulk_endpoints(struct dvc_trace_dev *dev, /* claim bulk-IN ep and pre-allocate tx requests */
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_ss_ep_comp_descriptor *in_comp_desc
+ )
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ pr_debug("%s dev: %p\n", __func__, dev);
+
+ in_desc->bEndpointAddress |= 0x1; /* NOTE(review): forces ep1 before autoconfig — confirm this is required by the platform */
+ ep = usb_ep_autoconfig_ss(cdev->gadget, in_desc, in_comp_desc);
+ if (!ep) {
+ pr_err("%s usb_ep_autoconfig for ep_in failed\n", __func__);
+ return -ENODEV;
+ }
+ pr_debug("%s usb_ep_autoconfig for ep_in got %s\n", __func__, ep->name);
+
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ for (i = 0; i < TRACE_TX_REQ_MAX; i++) {
+ req = dvc_trace_request_new(dev->ep_in, TRACE_BULK_BUFFER_SIZE,
+ (dma_addr_t)TRACE_BULK_IN_BUFFER_ADDR);
+ if (!req)
+ goto fail;
+ req->complete = dvc_trace_complete_in;
+ dvc_trace_req_put(dev, &dev->tx_idle, req);
+ pr_debug("%s req=%p : for %s predefined TRB\n", __func__,
+ req, ep->name); /* %p: casting a pointer to uint truncates on 64-bit */
+ }
+
+ return 0;
+
+fail:
+ pr_err("%s could not allocate requests\n", __func__);
+ return -ENOMEM; /* real errno instead of bare -1 */
+}
+
+static ssize_t dvc_trace_start_transfer(size_t count) /* queue up to count bytes of trace data; <0 errno on failure */
+{
+ struct dvc_trace_dev *dev = _dvc_trace_dev;
+ struct usb_request *req = 0;
+ int r = count, xfer; /* r = bytes still to queue */
+ int ret = 0; /* initialized: with count == 0 the loop never runs and ret was returned uninitialized */
+
+ pr_debug("%s\n", __func__);
+ if (!_dvc_trace_dev)
+ return -ENODEV;
+
+ if (dvc_trace_lock(&dev->write_excl))
+ return -EBUSY;
+
+ /* we will block until enumeration completes */
+ while (!(dev->online || dev->error)) {
+ pr_debug("%s: waiting for online state\n", __func__);
+ ret = wait_event_interruptible(dev->write_wq,
+ (dev->online || dev->error));
+
+ if (ret < 0) {
+ /* not at CONFIGURED state */
+ pr_info("%s !dev->online already\n", __func__);
+ dvc_trace_unlock(&dev->write_excl);
+ return ret;
+ }
+ }
+
+ /* queue a ep_in endless request */
+ while (r > 0) {
+ if (dev->error) {
+ pr_debug("%s dev->error\n", __func__);
+ r = -EIO;
+ break;
+ }
+
+ if (!dev->online) {
+ pr_debug("%s !dev->online\n", __func__);
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ dev->error || !dev->online ||
+ (req = dvc_trace_req_get(dev, &dev->tx_idle)));
+
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+
+ if (req != 0) {
+ if (r > TRACE_BULK_BUFFER_SIZE) /* clamp to REMAINING bytes, not total count, or the last chunk over-queues */
+ xfer = TRACE_BULK_BUFFER_SIZE;
+ else
+ xfer = r;
+ pr_debug("%s queue tx_idle list req to dev->ep_in\n",
+ __func__);
+ req->no_interrupt = 1;
+ req->context = &dev->function;
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("%s: xfer error %d\n", __func__, ret);
+ dev->error = 1;
+ dev->transfering = 0;
+ r = -EIO;
+ break;
+ }
+ pr_debug("%s: xfer=%d/%d queued req %p\n", __func__,
+ xfer, r, req); /* %p instead of (uint) cast: truncates on 64-bit */
+ dvc_trace_req_put(dev, &dev->tx_xfer, req);
+ r -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+ }
+ if (req) {
+ pr_debug("%s req re-added to tx_idle on error\n", __func__);
+ dvc_trace_req_put(dev, &dev->tx_idle, req);
+ } else if (r == 0) { /* mark active only when everything queued; error breaks leave r < 0 */
+ dev->transfering = 1;
+ }
+ dvc_trace_unlock(&dev->write_excl);
+ pr_debug("%s end\n", __func__);
+ return r < 0 ? r : ret; /* propagate loop failures; previously a stale ret of 0 hid -EIO breaks */
+}
+
+static int dvc_trace_disable_transfer(void) /* dequeue all in-flight trace requests; 1 on success, <0 errno */
+{
+ struct dvc_trace_dev *dev = _dvc_trace_dev;
+ struct usb_request *req = 0;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+ if (!_dvc_trace_dev)
+ return -ENODEV;
+
+ if (dvc_trace_lock(&dev->write_excl))
+ return -EBUSY;
+
+ if (dev->error) {
+ pr_debug("%s dev->error\n", __func__);
+ dvc_trace_unlock(&dev->write_excl);
+ return -EIO;
+ }
+
+ if ((!dev->online) || (!dev->transfering)) {
+ pr_debug("%s !dev->online OR !dev->transfering\n", __func__);
+ dvc_trace_unlock(&dev->write_excl);
+ return -EIO;
+ }
+
+ /* get an xfer tx request to use */
+ while ((req = dvc_trace_req_get(dev, &dev->tx_xfer))) {
+ ret = usb_ep_dequeue(dev->ep_in, req); /* completion (-ECONNRESET) recycles req onto tx_idle */
+ if (ret < 0) {
+ pr_err("%s: dequeue error %d\n", __func__, ret);
+ dev->error = 1;
+ dvc_trace_unlock(&dev->write_excl);
+ return -EIO;
+ }
+ pr_debug("%s: dequeued req %p\n", __func__, req); /* %p instead of (uint) cast: truncates on 64-bit */
+ }
+ dev->transfering = 0; /* previously left set, leaving the state flags stale after a successful stop */
+ dvc_trace_unlock(&dev->write_excl);
+ return 1;
+}
+
+static int dvc_trace_open(struct inode *ip, struct file *fp) /* single-open char device; resets error/transfer state */
+{
+ pr_debug("%s\n", __func__);
+ if (!_dvc_trace_dev)
+ return -ENODEV;
+
+ if (dvc_trace_lock(&_dvc_trace_dev->open_excl))
+ return -EBUSY;
+
+ fp->private_data = _dvc_trace_dev;
+
+ /* clear the error latch */
+ _dvc_trace_dev->error = 0;
+ _dvc_trace_dev->transfering = 0;
+
+ return 0;
+}
+
+static int dvc_trace_release(struct inode *ip, struct file *fp)
+{
+ pr_debug("%s\n", __func__);
+
+ dvc_trace_unlock(&_dvc_trace_dev->open_excl);
+ return 0;
+}
+
+/* file operations for DvC.Trace device /dev/usb_dvc_trace */
+static const struct file_operations dvc_trace_fops = { /* no read/write: transfers are driven via ep0 class requests */
+ .owner = THIS_MODULE,
+ .open = dvc_trace_open,
+ .release = dvc_trace_release,
+};
+
+static struct miscdevice dvc_trace_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usb_dvc_trace",
+ .fops = &dvc_trace_fops,
+};
+
+static int dvc_trace_ctrlrequest(struct usb_composite_dev *cdev, /* handle DvC class-specific ep0 requests */
+ const struct usb_ctrlrequest *ctrl)
+{
+
+ struct dvc_trace_dev *dev = _dvc_trace_dev;
+ int value = -EOPNOTSUPP; /* <0 means "not handled" -> caller stalls ep0 */
+ int ret;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ pr_debug("%s %02x.%02x v%04x i%04x l%u\n", __func__,
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* DC_REQUEST_SET_RESET ... stop active transfer */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | DC_REQUEST_SET_RESET:
+ if (w_index != dev->data_id) /* must target the data interface */
+ goto invalid;
+
+ pr_info("%s DC_REQUEST_SET_RESET v%04x i%04x l%u\n", __func__,
+ w_value, w_index, w_length);
+
+ dvc_trace_disable_transfer();
+ value = 0; /* status-only: zero-length data stage */
+ break;
+
+ /* DC_REQUEST_SET_TRACE ... start trace transfer */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | DC_REQUEST_SET_TRACE:
+
+ pr_info("%s DC_REQUEST_SET_TRACE v%04x i%04x l%u\n", __func__,
+ w_value, w_index, w_length);
+
+ if (!w_index) /* w_index == 0 means "trace off" */
+ ret = dvc_trace_disable_transfer();
+ else
+ ret = dvc_trace_start_transfer(4096);
+
+ if (ret < 0)
+ value = -EINVAL;
+ else
+ value = (int) w_index; /* NOTE(review): used as data-stage length below, not clamped to w_length — confirm host always sends wLength >= wIndex */
+ break;
+
+ default:
+invalid:
+ pr_debug("unknown class-specific control req "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ cdev->req->zero = 0;
+ cdev->req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("%s setup response queue error\n", __func__);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int
+dvc_trace_function_bind(struct usb_configuration *c, struct usb_function *f) /* allocate strings, interface ids, endpoints */
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct dvc_trace_dev *dev = func_to_dvc_trace(f);
+ int id;
+ int ret;
+ int status;
+
+ dev->cdev = cdev;
+ pr_debug("%s dev: %p\n", __func__, dev);
+
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ if (trace_string_defs[DVCTRACE_CTRL_IDX].id == 0) { /* 0 means "not yet allocated"; unbind resets it */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ trace_string_defs[DVCTRACE_DATA_IDX].id = status;
+ trace_data_interface_desc.iInterface = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ trace_string_defs[DVCTRACE_CTRL_IDX].id = status;
+ trace_interface_desc.iInterface = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ trace_string_defs[DVCTRACE_IAD_IDX].id = status;
+ trace_iad_desc.iFunction = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ trace_string_defs[DVCTRACE_CONN_PTI_IDX].id = status;
+ trace_output_conn_pti_desc.iConnection = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ trace_string_defs[DVCTRACE_CONN_USB_IDX].id = status;
+ trace_output_conn_usb_desc.iConnection = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ trace_string_defs[DVCTRACE_UNIT_STM_IDX].id = status;
+ trace_debug_unit_stm_desc.iDebugUnitType = status;
+ }
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ dev->data_id = id;
+ trace_data_interface_desc.bInterfaceNumber = id;
+ trace_iad_desc.bFirstInterface = id; /* IAD groups data + control interfaces */
+
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ dev->ctrl_id = id;
+ trace_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = trace_create_bulk_endpoints(dev, &trace_fullspeed_in_desc,
+ &trace_superspeed_in_comp_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) { /* HS/SS reuse the ep address autoconfig chose for FS */
+ trace_highspeed_in_desc.bEndpointAddress =
+ trace_fullspeed_in_desc.bEndpointAddress;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ trace_superspeed_in_desc.bEndpointAddress =
+ trace_fullspeed_in_desc.bEndpointAddress;
+ }
+
+ pr_debug("%s speed %s: IN/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name);
+ return 0;
+}
+
+static void
+dvc_trace_function_unbind(struct usb_configuration *c, struct usb_function *f) /* tear down state and free idle requests */
+{
+ struct dvc_trace_dev *dev = func_to_dvc_trace(f);
+ struct usb_request *req;
+
+ dev->online = 0;
+ dev->online_data = 0;
+ dev->online_ctrl = 0;
+ dev->error = 0;
+ trace_string_defs[DVCTRACE_CTRL_IDX].id = 0; /* force string-id realloc on next bind */
+
+ wake_up(&dev->write_wq);
+
+ while ((req = dvc_trace_req_get(dev, &dev->tx_idle))) /* NOTE(review): requests still on tx_xfer are not freed here — confirm they always complete back to tx_idle first */
+ dvc_trace_request_free(req, dev->ep_in);
+}
+
+static int dvc_trace_function_set_alt(struct usb_function *f, /* host selected our interfaces: enable the data path */
+ unsigned intf, unsigned alt)
+{
+ struct dvc_trace_dev *dev = func_to_dvc_trace(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ pr_debug("%s intf: %d alt: %d\n", __func__, intf, alt);
+
+ if (intf == trace_interface_desc.bInterfaceNumber)
+ dev->online_ctrl = 1;
+
+ if (intf == trace_data_interface_desc.bInterfaceNumber) {
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret) {
+ pr_err("%s intf: %d alt: %d ep_by_speed in err %d\n",
+ __func__, intf, alt, ret);
+ return ret;
+ }
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret) {
+ pr_err("%s intf: %d alt: %d ep_enable in err %d\n",
+ __func__, intf, alt, ret);
+ return ret;
+ }
+ dev->online_data = 1;
+ }
+
+ if (dev->online_data && dev->online_ctrl) { /* fully online only after BOTH interfaces are configured */
+ dev->online = 1;
+ dev->transfering = 0;
+ }
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->write_wq);
+ return 0;
+}
+
+static void dvc_trace_function_disable(struct usb_function *f) /* host deconfigured us: stop transfers, drop online state */
+{
+ struct dvc_trace_dev *dev = func_to_dvc_trace(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ pr_debug("%s dev %p\n", __func__, cdev);
+
+ if (dev->transfering)
+ dvc_trace_disable_transfer();
+
+ dev->online = 0;
+ dev->online_data = 0;
+ dev->online_ctrl = 0;
+ dev->error = 0;
+ usb_ep_disable(dev->ep_in);
+
+ /* writer may be blocked waiting for us to go online */
+ wake_up(&dev->write_wq);
+
+ pr_debug("%s : %s disabled\n", __func__, dev->function.name);
+}
+
+static int dvc_trace_bind_config(struct usb_configuration *c) /* wire callbacks/descriptors and add the function */
+{
+ struct dvc_trace_dev *dev = _dvc_trace_dev;
+
+ pr_debug("%s\n", __func__);
+
+ dev->cdev = c->cdev;
+ dev->function.name = "dvctrace";
+ dev->function.strings = trace_strings;
+ dev->function.fs_descriptors = fs_trace_descs;
+ dev->function.hs_descriptors = hs_trace_descs;
+ dev->function.ss_descriptors = ss_trace_descs;
+ dev->function.bind = dvc_trace_function_bind;
+ dev->function.unbind = dvc_trace_function_unbind;
+ dev->function.set_alt = dvc_trace_function_set_alt;
+ dev->function.disable = dvc_trace_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int dvc_trace_setup(void) /* allocate device state and register /dev/usb_dvc_trace */
+{
+ struct dvc_trace_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->tx_xfer);
+
+ init_waitqueue_head(&dev->write_wq);
+
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->write_excl, 0);
+
+ ret = misc_register(&dvc_trace_device);
+ if (ret)
+ goto err;
+
+ /* publish only after registration succeeds: previously a failed register left _dvc_trace_dev dangling after kfree */
+ _dvc_trace_dev = dev;
+ return 0;
+
+err:
+ kfree(dev);
+ pr_err("DvC.Trace gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void dvc_trace_cleanup(void) /* unregister the misc device and release state */
+{
+ misc_deregister(&dvc_trace_device);
+
+ kfree(_dvc_trace_dev);
+ _dvc_trace_dev = NULL; /* guards every "!_dvc_trace_dev" check above */
+}
fsg->common->new_fsg = NULL;
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
/* FIXME: make interruptible or killable somehow? */
- wait_event(common->fsg_wait, common->fsg != fsg);
+ wait_event_timeout(common->fsg_wait, common->fsg != fsg, msecs_to_jiffies(1000));
}
fsg_common_put(common);
#define __GADGET_CHIPS_H
#include <linux/usb/gadget.h>
+#include <asm/intel-mid.h>
/*
* NOTICE: the entries below are alphabetical and should be kept
*/
#define gadget_is_at91(g) (!strcmp("at91_udc", (g)->name))
#define gadget_is_goku(g) (!strcmp("goku_udc", (g)->name))
+#define gadget_is_middwc3tng(g) ((!strcmp("dwc3-gadget", (g)->name)) && \
+ (intel_mid_identify_cpu() == \
+ INTEL_MID_CPU_CHIP_TANGIER))
#define gadget_is_musbhdrc(g) (!strcmp("musb-hdrc", (g)->name))
#define gadget_is_net2280(g) (!strcmp("net2280", (g)->name))
#define gadget_is_pxa(g) (!strcmp("pxa25x_udc", (g)->name))
#define gadget_is_pxa27x(g) (!strcmp("pxa27x_udc", (g)->name))
-
/**
* gadget_supports_altsettings - return true if altsettings work
* @gadget: the gadget in question
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include "u_serial.h"
#if defined USB_ETH_RNDIS
USB_GADGET_COMPOSITE_OPTIONS();
+static char ethernet_config[6];
+module_param_string(ethernet_config, ethernet_config, sizeof(ethernet_config),
+ 0444);
+MODULE_PARM_DESC(ethernet_config,
+ "ethernet configuration : should be cdc or rndis");
+
/***************************** Device Descriptor ****************************/
#define MULTI_VENDOR_NUM 0x1d6b /* Linux Foundation */
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* register configurations */
- status = rndis_config_register(cdev);
- if (unlikely(status < 0))
- goto fail2;
+ /* RNDIS configuration */
+ if (strncmp(ethernet_config, "rndis", 5) == 0) {
+ status = rndis_config_register(cdev);
+
+ if (unlikely(status < 0))
+ goto fail2;
+ } else if (strncmp(ethernet_config, "cdc", 3) == 0) {
+ /* CDC ECM configuration */
+ status = cdc_config_register(cdev);
+
+ if (unlikely(status < 0))
+ goto fail2;
+ } else {
+ status = rndis_config_register(cdev);
+
+ if (unlikely(status < 0))
+ goto fail2;
+
+ status = cdc_config_register(cdev);
+
+ if (unlikely(status < 0))
+ goto fail2;
+ }
+
- status = cdc_config_register(cdev);
- if (unlikely(status < 0))
- goto fail2;
usb_composite_overwrite_options(cdev, &coverwrite);
/* we're done */
*/
size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
size += dev->port_usb->header_len;
- size += out->maxpacket - 1;
- size -= size % out->maxpacket;
if (dev->port_usb->is_fixed)
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
{
tasklet_kill(&port->push);
/* wait for old opens to finish */
- wait_event(port->port.close_wait, gs_closed(port));
+ wait_event_timeout(port->port.close_wait, gs_closed(port), msecs_to_jiffies(1000));
WARN_ON(port->port_usb != NULL);
tty_port_destroy(&port->port);
kfree(port);
#include <linux/list.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
/* ------------------------------------------------------------------------- */
+static void usb_gadget_state_work(struct work_struct *work)
+{
+ struct usb_gadget *gadget = work_to_gadget(work);
+
+ sysfs_notify(&gadget->dev.kobj, NULL, "status");
+}
+
void usb_gadget_set_state(struct usb_gadget *gadget,
enum usb_device_state state)
{
gadget->state = state;
- sysfs_notify(&gadget->dev.kobj, NULL, "state");
+ schedule_work(&gadget->work);
}
EXPORT_SYMBOL_GPL(usb_gadget_set_state);
goto err1;
dev_set_name(&gadget->dev, "gadget");
+ INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
usb_gadget_remove_driver(udc);
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ flush_work(&gadget->work);
device_unregister(&udc->dev);
device_unregister(&gadget->dev);
}
static int __init ehci_pci_init(void)
{
- if (usb_disabled())
+ //if (usb_disabled())
return -ENODEV;
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
};
MODULE_ALIAS("platform:xhci-hcd");
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+#include "../dwc3/dwc3-host-intel.c"
+#endif
+
int xhci_register_plat(void)
{
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+ return platform_driver_register(&dwc3_xhci_driver);
+#endif
return platform_driver_register(&usb_xhci_driver);
}
void xhci_unregister_plat(void)
{
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+ platform_driver_unregister(&dwc3_xhci_driver);
+ return;
+#endif
platform_driver_unregister(&usb_xhci_driver);
}
xhci_free_irq(xhci);
+ if (xhci->quirks & XHCI_PLAT)
+ return;
+
if (xhci->msix_entries) {
pci_disable_msix(pdev);
kfree(xhci->msix_entries);
--- /dev/null
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+/* This driver helps to switch Penwell OTG controller function between host
+ * and peripheral. It works with EHCI driver and Penwell client controller
+ * driver together.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/gpio.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/wakelock.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+#include "../core/usb.h"
+#include <linux/intel_mid_pm.h>
+
+#include <linux/usb/penwell_otg.h>
+
+#define DRIVER_DESC "Intel Penwell USB OTG transceiver driver"
+#define DRIVER_VERSION "0.8"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "penwell_otg";
+
+static void penwell_otg_remove(struct pci_dev *pdev);
+
+static int penwell_otg_set_host(struct usb_otg *otg, struct usb_bus *host);
+static int penwell_otg_set_peripheral(struct usb_otg *otg,
+ struct usb_gadget *gadget);
+static int penwell_otg_start_srp(struct usb_otg *otg);
+static void penwell_otg_mon_bus(void);
+
+static int penwell_otg_msic_write(u16 addr, u8 data);
+
+static void penwell_otg_phy_low_power(int on);
+static int penwell_otg_ulpi_read(struct intel_mid_otg_xceiv *iotg,
+ u8 reg, u8 *val);
+static int penwell_otg_ulpi_write(struct intel_mid_otg_xceiv *iotg,
+ u8 reg, u8 val);
+static void penwell_spi_reset_phy(void);
+static int penwell_otg_charger_hwdet(bool enable);
+static void update_hsm(void);
+static void set_client_mode(void);
+
+#ifdef CONFIG_DEBUG_FS
+unsigned int *pm_sss0_base; /* mapped power-state status register; set elsewhere */
+
+int check_pm_otg(void) /* non-zero when the OTG island looks powered down (bits 12-13 set) */
+{
+ /* check whether bit 12 and 13 are 0 */
+ /* printk(">>>>leon, pm_sss0_base:0x%x\n", *(pm_sss0_base)); */
+ if (pm_sss0_base)
+ return (*pm_sss0_base) & 0x3000;
+ else
+ return 0;
+}
+#ifdef readl
+#undef readl
+#endif
+#ifdef writel
+#undef writel
+#endif
+/* debug-only overrides: panic on register access while the island is powered down */
+#define readl(addr) ({ if (check_pm_otg()) { \
+ panic("usb otg, read reg:%p, pm_sss0_base:0x%x", \
+ addr, *(pm_sss0_base)); }; __le32_to_cpu(__raw_readl(addr)); })
+#define writel(b, addr) ({ if (check_pm_otg()) { \
+ panic("usb otg, write reg:%p, pm_sss0_base:0x%x", \
+ addr, *(pm_sss0_base)); }; __raw_writel(__cpu_to_le32(b), addr); })
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#include <linux/suspend.h>
+DECLARE_WAIT_QUEUE_HEAD(stop_host_wait);
+atomic_t pnw_sys_suspended; /* 1 while a system suspend is in progress */
+
+static int pnw_sleep_pm_callback(struct notifier_block *nfb, /* track system suspend state for PNW_PM_RESUME_WAIT */
+ unsigned long action, void *ignored)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ atomic_set(&pnw_sys_suspended, 1);
+ return NOTIFY_OK;
+ case PM_POST_SUSPEND:
+ atomic_set(&pnw_sys_suspended, 0);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pnw_sleep_pm_notifier = {
+ .notifier_call = pnw_sleep_pm_callback,
+ .priority = 0
+};
+
+/* the root hub will call this callback when device added/removed */
+static int otg_notify(struct notifier_block *nb, unsigned long action,
+ struct usb_device *udev)
+{
+ struct usb_phy *otg;
+ struct intel_mid_otg_xceiv *iotg;
+
+ /* skip bus add/remove notification, else access udev->parent could
+ * panic if bus register and unregister quickly(setup failed). And we
+ * do not care bus event.
+ */
+ if (action == USB_BUS_ADD || action == USB_BUS_REMOVE)
+ return NOTIFY_DONE;
+
+ /* Ignore root hub add/remove event */
+ if (!udev->parent) {
+ pr_debug("%s Ignore root hub otg_notify\n", __func__);
+ return NOTIFY_DONE;
+ }
+
+ /* Ignore USB devices on external hub */
+ if (udev->parent && udev->parent->parent) {
+ pr_debug("%s Ignore USB devices on external hub\n", __func__);
+ return NOTIFY_DONE;
+ }
+
+ otg = usb_get_phy(USB_PHY_TYPE_USB2); /* takes a reference; released on every exit path below */
+ if (otg == NULL) {
+ pr_err("%s: failed to get otg transceiver\n", __func__);
+ return NOTIFY_BAD;
+ }
+ iotg = otg_to_mid_xceiv(otg);
+
+ switch (action) {
+ case USB_DEVICE_ADD:
+ pr_debug("Notify OTG HNP add device\n");
+ atomic_notifier_call_chain(&iotg->iotg_notifier,
+ MID_OTG_NOTIFY_CONNECT, iotg);
+ break;
+ case USB_DEVICE_REMOVE:
+ pr_debug("Notify OTG HNP delete device\n");
+ atomic_notifier_call_chain(&iotg->iotg_notifier,
+ MID_OTG_NOTIFY_DISCONN, iotg);
+ break;
+ case USB_OTG_TESTDEV:
+ pr_debug("Notify OTG test device\n");
+ atomic_notifier_call_chain(&iotg->iotg_notifier,
+ MID_OTG_NOTIFY_TEST, iotg);
+ break;
+ case USB_OTG_TESTDEV_VBUSOFF:
+ pr_debug("Notify OTG test device, Vbusoff mode\n");
+ atomic_notifier_call_chain(&iotg->iotg_notifier,
+ MID_OTG_NOTIFY_TEST_VBUS_OFF, iotg);
+ break;
+ default:
+ usb_put_phy(otg);
+ return NOTIFY_DONE;
+ }
+ usb_put_phy(otg);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block otg_nb = {
+ .notifier_call = otg_notify,
+};
+
+#define PNW_PM_RESUME_WAIT(a) do { \
+ while (atomic_read(&pnw_sys_suspended)) { \
+ wait_event_timeout(a, false, HZ/100); \
+ } \
+ } while (0)
+#else
+
+#define PNW_PM_RESUME_WAIT(a) /* no-op when CONFIG_PM_SLEEP is off */
+
+#endif
+
+/* stop the host role, but only after any in-flight system suspend finishes */
+#define PNW_STOP_HOST(pnw) do { \
+ if ((pnw)->iotg.stop_host) { \
+ PNW_PM_RESUME_WAIT(stop_host_wait); \
+ (pnw)->iotg.stop_host(&(pnw)->iotg); \
+ } \
+ } while (0)
+
+inline int is_clovertrail(struct pci_dev *pdev) /* match the Clovertrail OTG PCI id (8086:E006) */
+{
+ return (pdev->vendor == 0x8086 && pdev->device == 0xE006);
+}
+EXPORT_SYMBOL_GPL(is_clovertrail);
+
+static const char *state_string(enum usb_otg_state state) /* OTG FSM state -> printable name (for logs/sysfs) */
+{
+ switch (state) {
+ case OTG_STATE_A_IDLE:
+ return "a_idle";
+ case OTG_STATE_A_WAIT_VRISE:
+ return "a_wait_vrise";
+ case OTG_STATE_A_WAIT_BCON:
+ return "a_wait_bcon";
+ case OTG_STATE_A_HOST:
+ return "a_host";
+ case OTG_STATE_A_SUSPEND:
+ return "a_suspend";
+ case OTG_STATE_A_PERIPHERAL:
+ return "a_peripheral";
+ case OTG_STATE_A_WAIT_VFALL:
+ return "a_wait_vfall";
+ case OTG_STATE_A_VBUS_ERR:
+ return "a_vbus_err";
+ case OTG_STATE_B_IDLE:
+ return "b_idle";
+ case OTG_STATE_B_PERIPHERAL:
+ return "b_peripheral";
+ case OTG_STATE_B_WAIT_ACON:
+ return "b_wait_acon";
+ case OTG_STATE_B_HOST:
+ return "b_host";
+ default:
+ return "UNDEFINED";
+ }
+}
+
+static const char *charger_string(enum usb_charger_type charger) /* charger type -> printable name */
+{
+ switch (charger) {
+ case CHRG_SDP:
+ return "Standard Downstream Port";
+ case CHRG_SDP_INVAL:
+ return "Invalid Standard Downstream Port";
+ case CHRG_CDP:
+ return "Charging Downstream Port";
+ case CHRG_DCP:
+ return "Dedicated Charging Port";
+ case CHRG_ACA:
+ return "Accessory Charger Adaptor";
+ case CHRG_SE1:
+ return "SE1 Charger";
+ case CHRG_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Undefined";
+ }
+}
+
+static const char *psc_string(enum power_supply_charger_cable_type charger) /* power-supply cable type -> printable name */
+{
+ switch (charger) {
+ case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+ return "Standard Downstream Port";
+ case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+ return "Charging Downstream Port";
+ case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+ return "Dedicated Charging Port";
+ case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+ return "Accessory Charger Adaptor";
+ case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+ return "Accessory Charger Adaptor Dock";
+ case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+ return "Accessory Charger Adaptor Type A";
+ case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+ return "Accessory Charger Adaptor Type B";
+ case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+ return "Accessory Charger Adaptor Type C";
+ case POWER_SUPPLY_CHARGER_TYPE_SE1:
+ return "SE1 Charger";
+ case POWER_SUPPLY_CHARGER_TYPE_NONE:
+ return "Unknown";
+ default:
+ return "Undefined";
+ }
+}
+
+
+static struct penwell_otg *the_transceiver; /* singleton; set at probe time */
+
+void penwell_update_transceiver(void) /* kick the OTG state-machine work item (unless queueing is stopped) */
+{
+ struct penwell_otg *pnw = the_transceiver;
+ unsigned long flags;
+
+
+ if (!pnw->qwork) {
+ dev_warn(pnw->dev, "no workqueue for state machine\n");
+ return;
+ }
+
+ spin_lock_irqsave(&pnw->lock, flags);
+ if (!pnw->queue_stop) { /* queue_stop is set during teardown/suspend paths */
+ queue_work(pnw->qwork, &pnw->work);
+ dev_dbg(pnw->dev, "transceiver is updated\n");
+ }
+ spin_unlock_irqrestore(&pnw->lock, flags);
+}
+
+static int penwell_otg_set_host(struct usb_otg *otg, struct usb_bus *host) /* usb_otg callback: record the host bus */
+{
+ otg->host = host;
+
+ return 0;
+}
+
+static int penwell_otg_set_peripheral(struct usb_otg *otg, /* usb_otg callback: record the gadget */
+ struct usb_gadget *gadget)
+{
+ otg->gadget = gadget;
+
+ return 0;
+}
+
+static void penwell_otg_set_charger(enum usb_charger_type charger) /* latch detected charger type into charging_cap */
+{
+ struct penwell_otg *pnw = the_transceiver;
+
+ dev_dbg(pnw->dev, "%s ---> %s\n", __func__,
+ charger_string(charger));
+
+ switch (charger) {
+ case CHRG_SDP:
+ case CHRG_DCP:
+ case CHRG_CDP:
+ case CHRG_ACA:
+ case CHRG_SDP_INVAL:
+ case CHRG_SE1:
+ case CHRG_UNKNOWN:
+ pnw->charging_cap.chrg_type = charger;
+ break;
+ default:
+ dev_warn(pnw->dev, "undefined charger type\n");
+ break;
+ }
+
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * Update the cached charging capability (type + current limit in mA)
+ * and, when the transition maps to a charger event, notify the
+ * registered battery-charging callback.  Caller holds charger_lock.
+ *
+ * Fixes: initialize 'event' (it was only flag-guarded) and add the
+ * missing break at the end of the CHRG_SDP_INVAL case, which silently
+ * fell through into default.
+ */
+static void _penwell_otg_update_chrg_cap(enum usb_charger_type charger,
+				unsigned ma)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	int flag = 0;
+	int event = 0, retval;
+
+	dev_dbg(pnw->dev, "%s = %s, %d --->\n", __func__,
+			charger_string(charger), ma);
+
+	/* Check charger type information */
+	if (pnw->charging_cap.chrg_type != charger) {
+		/* Only from/to UNKNOWN, or SDP -> invalid SDP, may switch */
+		if (pnw->charging_cap.chrg_type == CHRG_UNKNOWN ||
+				charger == CHRG_UNKNOWN) {
+			penwell_otg_set_charger(charger);
+		} else if (pnw->charging_cap.chrg_type == CHRG_SDP &&
+				charger == CHRG_SDP_INVAL) {
+			penwell_otg_set_charger(charger);
+		} else
+			return;
+	} else {
+		/* Do nothing if no update for current */
+		if (pnw->charging_cap.ma == ma)
+			return;
+	}
+
+	/* Map the old-ma -> new-ma transition to a charger event */
+	switch (pnw->charging_cap.chrg_type) {
+	case CHRG_SDP:
+		if (pnw->charging_cap.ma == CHRG_CURR_DISCONN
+				&& (ma == CHRG_CURR_SDP_LOW
+					|| ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else if (pnw->charging_cap.ma == CHRG_CURR_SDP_LOW
+				&& ma == CHRG_CURR_SDP_HIGH) {
+			/* SDP event: configuration update */
+			event = USBCHRG_EVENT_UPDATE;
+			flag = 1;
+		} else if (pnw->charging_cap.ma == CHRG_CURR_SDP_HIGH
+				&& ma == CHRG_CURR_SDP_LOW) {
+			/* SDP event: configuration update */
+			event = USBCHRG_EVENT_UPDATE;
+			flag = 1;
+		} else if (pnw->charging_cap.ma == CHRG_CURR_SDP_SUSP
+				&& (ma == CHRG_CURR_SDP_LOW
+					|| ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: resume from suspend state */
+			event = USBCHRG_EVENT_RESUME;
+			flag = 1;
+		} else if ((pnw->charging_cap.ma == CHRG_CURR_SDP_LOW
+				|| pnw->charging_cap.ma == CHRG_CURR_SDP_HIGH)
+				&& ma == CHRG_CURR_SDP_SUSP) {
+			/* SDP event: enter suspend state */
+			event = USBCHRG_EVENT_SUSPEND;
+			flag = 1;
+		} else if (ma == 0) {
+			event = USBCHRG_EVENT_DISCONN;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "SDP: no need to update EM\n");
+		break;
+	case CHRG_DCP:
+		if (ma == CHRG_CURR_DCP) {
+			/* DCP event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "DCP: no need to update EM\n");
+		break;
+	case CHRG_SE1:
+		if (ma == CHRG_CURR_SE1) {
+			/* SE1 event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "SE1: no need to update EM\n");
+		break;
+	case CHRG_CDP:
+		if (pnw->charging_cap.ma == CHRG_CURR_DISCONN
+				&& ma == CHRG_CURR_CDP) {
+			/* CDP event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "CDP: no need to update EM\n");
+		break;
+	case CHRG_UNKNOWN:
+		if (ma == CHRG_CURR_DISCONN) {
+			/* event: chargers disconnect */
+			event = USBCHRG_EVENT_DISCONN;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "UNKNOWN: no need to update EM\n");
+		break;
+	case CHRG_SDP_INVAL:
+		if (ma == CHRG_CURR_SDP_INVAL) {
+			event = USBCHRG_EVENT_UPDATE;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "SDP_INVAL: no need to update EM\n");
+		/* explicit break: was an implicit fall-through to default */
+		break;
+	default:
+		break;
+	}
+
+	if (flag) {
+		pnw->charging_cap.ma = ma;
+		pnw->charging_cap.current_event = event;
+
+		/* Notify EM the charging current update */
+		dev_dbg(pnw->dev, "Notify EM charging capability change\n");
+		dev_dbg(pnw->dev, "%s event = %d ma = %d\n",
+			charger_string(pnw->charging_cap.chrg_type), event, ma);
+
+		if (pnw->bc_callback) {
+			retval = pnw->bc_callback(pnw->bc_arg, event,
+					&pnw->charging_cap);
+			if (retval)
+				dev_dbg(pnw->dev,
+					"bc callback return %d\n", retval);
+		}
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/* Translate a driver-internal charger type into the power_supply
+ * framework cable type.  Unknown values map to TYPE_NONE. */
+static enum power_supply_charger_cable_type usb_chrg_to_power_supply_chrg(
+		enum usb_charger_type chrg_type)
+{
+	switch (chrg_type) {
+	case CHRG_SDP:
+	case CHRG_SDP_INVAL:
+		/* An invalid SDP is still reported as an SDP cable */
+		return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+	case CHRG_CDP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	case CHRG_DCP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+	case CHRG_ACA:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_ACA;
+	case CHRG_ACA_DOCK:
+		return POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+	case CHRG_ACA_A:
+		return POWER_SUPPLY_CHARGER_TYPE_ACA_A;
+	case CHRG_ACA_B:
+		return POWER_SUPPLY_CHARGER_TYPE_ACA_B;
+	case CHRG_ACA_C:
+		return POWER_SUPPLY_CHARGER_TYPE_ACA_C;
+	case CHRG_SE1:
+		return POWER_SUPPLY_CHARGER_TYPE_SE1;
+	case CHRG_MHL:
+		return POWER_SUPPLY_CHARGER_TYPE_MHL;
+	case CHRG_UNKNOWN:
+	default:
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+}
+
+/*
+ * Map an (old, new) power-supply cable state pair to a charger event.
+ * Returns -1 when the transition does not correspond to any event.
+ *
+ * Fixes: the trailing "<---" dev_dbg was unreachable (after return);
+ * the CDP branch compared against pnw->charging_cap.ma, the legacy
+ * non-Clovertrail cache which is not maintained on this (Clovertrail)
+ * path — use old.ma like every other branch.
+ */
+static enum power_supply_charger_event check_psc_event(
+		struct power_supply_cable_props old,
+		struct power_supply_cable_props new)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* Check charger type information */
+	if (old.chrg_type != new.chrg_type) {
+		if (old.chrg_type == POWER_SUPPLY_CHARGER_TYPE_NONE
+				&& new.ma != 0)
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		else if (new.chrg_type == POWER_SUPPLY_CHARGER_TYPE_NONE)
+			return POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		else {
+			dev_dbg(pnw->dev, "not a valid event\n");
+			return -1;
+		}
+	}
+
+	/* Check the charging current limit */
+	if (old.ma == new.ma) {
+		dev_dbg(pnw->dev, "not a valid event\n");
+		return -1;
+	}
+
+	switch (new.chrg_type) {
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		if (old.ma == CHRG_CURR_DISCONN &&
+			(new.ma == CHRG_CURR_SDP_LOW ||
+			new.ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: charger connect */
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else if (old.ma == CHRG_CURR_SDP_LOW &&
+				new.ma == CHRG_CURR_SDP_HIGH) {
+			/* SDP event: configuration update */
+			return POWER_SUPPLY_CHARGER_EVENT_UPDATE;
+		} else if (old.ma == CHRG_CURR_SDP_HIGH &&
+				new.ma == CHRG_CURR_SDP_LOW) {
+			/* SDP event: configuration update */
+			return POWER_SUPPLY_CHARGER_EVENT_UPDATE;
+		} else if (old.ma == CHRG_CURR_SDP_SUSP &&
+				(new.ma == CHRG_CURR_SDP_LOW ||
+				new.ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: resume from suspend state */
+			return POWER_SUPPLY_CHARGER_EVENT_RESUME;
+		} else if ((old.ma == CHRG_CURR_SDP_LOW ||
+				old.ma == CHRG_CURR_SDP_HIGH) &&
+				new.ma == CHRG_CURR_SDP_SUSP) {
+			/* SDP event: enter suspend state */
+			return POWER_SUPPLY_CHARGER_EVENT_SUSPEND;
+		} else
+			dev_dbg(pnw->dev, "SDP: no need to update EM\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		if (new.ma == CHRG_CURR_DCP) {
+			/* DCP event: charger connect */
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else
+			dev_dbg(pnw->dev, "DCP: no need to update EM\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		if (old.ma == CHRG_CURR_DISCONN &&
+				new.ma == CHRG_CURR_CDP) {
+			/* CDP event: charger connect */
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else
+			dev_dbg(pnw->dev, "CDP: no need to update EM\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+		if (new.ma == CHRG_CURR_DISCONN) {
+			/* event: chargers disconnect */
+			return POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		} else
+			dev_dbg(pnw->dev, "UNKNOWN: no need to update EM\n");
+		break;
+	default:
+		break;
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return -1;
+}
+
+/*
+ * Public entry point for charging-capability updates.  On Penwell the
+ * legacy cache is updated directly under charger_lock; on Clovertrail
+ * a power-supply event is queued for the psc_notify worker instead.
+ */
+static void penwell_otg_update_chrg_cap(enum usb_charger_type charger,
+		unsigned ma)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct pci_dev *pdev;
+	unsigned long flags;
+	struct otg_bc_event *event;
+
+	pdev = to_pci_dev(pnw->dev);
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (!is_clovertrail(pdev)) {
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		_penwell_otg_update_chrg_cap(charger, ma);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+	} else {
+		dev_dbg(pnw->dev, "clv cable_props_update\n");
+
+		/* GFP_ATOMIC: may run in a non-sleeping context */
+		event = kzalloc(sizeof(*event), GFP_ATOMIC);
+		if (!event) {
+			dev_err(pnw->dev, "no memory for charging event");
+			return;
+		}
+
+		event->cap.chrg_type = usb_chrg_to_power_supply_chrg(charger);
+		event->cap.ma = ma;
+		INIT_LIST_HEAD(&event->node);
+
+		/* queue the event; worker drains chrg_evt_queue in order */
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		list_add_tail(&event->node, &pnw->chrg_evt_queue);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		queue_work(pnw->chrg_qwork, &pnw->psc_notify);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * usb_phy op: gadget reports the current (mA) it may draw after
+ * enumeration.  Only meaningful while an SDP charger is attached;
+ * other charger types ignore the request.
+ */
+static int penwell_otg_set_power(struct usb_phy *otg, unsigned ma)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long flags;
+	struct pci_dev *pdev;
+	struct otg_bc_event *event;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	pdev = to_pci_dev(pnw->dev);
+
+	if (!is_clovertrail(pdev)) {
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+
+		if (pnw->charging_cap.chrg_type != CHRG_SDP) {
+			spin_unlock_irqrestore(&pnw->charger_lock, flags);
+			return 0;
+		}
+
+		/* outside compliance mode, always draw the SDP maximum */
+		if (!pnw->otg_pdata->charging_compliance)
+			ma = CHRG_CURR_SDP_HIGH;
+
+		_penwell_otg_update_chrg_cap(CHRG_SDP, ma);
+
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+	} else {
+		dev_dbg(pnw->dev, "clv charger_set_power\n");
+
+		if (pnw->psc_cap.chrg_type != POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+			return 0;
+
+		/* outside compliance mode, always draw the SDP maximum */
+		if (!pnw->otg_pdata->charging_compliance)
+			ma = CHRG_CURR_SDP_HIGH;
+
+		event = kzalloc(sizeof(*event), GFP_ATOMIC);
+		if (!event) {
+			dev_err(pnw->dev, "no memory for charging event");
+			return -ENOMEM;
+		}
+
+		event->cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+		event->cap.ma = ma;
+		INIT_LIST_HEAD(&event->node);
+
+		/* hand the update to the psc_notify worker */
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		list_add_tail(&event->node, &pnw->chrg_evt_queue);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		queue_work(pnw->chrg_qwork, &pnw->psc_notify);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Copy the cached power-supply cable properties into *data.
+ * data must point at a struct power_supply_cable_props.
+ * Returns -ENODEV before probe, -EINVAL on a NULL buffer.
+ */
+int penwell_otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+	unsigned long flags;
+	struct power_supply_cable_props *cap =
+		(struct power_supply_cable_props *)data;
+	struct penwell_otg *pnw = the_transceiver;
+
+	if (pnw == NULL)
+		return -ENODEV;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (data == NULL)
+		return -EINVAL;
+
+	/* snapshot all three fields atomically under cap_lock */
+	spin_lock_irqsave(&pnw->cap_lock, flags);
+	cap->chrg_evt = pnw->psc_cap.chrg_evt;
+	cap->chrg_type = pnw->psc_cap.chrg_type;
+	cap->ma = pnw->psc_cap.ma;
+	spin_unlock_irqrestore(&pnw->cap_lock, flags);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+
+/*
+ * Legacy (pre-power_supply) query of the charging capability.
+ * Not supported on Clovertrail, which uses the psc_cap path instead.
+ * Returns -ENODEV before probe or on Clovertrail, -EINVAL on NULL cap.
+ */
+int penwell_otg_query_charging_cap(struct otg_bc_cap *cap)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long flags;
+
+	if (pnw == NULL)
+		return -ENODEV;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (cap == NULL)
+		return -EINVAL;
+
+	if (is_clovertrail(to_pci_dev(pnw->dev)))
+		return -ENODEV;
+
+	/* snapshot under charger_lock for a consistent triple */
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+	cap->chrg_type = pnw->charging_cap.chrg_type;
+	cap->ma = pnw->charging_cap.ma;
+	cap->current_event = pnw->charging_cap.current_event;
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(penwell_otg_query_charging_cap);
+
+/* Register/unregister battery driver callback */
+/*
+ * Register the battery driver's charging-event callback (cb) and its
+ * opaque argument.  Only one callback is supported; a second
+ * registration silently overwrites the first (a debug message is
+ * logged).  Returns the handle to pass to the unregister call, or
+ * NULL before probe / on Clovertrail.
+ */
+void *penwell_otg_register_bc_callback(
+	int (*cb)(void *, int, struct otg_bc_cap *), void *arg)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long flags;
+
+	if (pnw == NULL)
+		return pnw;
+
+	/* legacy interface is not available on Clovertrail */
+	if (is_clovertrail(to_pci_dev(pnw->dev)))
+		return NULL;
+
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+
+	if (pnw->bc_callback != NULL)
+		dev_dbg(pnw->dev, "callback has already registered\n");
+
+	pnw->bc_callback = cb;
+	pnw->bc_arg = arg;
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+	return pnw;
+}
+EXPORT_SYMBOL_GPL(penwell_otg_register_bc_callback);
+
+/*
+ * Unregister the battery driver callback.  handler must be the value
+ * returned by penwell_otg_register_bc_callback().
+ */
+int penwell_otg_unregister_bc_callback(void *handler)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long flags;
+
+	if (pnw == NULL)
+		return -ENODEV;
+
+	/* reject stale/foreign handles */
+	if (pnw != handler)
+		return -EINVAL;
+
+	if (is_clovertrail(to_pci_dev(pnw->dev)))
+		return -ENODEV;
+
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+	pnw->bc_callback = NULL;
+	pnw->bc_arg = NULL;
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(penwell_otg_unregister_bc_callback);
+
+/* After probe, it should enable the power of USB PHY */
+/*
+ * Switch the VUSB330 PHY power rail on or off via the MSIC.
+ * NOTE(review): 0x37/0x24 are raw MSIC_VUSB330CNT values (on/off
+ * presumably) — confirm against the MSIC datasheet.
+ */
+static void penwell_otg_phy_enable(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u8 data;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	data = on ? 0x37 : 0x24;
+
+	/* msic_mutex serializes MSIC register writes */
+	mutex_lock(&pnw->msic_mutex);
+
+	if (penwell_otg_msic_write(MSIC_VUSB330CNT, data)) {
+		mutex_unlock(&pnw->msic_mutex);
+		dev_err(pnw->dev, "Fail to enable PHY power\n");
+		return;
+	}
+
+	mutex_unlock(&pnw->msic_mutex);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/* A-device drives vbus, controlled through MSIC register */
+/*
+ * usb_otg op: drive or stop driving VBUS as the A-device.
+ * Three hardware paths exist:
+ *  - Clovertrail: toggle DRVVBUS via ULPI (TPS2052 switch), then queue
+ *    a charger event (ACA cases) or fire a DRIVE_VBUS notification;
+ *  - GPIO-based boards: toggle the platform-provided gpio_vbus pin;
+ *  - Penwell default: set/clear VOTGEN in the MSIC VOTGCNT register.
+ */
+static int penwell_otg_set_vbus(struct usb_otg *otg, bool enabled)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u8 data;
+	unsigned long flags;
+	int retval = 0;
+	struct otg_bc_event *evt;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, enabled ? "on" : "off");
+
+	/*
+	 * For Clovertrail, VBUS is driven by TPS2052 power switch chip.
+	 * But TPS2052 is controlled by ULPI PHY.
+	 */
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		/* PHY must be out of low-power mode for ULPI access */
+		penwell_otg_phy_low_power(0);
+		if (enabled)
+			penwell_otg_ulpi_write(&pnw->iotg,
+				ULPI_OTGCTRLSET, DRVVBUS | DRVVBUS_EXTERNAL);
+		else
+			penwell_otg_ulpi_write(&pnw->iotg,
+				ULPI_OTGCTRLCLR, DRVVBUS | DRVVBUS_EXTERNAL);
+
+		evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+		if (!evt) {
+			dev_err(pnw->dev, "no memory for charging event");
+			return -ENOMEM;
+		}
+
+		evt->cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+		INIT_LIST_HEAD(&evt->node);
+
+		/* turning VBUS off with an ACA attached means we may now
+		 * charge from the accessory: report an ACA connect */
+		if ((!enabled) && (pnw->iotg.hsm.id == ID_ACA_A
+			|| pnw->iotg.hsm.id == ID_ACA_B
+			|| pnw->iotg.hsm.id == ID_ACA_C)) {
+			evt->cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_ACA;
+			evt->cap.ma = CHRG_CURR_ACA;
+			evt->cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else {
+			dev_info(pnw->dev, "notification: turn %s VBUS\n",
+					enabled ? "ON" : "OFF");
+			atomic_notifier_call_chain(&pnw->iotg.otg.notifier,
+					USB_EVENT_DRIVE_VBUS, &enabled);
+			kfree(evt);
+			goto done;
+		}
+
+		dev_dbg(pnw->dev, "set_vbus ma = %d, event = %d, type = %s\n",
+				evt->cap.ma, evt->cap.chrg_evt,
+				psc_string(evt->cap.chrg_type));
+
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		list_add_tail(&evt->node, &pnw->chrg_evt_queue);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		queue_work(pnw->chrg_qwork, &pnw->psc_notify);
+
+		goto done;
+	}
+
+	/* boards that route VBUS enable through a GPIO */
+	if (pnw->otg_pdata->gpio_vbus) {
+		dev_info(pnw->dev, "Turn %s VBUS using GPIO pin %d\n",
+				enabled ? "on" : "off", pnw->otg_pdata->gpio_vbus);
+		gpio_direction_output(pnw->otg_pdata->gpio_vbus,
+					enabled ? 1 : 0);
+		goto done;
+	}
+
+	data = enabled ? VOTGEN : 0;
+
+	mutex_lock(&pnw->msic_mutex);
+
+	/* update only the VOTGEN bit of VOTGCNT */
+	retval = intel_scu_ipc_update_register(MSIC_VOTGCNT, data, VOTGEN);
+
+	if (retval)
+		dev_err(pnw->dev, "Fail to set power on OTG Port\n");
+
+	mutex_unlock(&pnw->msic_mutex);
+
+done:
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return retval;
+}
+
+/* Poll helper: returns 1 while a ULPI viewport command is still
+ * running (ULPI_RUN set), 0 once the controller has finished. */
+static int penwell_otg_ulpi_run(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 status;
+
+	status = readl(pnw->iotg.base + CI_ULPIVP);
+
+	if (!(status & ULPI_RUN)) {
+		dev_dbg(pnw->dev, "%s: ULPI command done\n", __func__);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* io_ops to access ulpi registers */
+/*
+ * Read an 8-bit ULPI PHY register (port 0) through the controller's
+ * ULPI viewport.  Busy-polls up to ~2 ms (400 x 5 us) for completion.
+ * Returns 0 with *val filled in, or -ETIMEDOUT.
+ */
+static int
+penwell_otg_ulpi_read(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val32 = 0;
+	int count;
+	unsigned long flags;
+
+	dev_dbg(pnw->dev, "%s - addr 0x%x\n", __func__, reg);
+
+	/* viewport is a single shared resource: serialize under pnw->lock */
+	spin_lock_irqsave(&pnw->lock, flags);
+
+	/* Port = 0 */
+	val32 = ULPI_RUN | reg << 16;
+	writel(val32, pnw->iotg.base + CI_ULPIVP);
+
+	/* Polling at least 2ms for read operation to complete*/
+	count = 400;
+
+	while (count) {
+		val32 = readl(pnw->iotg.base + CI_ULPIVP);
+		if (val32 & ULPI_RUN) {
+			count--;
+			udelay(5);
+		} else {
+			/* read data lives in bits 15:8 of the viewport */
+			*val = (u8)((val32 & ULPI_DATRD) >> 8);
+			dev_dbg(pnw->dev,
+				"%s - done data 0x%x\n", __func__, *val);
+			spin_unlock_irqrestore(&pnw->lock, flags);
+			return 0;
+		}
+	}
+
+	dev_warn(pnw->dev, "%s - addr 0x%x timeout\n", __func__, reg);
+
+	spin_unlock_irqrestore(&pnw->lock, flags);
+	return -ETIMEDOUT;
+
+}
+
+/*
+ * Write an 8-bit ULPI PHY register (port 0) through the controller's
+ * ULPI viewport.  Busy-polls up to ~2 ms for completion.
+ * Returns 0 on success or -ETIMEDOUT.
+ */
+static int
+penwell_otg_ulpi_write(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val32 = 0;
+	int count;
+	unsigned long flags;
+
+	dev_dbg(pnw->dev,
+		"%s - addr 0x%x - data 0x%x\n", __func__, reg, val);
+
+	/* viewport is a single shared resource: serialize under pnw->lock */
+	spin_lock_irqsave(&pnw->lock, flags);
+
+	/* Port = 0 */
+	val32 = ULPI_RUN | ULPI_RW | reg << 16 | val;
+	writel(val32, pnw->iotg.base + CI_ULPIVP);
+
+	/* Polling at least 2ms for write operation to complete*/
+	count = 400;
+
+	while (count && penwell_otg_ulpi_run()) {
+		count--;
+		udelay(5);
+	}
+
+	dev_dbg(pnw->dev, "%s - addr 0x%x %s\n", __func__, reg,
+			count ? "complete" : "timeout");
+
+	spin_unlock_irqrestore(&pnw->lock, flags);
+	return count ? 0 : -ETIMEDOUT;
+}
+
+/* Exported convenience wrapper: ULPI register write against the one
+ * global transceiver instance. */
+int pnw_otg_ulpi_write(u8 reg, u8 val)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	return penwell_otg_ulpi_write(&pnw->iotg, reg, val);
+}
+EXPORT_SYMBOL_GPL(pnw_otg_ulpi_write);
+
+/* Identify the MSIC vendor.  Only TI parts are supported on this
+ * platform, so report MSIC_VD_TI unconditionally. */
+static enum msic_vendor penwell_otg_check_msic(void)
+{
+	return MSIC_VD_TI;
+}
+
+/* Monitor function check if SRP initial conditions. Use polling on current
+ * status for b_ssend_srp, b_se0_srp */
+/*
+ * Poll the bus for up to 6 x 300 ms.  If VBUS stays below the B-session
+ * threshold and the line state stays SE0 the whole time, set the
+ * b_ssend_srp / b_se0_srp HSM flags; any activity aborts early with
+ * the flags left clear.
+ */
+static void penwell_otg_mon_bus(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	int count = 6;
+	int interval = 300; /* ms */
+	u32 val = 0;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+	pnw->iotg.hsm.b_ssend_srp = 0;
+	pnw->iotg.hsm.b_se0_srp = 0;
+
+	while (count) {
+		msleep(interval);
+
+		/* Check VBus status */
+		val = readl(pnw->iotg.base + CI_OTGSC);
+		if (!(val & OTGSC_BSE))
+			return;
+
+		/* any line-state activity means SRP conditions not met */
+		val = readl(pnw->iotg.base + CI_PORTSC1);
+		if (val & PORTSC_LS)
+			return;
+
+		count--;
+	}
+
+	pnw->iotg.hsm.b_ssend_srp = 1;
+	pnw->iotg.hsm.b_se0_srp = 1;
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/* HNP polling function */
+/* Timer callback for HNP polling: the host-request flag has to be read
+ * in process context, so punt the actual poll to the workqueue. */
+static void penwell_otg_hnp_poll_fn(unsigned long indicator)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	queue_work(pnw->qwork, &pnw->hnp_poll_work);
+}
+
+/* Start HNP polling */
+/* Call this function with iotg->hnp_poll_lock held */
+/*
+ * Arm the HNP polling timer to fire in 'delay' ms.  The iotg argument
+ * is unused today (the global hnp_poll_timer is armed); kept for
+ * interface symmetry with the other HNP helpers.
+ */
+static int penwell_otg_add_hnp_poll_timer(struct intel_mid_otg_xceiv *iotg,
+				unsigned long delay)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long j = jiffies;
+
+	pnw->hnp_poll_timer.data = 1;
+	pnw->hnp_poll_timer.function = penwell_otg_hnp_poll_fn;
+	pnw->hnp_poll_timer.expires = j + msecs_to_jiffies(delay);
+
+	add_timer(&pnw->hnp_poll_timer);
+
+	return 0;
+}
+
+/*
+ * Enable HNP polling: mark it enabled under hnp_poll_lock and schedule
+ * the first poll in 50 ms.  A no-op if polling is already running.
+ */
+static int penwell_otg_start_hnp_poll(struct intel_mid_otg_xceiv *iotg)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pnw->iotg.hnp_poll_lock, flags);
+
+	if (pnw->iotg.hsm.hnp_poll_enable) {
+		spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+		dev_dbg(pnw->dev, "HNP polling is already enabled\n");
+		return 0;
+	}
+
+	/* mark HNP polling enabled and start HNP polling in 50ms */
+	pnw->iotg.hsm.hnp_poll_enable = 1;
+	penwell_otg_add_hnp_poll_timer(&pnw->iotg, 50);
+
+	spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Re-arm the HNP poll timer for the next period (THOS_REQ_POL),
+ * unless polling was disabled in the meantime.
+ */
+static int penwell_otg_continue_hnp_poll(struct intel_mid_otg_xceiv *iotg)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pnw->iotg.hnp_poll_lock, flags);
+
+	if (!pnw->iotg.hsm.hnp_poll_enable) {
+		spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+		dev_dbg(pnw->dev, "HNP polling is disabled, stop polling\n");
+		return 0;
+	}
+
+	penwell_otg_add_hnp_poll_timer(&pnw->iotg, THOS_REQ_POL);
+
+	spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+
+	return 0;
+}
+
+/* Stop HNP polling */
+/*
+ * Disable HNP polling and cancel a pending timer.  A no-op if polling
+ * is already disabled.
+ * NOTE(review): del_timer_sync() is called under a spinlock with IRQs
+ * off — confirm the timer callback never takes hnp_poll_lock.
+ */
+static int penwell_otg_stop_hnp_poll(struct intel_mid_otg_xceiv *iotg)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pnw->iotg.hnp_poll_lock, flags);
+
+	if (!pnw->iotg.hsm.hnp_poll_enable) {
+		spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+		dev_dbg(pnw->dev, "HNP polling is already disabled\n");
+		return 0;
+	}
+
+	pnw->iotg.hsm.hnp_poll_enable = 0;
+	del_timer_sync(&pnw->hnp_poll_timer);
+
+	spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+
+	return 0;
+}
+
+/* Start SRP function */
+/*
+ * Trigger a hardware-assisted data-pulse SRP (OTGSC_HADP), then check
+ * ~8 ms later that the pulse has completed; warn if it has not.
+ */
+static int penwell_otg_start_srp(struct usb_otg *otg)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+
+	/* mask write-1-to-clear status bits so they are not cleared */
+	writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
+				pnw->iotg.base + CI_OTGSC);
+
+	/* Check if the data plus is finished or not */
+	msleep(8);
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	if (val & (OTGSC_HADP | OTGSC_DP))
+		dev_warn(pnw->dev, "DataLine SRP Error\n");
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+
+/* stop SOF via bus_suspend */
+/*
+ * Start (on=1) or stop (on=0) SOF generation by resuming or
+ * suspending the root hub through the HCD bus_resume/bus_suspend ops.
+ */
+static void penwell_otg_loc_sof(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct usb_hcd *hcd;
+	int err;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "resume" : "suspend");
+
+	hcd = bus_to_hcd(pnw->iotg.otg.otg->host);
+	if (on)
+		err = hcd->driver->bus_resume(hcd);
+	else
+		err = hcd->driver->bus_suspend(hcd);
+
+	if (err)
+		dev_warn(pnw->dev, "Fail to resume/suspend USB bus -%d\n", err);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * Put the PHY into (on=1) or take it out of (on=0) low-power mode via
+ * the PHCD bit in HOSTPC1.  The controller must not be in idle mode
+ * for PHCD to take effect, so client mode is forced first if needed.
+ */
+static void penwell_otg_phy_low_power(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_USBMODE);
+	if (!(val & USBMODE_CM)) {
+		/* controller is idle: force client mode so PHCD works */
+		dev_err(pnw->dev,
+			"PHY can't enter low power mode "
+			"when UDC is in idle mode\n");
+		set_client_mode();
+	}
+
+	val = readl(pnw->iotg.base + CI_HOSTPC1);
+	dev_dbg(pnw->dev, "---> Register CI_HOSTPC1 = %x\n", val);
+
+	if (on) {
+		if (val & HOSTPC1_PHCD) {
+			dev_dbg(pnw->dev, "already in Low power mode\n");
+			return;
+		}
+		writel(val | HOSTPC1_PHCD, pnw->iotg.base + CI_HOSTPC1);
+	} else {
+		if (!(val & HOSTPC1_PHCD)) {
+			dev_dbg(pnw->dev, "already in Normal mode\n");
+			return;
+		}
+		writel(val & ~HOSTPC1_PHCD, pnw->iotg.base + CI_HOSTPC1);
+	}
+
+	/* read back for the debug trace */
+	val = readl(pnw->iotg.base + CI_HOSTPC1);
+
+	dev_dbg(pnw->dev, "<--- Register CI_HOSTPC1 = %x\n", val);
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * For Penwell, VBUS330 is the power rail to otg PHY inside MSIC, set it
+ * into low power mode or normal mode according to pm state.
+ * Call this function when spi access to MSIC registers is enabled.
+ *
+ * For Clovertrail, we don't have a controllable power rail to the PHY -
+ * it's always on.
+ */
+static int penwell_otg_vusb330_low_power(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u8 data;
+	int retval = 0;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	/* Clovertrail has no controllable rail: nothing to do there */
+	if (!is_clovertrail(to_pci_dev(pnw->dev))) {
+		if (on)
+			data = 0x5;	/* Low power mode */
+		else
+			data = 0x7;	/* Normal mode */
+		retval = penwell_otg_msic_write(MSIC_VUSB330CNT, data);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return retval;
+}
+
+/* Enable/Disable OTG interrupt */
+/*
+ * Turn the full OTGSC interrupt-enable mask on or off, preserving the
+ * write-1-to-clear status bits.
+ */
+static void penwell_otg_intr(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	/* mask W/C bits to avoid clearing them when
+	 * val is written back to OTGSC */
+	val &= ~OTGSC_INTSTS_MASK;
+	if (on) {
+		val = val | (OTGSC_INTEN_MASK);
+		writel(val, pnw->iotg.base + CI_OTGSC);
+	} else {
+		val = val & ~(OTGSC_INTEN_MASK);
+		writel(val, pnw->iotg.base + CI_OTGSC);
+	}
+}
+
+/* set HAAR: Hardware Assist Auto-Reset */
+/*
+ * Set or clear the OTGSC HAAR bit, preserving the write-1-to-clear
+ * interrupt status bits.
+ */
+static void penwell_otg_HAAR(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	if (on)
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
+				pnw->iotg.base + CI_OTGSC);
+	else
+		writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
+				pnw->iotg.base + CI_OTGSC);
+}
+
+/* set HABA: Hardware Assist B-Disconnect to A-Connect */
+/*
+ * Set or clear the OTGSC HABA bit, preserving the write-1-to-clear
+ * interrupt status bits.
+ */
+static void penwell_otg_HABA(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	if (on)
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
+				pnw->iotg.base + CI_OTGSC);
+	else
+		writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
+				pnw->iotg.base + CI_OTGSC);
+}
+
+/* read 8bit msic register */
+/* Read one 8-bit MSIC register over SCU IPC; warn on failure and
+ * return the IPC error code to the caller. */
+static int penwell_otg_msic_read(u16 addr, u8 *data)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(addr, data);
+	if (ret)
+		dev_warn(pnw->dev, "Failed to read MSIC register %x\n", addr);
+
+	return ret;
+}
+
+/* write 8bit msic register */
+/* Write one 8-bit MSIC register over SCU IPC; warn (no retry) on
+ * failure and return the IPC error code to the caller. */
+static int penwell_otg_msic_write(u16 addr, u8 data)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(addr, data);
+	if (ret)
+		dev_warn(pnw->dev, "Failed to write MSIC register %x\n", addr);
+
+	return ret;
+}
+
+/* USB related register in MSIC can be access via SPI address and ulpi address
+ * Access the control register to switch */
+static void penwell_otg_msic_spi_access(bool enabled)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u8 mode;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* Set ULPI ACCESS MODE */
+	mode = enabled ? SPIMODE : 0;
+
+	penwell_otg_msic_write(MSIC_ULPIACCESSMODE, mode);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/* USB Battery Charger detection related functions */
+/* Data contact detection is the first step for charger detection */
+/*
+ * BC data-contact detection (DCD) over the SPI path: configure the
+ * PHY for detection, then poll MSIC_USB_MISC (50 x ~10 ms) for the
+ * D+ contact indication.  Returns 0 on contact or poll timeout, or a
+ * negative error code on register-access failure.
+ * NOTE(review): early error returns leave SPI access enabled — confirm
+ * callers clean that up.
+ */
+static int penwell_otg_data_contact_detect(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u8 data;
+	int count = 50;
+	int retval = 0;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* Enable SPI access */
+	penwell_otg_msic_spi_access(true);
+
+	/* Set POWER_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	/* Set FUNC_CTRL_SET */
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLSET, OPMODE0);
+	if (retval)
+		return retval;
+
+	/* Set FUNC_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLCLR, OPMODE1);
+	if (retval)
+		return retval;
+
+	/* Set OTG_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_OTGCTRLCLR,
+					DMPULLDOWN | DPPULLDOWN);
+	if (retval)
+		return retval;
+
+	/* Set POWER_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR, SWCNTRL);
+	if (retval)
+		return retval;
+
+	/* enable data-contact detection with SW USB detection */
+	retval = penwell_otg_msic_write(MSIC_VS3SET, DATACONEN | SWUSBDET);
+	if (retval)
+		return retval;
+
+	dev_dbg(pnw->dev, "Start Polling for Data contact detection!\n");
+
+	while (count) {
+		retval = intel_scu_ipc_ioread8(MSIC_USB_MISC, &data);
+		if (retval) {
+			dev_warn(pnw->dev, "Failed to read MSIC register\n");
+			return retval;
+		}
+
+		if (data & MISC_CHGDSERXDPINV) {
+			dev_dbg(pnw->dev, "Data contact detected!\n");
+			return 0;
+		}
+		count--;
+		/* Interval is 10 - 11ms */
+		usleep_range(10000, 11000);
+	}
+
+	dev_dbg(pnw->dev, "Data contact Timeout\n");
+
+	/* on timeout, drop DCD and fall back to plain SW detection */
+	retval = penwell_otg_msic_write(MSIC_VS3CLR, DATACONEN | SWUSBDET);
+	if (retval)
+		return retval;
+
+	udelay(100);
+
+	retval = penwell_otg_msic_write(MSIC_VS3SET, SWUSBDET);
+	if (retval)
+		return retval;
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+
+/*
+ * Charger (primary) detection step: hardware handles the detection,
+ * so this just waits the required settling time (125 ms).
+ */
+static int penwell_otg_charger_detect(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	msleep(125);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Finish hardware charger detection and classify the charger by
+ * reading the MSIC SPWRSRINT1 status bits.  Returns a CHRG_* value
+ * (SDP/DCP/CDP/UNKNOWN) or a negative error code.
+ */
+static int penwell_otg_charger_type_detect(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	enum usb_charger_type charger;
+	u8 data;
+	int retval;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* stop data-contact detection */
+	retval = penwell_otg_msic_write(MSIC_VS3CLR, DATACONEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLSET, DPWKPUEN | SWCNTRL);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_msic_write(MSIC_OTGCTRLCLR,
+					DMPULLDOWN | DPPULLDOWN);
+	if (retval)
+		return retval;
+
+	/* settle time before handing control back to hardware */
+	msleep(55);
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR,
+				SWCNTRL | DPWKPUEN | HWDET);
+	if (retval)
+		return retval;
+
+	msleep(1);
+
+	/* Enable ULPI mode */
+	penwell_otg_msic_spi_access(false);
+
+	retval = intel_scu_ipc_ioread8(MSIC_SPWRSRINT1, &data);
+	if (retval) {
+		dev_warn(pnw->dev, "Failed to read MSIC register\n");
+		return retval;
+	}
+
+	/* hardware reports the detected charger class in SPWRSRINT1 */
+	switch (data & MSIC_SPWRSRINT1_MASK) {
+	case SPWRSRINT1_SDP:
+		charger = CHRG_SDP;
+		break;
+	case SPWRSRINT1_DCP:
+		charger = CHRG_DCP;
+		break;
+	case SPWRSRINT1_CDP:
+		charger = CHRG_CDP;
+		break;
+	default:
+		charger = CHRG_UNKNOWN;
+		break;
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return charger;
+}
+
+/* manual charger detection by ULPI access */
+/*
+ * Run BC-style charger detection by hand: DCD and SE1 detection over
+ * SPI (ULPI access is unreliable with SE1 chargers attached), then
+ * primary detection (SDP vs CDP/DCP) and secondary detection
+ * (CDP vs DCP) over ULPI.  Returns a CHRG_* type on success or a
+ * negative error code.
+ *
+ * Fixes: dropped the unreachable trailing dev_dbg (it sat after both
+ * final returns) and the stray ';' after the closing brace.
+ */
+static int penwell_otg_manual_chrg_det(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg;
+	int retval;
+	u8 data, data1, data2;
+	unsigned long timeout, interval;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	iotg = &pnw->iotg;
+
+	dev_info(pnw->dev, "USB charger detection start...\n");
+
+	/* WA for invalid (SE1) charger: add DCD & SE1 detection over SPI
+	 * instead of ULPI interface to avoid any ulpi read/write failures
+	 * with SE1 charger */
+	penwell_otg_msic_spi_access(true);
+
+	/* non-driving mode, FS transceiver selected for detection */
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLCLR,
+			OPMODE1 | TERMSELECT | XCVRSELECT1);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLSET, OPMODE0 | XCVRSELECT0);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLSET, SWCNTRL);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	retval = penwell_otg_msic_write(MSIC_VS3SET, CHGD_IDP_SRC);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	dev_info(pnw->dev, "charger detection DCD start...\n");
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	dev_info(pnw->dev, "DCD started!\n");
+
+	/* Delay TIDP_SRC_ON + TCHGD_SERX_DEB */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		retval = penwell_otg_msic_read(MSIC_VS4, &data);
+		if (retval) {
+			penwell_otg_msic_spi_access(false);
+			return retval;
+		}
+		if (!(data & CHRG_SERX_DP)) {
+			dev_info(pnw->dev, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	retval = penwell_otg_msic_write(MSIC_VS3CLR, CHGD_IDP_SRC);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	dev_info(pnw->dev, "DCD complete\n");
+
+	/* Check for SE1, Linestate = '11' */
+	retval = penwell_otg_msic_read(MSIC_DEBUG, &data);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+	dev_info(pnw->dev, "MSIC.DEBUG.LINESTATE.D[1:0] = 0x%02X\n", data);
+
+	data &= LINESTATE_MSK;
+	if (data == LINESTATE_SE1) {
+		dev_info(pnw->dev, "SE1 Detected\n");
+		penwell_otg_msic_spi_access(false);
+		return CHRG_SE1;
+	}
+
+	/* SPI detection phase is over; switch back to ULPI access */
+	penwell_otg_msic_spi_access(false);
+
+	dev_info(pnw->dev, "Primary Detection start...\n");
+
+	/* Primary Dection config */
+	/* ulpi_write(0x0b, 0x06) */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_OTGCTRLSET,
+			DMPULLDOWN | DPPULLDOWN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(125);
+
+	/* Check result SDP vs CDP/DCP */
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &data1);
+	if (retval)
+		return retval;
+
+	data1 = data1 & VDATDET;
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &data2);
+	if (retval)
+		return retval;
+
+	data2 = data2 & CHRG_SERX_DM;
+
+	/* no VDAT or D- asserted: a standard downstream port */
+	if (!data1 || data2) {
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR,
+				DPVSRCEN);
+		if (retval)
+			return retval;
+
+		dev_info(pnw->dev, "USB Charger Detection done\n");
+		return CHRG_SDP;
+	}
+
+	/* start detection on CDP vs DCP */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	/* sleep 1ms between Primary and Secondary detection */
+	usleep_range(1000, 1200);
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1CLR, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(85);
+
+	/* read result on CDP vs DCP */
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &data);
+	if (retval)
+		return retval;
+
+	data = data & VDATDET;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1SET, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	dev_info(pnw->dev, "USB Charger Detection done\n");
+
+	if (data) {
+		/* DCP: keep VDPSRC asserted so the charger keeps sourcing */
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET,
+				DPVSRCEN);
+		if (retval)
+			return retval;
+
+		return CHRG_DCP;
+	}
+
+	return CHRG_CDP;
+}
+
+/*
+ * penwell_otg_charger_det_dcd_clt - Data Contact Detect (DCD) on Clovertrail.
+ *
+ * Puts the PHY into non-driving mode with SW control enabled, turns on the
+ * IDP source, then polls ULPI_VS4 until D+ reads low (data contact) or the
+ * DATACON_TIMEOUT window expires.  A timeout is not treated as an error;
+ * detection simply proceeds.  Returns 0 on completion or a negative value
+ * on ULPI access failure.
+ */
+static int penwell_otg_charger_det_dcd_clt(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int retval;
+	unsigned long timeout, interval;
+	u8 data;
+
+	/* Change OPMODE to '01' Non-driving */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_FUNCTRLSET,
+				OPMODE0 | XCVRSELECT0);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_FUNCTRLCLR,
+				OPMODE1 | XCVRSELECT1 | TERMSELECT);
+	if (retval)
+		return retval;
+
+	/* Enable SW control */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, SWCNTRL);
+	if (retval)
+		return retval;
+
+	/* Clear HWDETECT result for safety */
+	penwell_otg_charger_hwdet(false);
+
+	/* Enable IDPSRC */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS3SET, CHGD_IDP_SRC);
+	if (retval)
+		return retval;
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	dev_info(pnw->dev, "DCD started\n");
+
+	/* Delay TIDP_SRC_ON + TCHGD_SERX_DEB = 347.8us + 66.1ms max */
+	usleep_range(66500, 67000);
+
+	/* Poll D+ until it is pulled low (contact) or we run out of time */
+	while (!time_after(jiffies, timeout)) {
+		retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &data);
+		if (retval) {
+			dev_warn(pnw->dev, "Failed to read ULPI register\n");
+			return retval;
+		}
+
+		dev_dbg(pnw->dev, "VS4 = 0x%02x.. DP = bit1\n", data);
+
+		if (!(data & CHRG_SERX_DP)) {
+			dev_info(pnw->dev, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	/* Switch the IDP source back off before the next detection stage */
+	/* ulpi_write(0x87, 0x40)*/
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS3CLR, CHGD_IDP_SRC);
+	if (retval)
+		return retval;
+
+	dev_info(pnw->dev, "DCD complete\n");
+
+	return 0;
+}
+
+/*
+ * penwell_otg_charger_det_aca_clt - detect an Accessory Charger Adapter.
+ *
+ * Reads the ULPI VS2 latch/status registers; when an ID resistance
+ * arbitration result is latched, classifies the port as ACA-A/B/C and
+ * records it in iotg->hsm.id.  For ACA-A, an extra VDAT probe
+ * distinguishes an ACA-Dock (VDAT present, D- low) from a plain ACA-A.
+ * Returns CHRG_ACA when any ACA variant was found, 0 when none, or a
+ * negative value on ULPI access failure.
+ */
+static int penwell_otg_charger_det_aca_clt(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int retval;
+	u8 data;
+	u8 usb_vs2_sts = 0;
+	u8 usb_vs2_latch = 0;
+	u8 usb_vdat_det = 0;
+	u8 usb_vdm = 0;
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS2LATCH, &usb_vs2_latch);
+	dev_dbg(pnw->dev, "%s: usb_vs2_latch = 0x%x\n",
+			__func__, usb_vs2_latch);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return retval;
+	}
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS2STS, &usb_vs2_sts);
+	dev_dbg(pnw->dev, "%s: usb_vs2_sts = 0x%x\n",
+			__func__, usb_vs2_sts);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return retval;
+	}
+
+	if (usb_vs2_latch & IDRARBRC_MSK) {
+		switch (IDRARBRC_STS(usb_vs2_sts)) {
+		case IDRARBRC_A:
+			/* NOTE(review): unlike the B/C cases below, no
+			 * dev_info is printed here; the A case logs later,
+			 * after the Dock-vs-A probe. Confirm intentional. */
+			iotg->hsm.id = ID_ACA_A;
+			break;
+		case IDRARBRC_B:
+			iotg->hsm.id = ID_ACA_B;
+			dev_info(pnw->dev, "ACA-B detected\n");
+			break;
+		case IDRARBRC_C:
+			iotg->hsm.id = ID_ACA_C;
+			dev_info(pnw->dev, "ACA-C detected\n");
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (iotg->hsm.id == ID_ACA_A) {
+		/* Drive VDP_SRC briefly and sample VDAT/D- to tell an
+		 * ACA-Dock apart from a bare ACA-A adapter */
+		retval = penwell_otg_ulpi_write(iotg,
+					ULPI_PWRCTRLSET, DPVSRCEN);
+		if (retval)
+			return retval;
+
+		msleep(70);
+
+		retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &data);
+		usb_vdat_det = data & VDATDET;
+		if (retval) {
+			dev_warn(pnw->dev, "ULPI read failed, exit\n");
+			return retval;
+		}
+
+		retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &data);
+		usb_vdm = data & CHRG_SERX_DM;
+		if (retval) {
+			dev_warn(pnw->dev, "ULPI read failed, exit\n");
+			return retval;
+		}
+
+		retval = penwell_otg_ulpi_write(iotg,
+					ULPI_PWRCTRLCLR, DPVSRCEN);
+		if (retval)
+			return retval;
+
+		if (usb_vdat_det && !usb_vdm)
+			dev_info(pnw->dev, "ACA-Dock detected\n");
+		else if (!usb_vdat_det && usb_vdm)
+			dev_info(pnw->dev, "ACA-A detected\n");
+	}
+
+	if (iotg->hsm.id == ID_ACA_A || iotg->hsm.id == ID_ACA_B
+		|| iotg->hsm.id == ID_ACA_C) {
+		return CHRG_ACA;
+	}
+
+	return 0;
+}
+
+/*
+ * penwell_otg_charger_det_se1_clt - detect an SE1 charger on Clovertrail.
+ *
+ * Samples the ULPI debug register: D+ and D- both high ("SE1" linestate)
+ * identifies an SE1-type dedicated charger.  Returns CHRG_SE1 when
+ * detected, 0 when not, or a negative error on ULPI access failure.
+ */
+static int penwell_otg_charger_det_se1_clt(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int retval;
+	u8 data;
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_DEBUG, &data);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		/* propagate the real error instead of masking it as
+		 * -EBUSY, matching the other *_clt detection helpers */
+		return retval;
+	}
+
+	if ((data & CHRG_SERX_DP) && (data & CHRG_SERX_DM))
+		return CHRG_SE1;
+
+	return 0;
+}
+
+/*
+ * penwell_otg_charger_det_pri_clt - BC primary detection on Clovertrail.
+ *
+ * Drives VDP_SRC for ~110ms and samples VDAT plus the D- line.  No VDAT
+ * (or D- asserted) means a Standard Downstream Port: returns CHRG_SDP.
+ * Returns 0 when a charging port is indicated (secondary detection must
+ * follow), or a negative error on ULPI access failure.
+ */
+static int penwell_otg_charger_det_pri_clt(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int retval;
+	u8 vdat_det, serx_dm;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(110);
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &vdat_det);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		/* propagate the real error instead of masking it as
+		 * -EBUSY, matching the other *_clt detection helpers */
+		return retval;
+	}
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &serx_dm);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return retval;
+	}
+
+	/* Always switch VDP_SRC back off before classifying */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	vdat_det &= VDATDET;
+	serx_dm &= CHRG_SERX_DM;
+
+	if ((!vdat_det) || serx_dm)
+		return CHRG_SDP;
+
+	return 0;
+}
+
+/*
+ * penwell_otg_charger_det_sec_clt - BC secondary detection on Clovertrail.
+ *
+ * Flips the data polarity, drives VDM_SRC for ~80ms and samples VDAT:
+ * VDAT present means a Dedicated Charging Port (CHRG_DCP), otherwise a
+ * Charging Downstream Port (CHRG_CDP).  Returns a negative error on ULPI
+ * access failure.  Polarity is restored before returning on success.
+ */
+static int penwell_otg_charger_det_sec_clt(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int retval;
+	u8 vdat_det;
+
+	/* short settle time between primary and secondary detection */
+	usleep_range(1000, 1500);
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1CLR, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(80);
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &vdat_det);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return retval;
+	}
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1SET, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	vdat_det &= VDATDET;
+
+	/* removed unreachable "return 0" that followed this if/else -
+	 * both branches already return */
+	if (vdat_det)
+		return CHRG_DCP;
+	else
+		return CHRG_CDP;
+}
+
+/*
+ * penwell_otg_charger_det_clean_clt - post-detection PHY cleanup (CLV).
+ *
+ * Re-asserts VDP_SRC and SW control after a DCP was identified.
+ * Returns 0 on success or the ULPI write error.
+ */
+static int penwell_otg_charger_det_clean_clt(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	return penwell_otg_ulpi_write(&pnw->iotg, ULPI_PWRCTRLSET,
+					DPVSRCEN | SWCNTRL);
+}
+
+/*
+ * Charger detection runs in SW mode, so the outcome (charging port or
+ * not) has to be reported back to the hardware via the HWDET bit.
+ * No-op on non-Clovertrail platforms.
+ */
+static int penwell_otg_charger_hwdet(bool enable)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int retval;
+
+	/* This is for CLV only */
+	if (!is_clovertrail(to_pci_dev(pnw->dev)))
+		return 0;
+
+	if (enable)
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, HWDET);
+	else
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, HWDET);
+
+	if (retval)
+		return retval;
+
+	dev_dbg(pnw->dev, "%s HWDETECT\n", enable ? "set" : "clear");
+
+	return 0;
+}
+
+/*
+ * penwell_otg_charger_det_clt - full charger detection flow (Clovertrail).
+ *
+ * Runs the detection stages in order: DCD, ACA, SE1, primary (SDP),
+ * secondary (CDP/DCP).  The first stage that identifies a charger type
+ * wins and its CHRG_* value is returned; 0 means nothing was detected.
+ * Negative values are propagated stage errors.  HWDET is reported to the
+ * PHY for every type except SDP.
+ */
+static int penwell_otg_charger_det_clt(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	int retval;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* DCD */
+	retval = penwell_otg_charger_det_dcd_clt();
+	if (retval) {
+		dev_warn(pnw->dev, "DCD failed, exit\n");
+		return retval;
+	}
+
+	/* ACA Detection */
+	retval = penwell_otg_charger_det_aca_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "ACA Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_ACA) {
+		dev_info(pnw->dev, "ACA detected\n");
+		penwell_otg_charger_hwdet(true);
+		return CHRG_ACA;
+	}
+
+	/* SE1 Detection */
+	retval = penwell_otg_charger_det_se1_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "SE1 Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_SE1) {
+		dev_info(pnw->dev, "SE1 detected\n");
+		penwell_otg_charger_hwdet(true);
+		return CHRG_SE1;
+	}
+
+	/* Pri Det */
+	retval = penwell_otg_charger_det_pri_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "Pri Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_SDP) {
+		dev_info(pnw->dev, "SDP detected\n");
+		return CHRG_SDP;
+	}
+
+	/* Sec Det */
+	retval = penwell_otg_charger_det_sec_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "Sec Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_CDP) {
+		dev_info(pnw->dev, "CDP detected\n");
+		penwell_otg_charger_hwdet(true);
+		return CHRG_CDP;
+	} else if (retval == CHRG_DCP) {
+		dev_info(pnw->dev, "DCP detected\n");
+		penwell_otg_charger_det_clean_clt();
+		penwell_otg_charger_hwdet(true);
+		return CHRG_DCP;
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return 0;
+}
+
+/*
+ * penwell_otg_phy_vbus_wakeup - (un)arm VBUS wakeup interrupts in the PHY.
+ *
+ * Enables or disables the rising and falling edge interrupts for
+ * VBUSVLD/SESSVLD/SESSEND - via ULPI on Clovertrail, via MSIC register
+ * writes otherwise.  MSIC SPI access is held around the whole sequence.
+ */
+void penwell_otg_phy_vbus_wakeup(bool on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	u8 flag = 0;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	penwell_otg_msic_spi_access(true);
+
+	flag = VBUSVLD | SESSVLD | SESSEND;
+
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		if (on) {
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_RISINGSET,
+							flag);
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_FALLINGSET,
+							flag);
+		} else {
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_RISINGCLR,
+							flag);
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_FALLINGCLR,
+							flag);
+		}
+	} else {
+		if (on) {
+			penwell_otg_msic_write(MSIC_USBINTEN_RISESET, flag);
+			penwell_otg_msic_write(MSIC_USBINTEN_FALLSET, flag);
+		} else {
+			penwell_otg_msic_write(MSIC_USBINTEN_RISECLR, flag);
+			penwell_otg_msic_write(MSIC_USBINTEN_FALLCLR, flag);
+		}
+	}
+
+	penwell_otg_msic_spi_access(false);
+
+	/* fixed exit trace: it printed "--->" like the entry trace */
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * penwell_otg_phy_intr - (un)arm VBUSVLD/IDGND interrupts in the MSIC.
+ *
+ * Enables or disables the rise and fall edge interrupts for VBUS-valid
+ * and ID-ground, then reads back both enable registers for diagnostics.
+ * NOTE(review): uses MSIC registers unconditionally, unlike
+ * penwell_otg_phy_vbus_wakeup() - presumably only called on Medfield;
+ * confirm against callers.
+ */
+void penwell_otg_phy_intr(bool on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u8 flag = 0;
+	int retval;
+	u8 data;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	penwell_otg_msic_spi_access(true);
+
+	flag = VBUSVLD | IDGND;
+
+	if (on) {
+		dev_info(pnw->dev, "enable VBUSVLD & IDGND\n");
+		penwell_otg_msic_write(MSIC_USBINTEN_RISESET, flag);
+		penwell_otg_msic_write(MSIC_USBINTEN_FALLSET, flag);
+	} else {
+		dev_info(pnw->dev, "disable VBUSVLD & IDGND\n");
+		penwell_otg_msic_write(MSIC_USBINTEN_RISECLR, flag);
+		penwell_otg_msic_write(MSIC_USBINTEN_FALLCLR, flag);
+	}
+
+	/* read back for debugging only; failures are not fatal */
+	retval = intel_scu_ipc_ioread8(MSIC_USBINTEN_RISE, &data);
+	if (retval)
+		dev_warn(pnw->dev, "Failed to read MSIC register\n");
+	else
+		dev_info(pnw->dev, "MSIC_USBINTEN_RISE = 0x%x", data);
+
+	retval = intel_scu_ipc_ioread8(MSIC_USBINTEN_FALL, &data);
+	if (retval)
+		dev_warn(pnw->dev, "Failed to read MSIC register\n");
+	else
+		dev_info(pnw->dev, "MSIC_USBINTEN_FALL = 0x%x", data);
+
+	penwell_otg_msic_spi_access(false);
+}
+
+/*
+ * penwell_otg_phy_power - power the external USB PHY on or off.
+ *
+ * Only Clovertrail exposes a PHY chip-select GPIO; on other platforms
+ * this is a no-op apart from the trace messages.
+ */
+void penwell_otg_phy_power(int on)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		dev_dbg(pnw->dev, "turn %s USB PHY by gpio_cs(%d)\n",
+				on ? "on" : "off",
+				pnw->otg_pdata->gpio_cs);
+		gpio_direction_output(pnw->otg_pdata->gpio_cs, on);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * penwell_otg_phy_reset - pulse the PHY reset GPIO (Clovertrail only).
+ *
+ * Holds the reset line low for 200-500us before releasing it.  No-op on
+ * platforms without the reset GPIO.
+ */
+void penwell_otg_phy_reset(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		gpio_direction_output(pnw->otg_pdata->gpio_reset, 0);
+		usleep_range(200, 500);
+		gpio_set_value(pnw->otg_pdata->gpio_reset, 1);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * Disabled: depends on usb_notify_warning(), which is not available
+ * (see the note inside the #if 0 block).  The macro stub below makes
+ * call sites compile to nothing until a replacement hook exists.
+ */
+#if 0
+knext_wa: usb_notify_warning should be implemented again instead of in hub.c
+static void penwell_otg_notify_warning(int warning_code)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s ---> %d\n", __func__, warning_code);
+
+	if (pnw && pnw->iotg.otg.otg->host && pnw->iotg.otg.otg->host->root_hub)
+		usb_notify_warning(pnw->iotg.otg.otg->host->root_hub,
+					warning_code);
+	else
+		dev_dbg(pnw->dev, "no valid device for notification\n");
+
+	dev_dbg(pnw->dev, "%s <--- %d\n", __func__, warning_code);
+}
+#else
+#define penwell_otg_notify_warning(x)
+#endif
+
+/* Map an NSF indicator code to the matching warning message. */
+void penwell_otg_nsf_msg(unsigned long indicator)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	if (indicator == 2 || indicator == 4 ||
+	    indicator == 6 || indicator == 7)
+		dev_warn(pnw->dev,
+			"NSF-%lu - device not responding\n", indicator);
+	else if (indicator == 3)
+		dev_warn(pnw->dev,
+			"NSF-%lu - device not supported\n", indicator);
+	else
+		dev_warn(pnw->dev,
+			"Do not have this kind of NSF\n");
+}
+
+/* The timeout callback function to set time out bit.
+ * 'indicator' is really an int * (the hsm timeout flag for the armed
+ * timer) smuggled through the timer's unsigned long data field; set it
+ * and kick the state machine so it observes the timeout. */
+static void penwell_otg_timer_fn(unsigned long indicator)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	*(int *)indicator = 1;
+
+	dev_dbg(pnw->dev, "kernel timer - timeout\n");
+
+	penwell_update_transceiver();
+}
+
+/*
+ * penwell_otg_add_timer - arm the shared OTG state-machine timer.
+ *
+ * One kernel timer (pnw->hsm_timer) backs all OTG protocol timeouts; if
+ * it is already pending the request is silently ignored.  For each timer
+ * type the matching hsm timeout flag is cleared and its address is stored
+ * in the timer's data field, so penwell_otg_timer_fn() can set it on
+ * expiry.
+ */
+static void penwell_otg_add_timer(enum penwell_otg_timer_type timers)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	unsigned long j = jiffies;
+	unsigned long data, time;
+
+	if (timer_pending(&pnw->hsm_timer))
+		return;
+
+	switch (timers) {
+	case TA_WAIT_VRISE_TMR:
+		iotg->hsm.a_wait_vrise_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout;
+		/* Charger HW limitation workaround for CLV */
+		time = is_clovertrail(to_pci_dev(pnw->dev)) ?
+						400 : TA_WAIT_VRISE;
+		dev_dbg(pnw->dev,
+			"Add timer TA_WAIT_VRISE = %lu\n", time);
+		break;
+	case TA_WAIT_BCON_TMR:
+		iotg->hsm.a_wait_bcon_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout;
+		time = TA_WAIT_BCON;
+		dev_dbg(pnw->dev,
+			"Add timer TA_WAIT_BCON = %d\n", TA_WAIT_BCON);
+		break;
+	case TA_AIDL_BDIS_TMR:
+		iotg->hsm.a_aidl_bdis_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout;
+		time = TA_AIDL_BDIS;
+		dev_dbg(pnw->dev,
+			"Add timer TA_AIDL_BDIS = %d\n", TA_AIDL_BDIS);
+		break;
+	case TA_BIDL_ADIS_TMR:
+		iotg->hsm.a_bidl_adis_tmout = 0;
+		iotg->hsm.a_bidl_adis_tmr = 1;
+		data = (unsigned long)&iotg->hsm.a_bidl_adis_tmout;
+		time = TA_BIDL_ADIS;
+		dev_dbg(pnw->dev,
+			"Add timer TA_BIDL_ADIS = %d\n", TA_BIDL_ADIS);
+		break;
+	case TA_WAIT_VFALL_TMR:
+		iotg->hsm.a_wait_vfall_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_wait_vfall_tmout;
+		time = TA_WAIT_VFALL;
+		dev_dbg(pnw->dev,
+			"Add timer TA_WAIT_VFALL = %d\n", TA_WAIT_VFALL);
+		break;
+	case TB_ASE0_BRST_TMR:
+		iotg->hsm.b_ase0_brst_tmout = 0;
+		data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout;
+		time = TB_ASE0_BRST;
+		dev_dbg(pnw->dev,
+			"Add timer TB_ASE0_BRST = %d\n", TB_ASE0_BRST);
+		break;
+	case TB_SRP_FAIL_TMR:
+		iotg->hsm.b_srp_fail_tmout = 0;
+		iotg->hsm.b_srp_fail_tmr = 1;
+		data = (unsigned long)&iotg->hsm.b_srp_fail_tmout;
+		time = TB_SRP_FAIL;
+		dev_dbg(pnw->dev,
+			"Add timer TB_SRP_FAIL = %d\n", TB_SRP_FAIL);
+		break;
+	/* support OTG test mode */
+	case TTST_MAINT_TMR:
+		iotg->hsm.tst_maint_tmout = 0;
+		data = (unsigned long)&iotg->hsm.tst_maint_tmout;
+		time = TTST_MAINT;
+		dev_dbg(pnw->dev,
+			"Add timer TTST_MAINT = %d\n", TTST_MAINT);
+		break;
+	case TTST_NOADP_TMR:
+		iotg->hsm.tst_noadp_tmout = 0;
+		data = (unsigned long)&iotg->hsm.tst_noadp_tmout;
+		time = TTST_NOADP;
+		dev_dbg(pnw->dev,
+			"Add timer TTST_NOADP = %d\n", TTST_NOADP);
+		break;
+	default:
+		/* fixed "unkown" typo in the log message */
+		dev_dbg(pnw->dev,
+			"unknown timer, can not enable such timer\n");
+		return;
+	}
+
+	pnw->hsm_timer.data = data;
+	pnw->hsm_timer.function = penwell_otg_timer_fn;
+	pnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
+
+	add_timer(&pnw->hsm_timer);
+}
+
+/*
+ * penwell_otg_del_timer - cancel the shared OTG state-machine timer.
+ *
+ * Clears the per-timer bookkeeping flag for the given type, then
+ * synchronously deletes the (single, shared) hsm_timer regardless of
+ * which timer type is currently armed.
+ */
+static inline void penwell_otg_del_timer(enum penwell_otg_timer_type timers)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+
+	switch (timers) {
+	case TA_BIDL_ADIS_TMR:
+		iotg->hsm.a_bidl_adis_tmr = 0;
+		break;
+	case TB_SRP_FAIL_TMR:
+		iotg->hsm.b_srp_fail_tmr = 0;
+		break;
+	case TA_WAIT_BCON_TMR:
+		iotg->hsm.a_wait_bcon_tmout = 0;
+		break;
+	case TTST_MAINT_TMR:
+		iotg->hsm.tst_maint_tmout = 0;
+		break;
+	case TTST_NOADP_TMR:
+		iotg->hsm.tst_noadp_tmout = 0;
+		break;
+	default:
+		break;
+	}
+
+	dev_dbg(pnw->dev, "state machine timer deleted\n");
+	del_timer_sync(&pnw->hsm_timer);
+}
+
+/*
+ * reset_otg - reset the ChipIdea OTG controller.
+ *
+ * Sets USBCMD.RST and polls (100us steps, ~100ms budget) until the
+ * hardware clears the bit.  Previously the timeout check only logged and
+ * kept looping forever if the controller never cleared RST; now it bails
+ * out of the loop after logging.
+ */
+static void reset_otg(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	u32 val;
+	int delay_time = 1000;
+
+	dev_dbg(pnw->dev, "resetting OTG controller ...\n");
+	val = readl(pnw->iotg.base + CI_USBCMD);
+	writel(val | USBCMD_RST, pnw->iotg.base + CI_USBCMD);
+	do {
+		udelay(100);
+		if (!delay_time--) {
+			dev_dbg(pnw->dev, "reset timeout\n");
+			break;
+		}
+		val = readl(pnw->iotg.base + CI_USBCMD);
+		val &= USBCMD_RST;
+	} while (val != 0);
+	dev_dbg(pnw->dev, "reset done.\n");
+}
+
+/*
+ * pnw_phy_ctrl_rst - recover by resetting both the PHY and the controller.
+ *
+ * Masks the OTG interrupt across the reset to avoid a spurious ID_A
+ * detection, then resynchronizes the state machine from OTGSC.
+ */
+static void pnw_phy_ctrl_rst(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(pnw->dev);
+
+	/* mask id intr before reset and delay for 4 ms
+	 * before unmasking id intr to avoid wrongly
+	 * detecting ID_A which is a side-effect of reset
+	 * PHY
+	 */
+	penwell_otg_intr(0);
+	/* wait out any in-flight handler before touching the hardware */
+	synchronize_irq(pdev->irq);
+
+	penwell_otg_phy_reset();
+
+	reset_otg();
+
+	msleep(50);
+	penwell_otg_intr(1);
+
+	/* after reset, need to sync to OTGSC status bits to hsm */
+	update_hsm();
+	penwell_update_transceiver();
+}
+
+/* Reset the controller, then select host mode in the USBMODE register. */
+static void set_host_mode(void)
+{
+	u32 mode;
+
+	reset_otg();
+	mode = readl(the_transceiver->iotg.base + CI_USBMODE);
+	mode &= ~USBMODE_CM;
+	mode |= USBMODE_HOST;
+	writel(mode, the_transceiver->iotg.base + CI_USBMODE);
+}
+
+/* Reset the controller, then select device mode in the USBMODE register. */
+static void set_client_mode(void)
+{
+	u32 mode;
+
+	reset_otg();
+	mode = readl(the_transceiver->iotg.base + CI_USBMODE);
+	mode &= ~USBMODE_CM;
+	mode |= USBMODE_DEVICE;
+	writel(mode, the_transceiver->iotg.base + CI_USBMODE);
+}
+
+/*
+ * init_hsm - seed the OTG host state machine from the OTGSC register.
+ *
+ * Picks A- or B-role from the ID pin, selects the matching controller
+ * mode, latches the session-status bits and sets the initial request
+ * flags.  Takes the wakelock / runtime-PM reference when starting as
+ * the A-device.
+ */
+static void init_hsm(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	u32 val32;
+
+	/* read OTGSC after reset */
+	val32 = readl(iotg->base + CI_OTGSC);
+	dev_dbg(pnw->dev,
+		"%s: OTGSC init value = 0x%x\n", __func__, val32);
+
+	/* set init state */
+	if (val32 & OTGSC_ID) {
+		iotg->hsm.id = ID_B;
+		iotg->otg.otg->default_a = 0;
+		set_client_mode();
+		iotg->otg.state = OTG_STATE_B_IDLE;
+	} else {
+		iotg->hsm.id = ID_A;
+		iotg->otg.otg->default_a = 1;
+		set_host_mode();
+		iotg->otg.state = OTG_STATE_A_IDLE;
+	}
+
+	/* set session indicator */
+	if (val32 & OTGSC_BSE)
+		iotg->hsm.b_sess_end = 1;
+	if (val32 & OTGSC_BSV)
+		iotg->hsm.b_sess_vld = 1;
+	if (val32 & OTGSC_ASV)
+		iotg->hsm.a_sess_vld = 1;
+	if (val32 & OTGSC_AVV)
+		iotg->hsm.a_vbus_vld = 1;
+
+	/* as A-device, request the bus by default and don't drop it */
+	iotg->hsm.a_bus_req = 1;
+	iotg->hsm.a_bus_drop = 0;
+	/* start with the power_up flag cleared */
+	iotg->hsm.power_up = 0;
+	/* by default, don't request the bus as B device */
+	iotg->hsm.b_bus_req = 0;
+	/* no system error */
+	iotg->hsm.a_clr_err = 0;
+
+	if (iotg->otg.state == OTG_STATE_A_IDLE) {
+		wake_lock(&pnw->wake_lock);
+		pm_runtime_get(pnw->dev);
+	}
+}
+
+/* Refresh the state-machine inputs from the current OTGSC status bits. */
+static void update_hsm(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	u32 otgsc;
+
+	otgsc = readl(iotg->base + CI_OTGSC);
+	dev_dbg(pnw->dev,
+		"%s OTGSC current value = 0x%x\n", __func__, otgsc);
+
+	iotg->hsm.id = (otgsc & OTGSC_ID) ? ID_B : ID_A;
+	iotg->hsm.b_sess_end = !!(otgsc & OTGSC_BSE);
+	iotg->hsm.b_sess_vld = !!(otgsc & OTGSC_BSV);
+	iotg->hsm.a_sess_vld = !!(otgsc & OTGSC_ASV);
+	iotg->hsm.a_vbus_vld = !!(otgsc & OTGSC_AVV);
+}
+
+/*
+ * penwell_otg_eye_diagram_optimize - tune PHY drive strength.
+ *
+ * Writes a platform-specific ZHSDRV/IHSTX value to ULPI_VS1SET to improve
+ * the high-speed eye diagram.  A failed write is logged but not fatal.
+ */
+static void penwell_otg_eye_diagram_optimize(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int retval;
+	u8 value = 0;
+
+	/* Check platform value as different value will be used*/
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		/* Set 0x7f for better quality in eye diagram
+		 * It means ZHSDRV = 0b11 and IHSTX = 0b1111*/
+		value = 0x7f;
+	} else {
+		/* Set 0x77 for better quality in eye diagram
+		 * It means ZHSDRV = 0b11 and IHSTX = 0b0111*/
+		value = 0x77;
+	}
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1SET, value);
+	if (retval)
+		dev_warn(pnw->dev,
+			"eye diagram optimize failed with ulpi failure\n");
+}
+
+/*
+ * otg_dummy_irq - auxiliary IRQ handler used while in device mode.
+ *
+ * 'dev' is the controller register base.  Only claims the interrupt when
+ * the controller is in device mode and one of the INTR_DUMMY_MASK status
+ * bits is set; its real job is to clear hsm.b_conn (B-disconnect), which
+ * the host driver cannot observe itself.
+ */
+static irqreturn_t otg_dummy_irq(int irq, void *_dev)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	void __iomem *reg_base = _dev;
+	u32 val;
+	u32 int_mask = 0;
+
+	val = readl(reg_base + CI_USBMODE);
+	if ((val & USBMODE_CM) != USBMODE_DEVICE)
+		return IRQ_NONE;
+
+	val = readl(reg_base + CI_USBSTS);
+	int_mask = val & INTR_DUMMY_MASK;
+
+	if (int_mask == 0)
+		return IRQ_NONE;
+
+	/* clear hsm.b_conn here since host driver can't detect it
+	*  otg_dummy_irq called means B-disconnect happened.
+	*/
+	if (pnw->iotg.hsm.b_conn) {
+		pnw->iotg.hsm.b_conn = 0;
+		penwell_update_transceiver();
+	}
+
+	/* Clear interrupts */
+	writel(int_mask, reg_base + CI_USBSTS);
+	return IRQ_HANDLED;
+}
+
+/*
+ * otg_irq_handle - decode OTGSC interrupt status into hsm inputs.
+ *
+ * Reads OTGSC, masks it with the enabled interrupt bits and returns
+ * IRQ_NONE when nothing is pending.  Otherwise it refreshes the relevant
+ * hsm fields (ID - including ACA-A/B/C via the ULPI VS2 ID arbitration
+ * latch - SRP, session-valid and VBUS-valid bits), acknowledges the
+ * handled status bits and, when anything changed, kicks the state
+ * machine worker.
+ */
+static irqreturn_t otg_irq_handle(struct penwell_otg *pnw,
+					struct intel_mid_otg_xceiv *iotg)
+{
+	int id = 0;
+	int flag = 0;
+	u32 int_sts, int_en, int_mask = 0;
+	u8 usb_vs2_latch = 0;
+	u8 usb_vs2_sts = 0;
+	struct iotg_ulpi_access_ops *ops;
+
+	/* Check VBUS/SRP interrupt */
+	int_sts = readl(pnw->iotg.base + CI_OTGSC);
+	int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
+	int_mask = int_sts & int_en;
+
+	if (int_mask == 0)
+		return IRQ_NONE;
+
+	/* ACA detection: the ID arbitration result lives in ULPI VS2 */
+	ops = &iotg->ulpi_ops;
+	ops->read(iotg, ULPI_VS2LATCH, &usb_vs2_latch);
+	dev_dbg(pnw->dev, "usb_vs2_latch = 0x%x\n", usb_vs2_latch);
+	if (usb_vs2_latch & IDRARBRC_MSK) {
+		ops->read(iotg, ULPI_VS2STS, &usb_vs2_sts);
+		dev_dbg(pnw->dev, "%s: usb_vs2_sts = 0x%x\n",
+			__func__, usb_vs2_sts);
+
+		switch (IDRARBRC_STS(usb_vs2_sts)) {
+		case IDRARBRC_A:
+			id = ID_ACA_A;
+			dev_dbg(pnw->dev, "ACA-A interrupt detected\n");
+			break;
+		case IDRARBRC_B:
+			id = ID_ACA_B;
+			dev_dbg(pnw->dev, "ACA-B interrupt detected\n");
+			break;
+		case IDRARBRC_C:
+			id = ID_ACA_C;
+			dev_dbg(pnw->dev, "ACA-C interrupt detected\n");
+			break;
+		default:
+			break;
+		}
+
+		if (id) {
+			iotg->hsm.id = id;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: id change int = %d\n",
+						__func__, iotg->hsm.id);
+		} else {
+			/* ACA went away: fall back to the OTGSC ID pin */
+			iotg->hsm.id = (int_sts & OTGSC_ID) ?
+				ID_B : ID_A;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: id change int = %d\n",
+						__func__, iotg->hsm.id);
+		}
+	}
+
+	if (int_mask) {
+		dev_dbg(pnw->dev,
+			"OTGSC = 0x%x, mask =0x%x\n", int_sts, int_mask);
+
+		if (int_mask & OTGSC_IDIS) {
+			if (!id)
+				iotg->hsm.id = (int_sts & OTGSC_ID) ?
+					ID_B : ID_A;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: id change int = %d\n",
+						__func__, iotg->hsm.id);
+
+			/* Update a_vbus_valid once ID changed */
+			iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
+		}
+		if (int_mask & OTGSC_DPIS) {
+			iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: data pulse int = %d\n",
+						__func__, iotg->hsm.a_srp_det);
+		}
+		if (int_mask & OTGSC_BSEIS) {
+			iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: b sess end int = %d\n",
+						__func__, iotg->hsm.b_sess_end);
+		}
+		if (int_mask & OTGSC_BSVIS) {
+			iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: b sess valid int = %d\n",
+						__func__, iotg->hsm.b_sess_vld);
+		}
+		if (int_mask & OTGSC_ASVIS) {
+			iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: a sess valid int = %d\n",
+						__func__, iotg->hsm.a_sess_vld);
+		}
+		if (int_mask & OTGSC_AVVIS) {
+			iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: a vbus valid int = %d\n",
+						__func__, iotg->hsm.a_vbus_vld);
+		}
+
+		/* ack only the bits handled here; keep the enables intact */
+		writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
+					pnw->iotg.base + CI_OTGSC);
+	}
+
+	if (flag)
+		penwell_update_transceiver();
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * otg_irq - top-level OTG interrupt handler.
+ *
+ * When the device is runtime-suspended, just trigger a runtime resume
+ * and claim the interrupt (registers must not be touched yet);
+ * otherwise delegate to otg_irq_handle() for OTGSC decoding.
+ *
+ * 'flags' moved inside the #ifdef: it was only used on the
+ * CONFIG_PM_RUNTIME path and caused an unused-variable warning
+ * otherwise.
+ */
+static irqreturn_t otg_irq(int irq, void *_dev)
+{
+	struct penwell_otg *pnw = _dev;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+
+#ifdef CONFIG_PM_RUNTIME
+	unsigned long flags;
+
+	if (pnw->rt_resuming)
+		return IRQ_HANDLED;
+
+	/* If it's not active, resume device first before access regs */
+	if (pnw->rt_quiesce) {
+		spin_lock_irqsave(&pnw->lock, flags);
+		/* re-check under the lock to avoid racing the resume path */
+		if (pnw->rt_quiesce) {
+			dev_dbg(pnw->dev, "Wake up? Interrupt detected in suspended\n");
+			pnw->rt_resuming = 1;
+			pm_runtime_get(pnw->dev);
+		}
+		spin_unlock_irqrestore(&pnw->lock, flags);
+
+		return IRQ_HANDLED;
+	}
+#endif /* CONFIG_PM_RUNTIME */
+
+	return otg_irq_handle(pnw, iotg);
+}
+
+/*
+ * penwell_otg_start_ulpi_poll - start the periodic ULPI health check.
+ *
+ * Writes a known value into ULPI register 0x16 (assumed to be a
+ * scratch/vendor register - TODO confirm against the PHY datasheet);
+ * the poll worker later reads it back to detect a hung ULPI interface.
+ */
+static void penwell_otg_start_ulpi_poll(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	int retval = 0;
+
+	retval = penwell_otg_ulpi_write(&pnw->iotg, 0x16, 0x5a);
+	if (retval)
+		dev_err(pnw->dev, "ulpi write in init failed\n");
+
+	schedule_delayed_work(&pnw->ulpi_poll_work, HZ);
+}
+
+/* Re-arm the ULPI health poll one second from now. */
+static void penwell_otg_continue_ulpi_poll(void)
+{
+	schedule_delayed_work(&the_transceiver->ulpi_poll_work, HZ);
+}
+
+/* Stop the periodic ULPI health check. */
+static void penwell_otg_stop_ulpi_poll(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	/* we have to use this version because this function can
+	 * be called in irq handler */
+	__cancel_delayed_work(&pnw->ulpi_poll_work);
+}
+
+
+/*
+ * penwell_otg_iotg_notify - notifier chain callback for OTG bus events.
+ *
+ * Translates MID_OTG_NOTIFY_* events from the host/client controller
+ * drivers into hsm flag updates and, when a flag changed, kicks the
+ * state machine worker.  Returns NOTIFY_BAD on a NULL transceiver,
+ * NOTIFY_DONE for unknown events, NOTIFY_OK otherwise.
+ *
+ * Log-string typos fixed: "Nofity"/"Notfiy" -> "Notify".
+ */
+static int penwell_otg_iotg_notify(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = data;
+	int flag = 0;
+	struct pci_dev *pdev;
+
+	if (iotg == NULL)
+		return NOTIFY_BAD;
+
+	if (pnw == NULL)
+		return NOTIFY_BAD;
+
+	pdev = to_pci_dev(pnw->dev);
+
+	switch (action) {
+	case MID_OTG_NOTIFY_CONNECT:
+		dev_dbg(pnw->dev, "PNW OTG Notify Connect Event\n");
+		if (iotg->otg.otg->default_a == 1)
+			iotg->hsm.b_conn = 1;
+		else
+			iotg->hsm.a_conn = 1;
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_DISCONN:
+		dev_dbg(pnw->dev, "PNW OTG Notify Disconnect Event\n");
+		if (iotg->otg.otg->default_a == 1)
+			iotg->hsm.b_conn = 0;
+		else
+			iotg->hsm.a_conn = 0;
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_HSUSPEND:
+		dev_dbg(pnw->dev, "PNW OTG Notify Host Bus suspend Event\n");
+		break;
+	case MID_OTG_NOTIFY_HRESUME:
+		dev_dbg(pnw->dev, "PNW OTG Notify Host Bus resume Event\n");
+		if (iotg->otg.otg->default_a == 1 && iotg->hsm.a_bus_req == 0) {
+			iotg->hsm.a_bus_req = 1;
+			flag = 1;
+		}
+		break;
+	case MID_OTG_NOTIFY_CSUSPEND:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Bus suspend Event\n");
+		if (iotg->otg.otg->default_a == 1) {
+			iotg->hsm.b_bus_suspend = 1;
+			flag = 1;
+		} else {
+			penwell_otg_stop_ulpi_poll();
+			if (iotg->hsm.a_bus_suspend == 0) {
+				iotg->hsm.a_bus_suspend = 1;
+				flag = 1;
+			} else
+				flag = 0;
+		}
+		break;
+	case MID_OTG_NOTIFY_CRESUME:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Bus resume Event\n");
+		if (iotg->otg.otg->default_a == 1) {
+			/* in A_PERIPHERAL state */
+			iotg->hsm.b_bus_suspend = 0;
+			flag = 1;
+		} else {
+			/* in B_PERIPHERAL state */
+			if (!is_clovertrail(pdev))
+				penwell_otg_start_ulpi_poll();
+			iotg->hsm.a_bus_suspend = 0;
+			flag = 0;
+		}
+		break;
+	case MID_OTG_NOTIFY_CRESET:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Bus reset Event\n");
+		penwell_otg_set_power(&pnw->iotg.otg, CHRG_CURR_SDP_SUSP);
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_HOSTADD:
+		dev_dbg(pnw->dev, "PNW OTG Notify Host Driver Add\n");
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_HOSTREMOVE:
+		dev_dbg(pnw->dev, "PNW OTG Notify Host Driver remove\n");
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_CLIENTADD:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Driver Add\n");
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_CLIENTREMOVE:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Driver remove\n");
+		flag = 1;
+		break;
+	/* Test mode support */
+	case MID_OTG_NOTIFY_TEST_MODE_START:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Testmode Start\n");
+		iotg->hsm.in_test_mode = 1;
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_TEST_MODE_STOP:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Testmode Stop\n");
+		iotg->hsm.in_test_mode = 0;
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_TEST_SRP_REQD:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client SRP REQD\n");
+		iotg->hsm.otg_srp_reqd = 1;
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_TEST:
+		dev_dbg(pnw->dev, "PNW OTG Notify Test device detected\n");
+		iotg->hsm.test_device = 1;
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_TEST_VBUS_OFF:
+		dev_dbg(pnw->dev, "PNW OTG Notify Test device Vbus off mode\n");
+		iotg->hsm.test_device = 1;
+		iotg->hsm.otg_vbus_off = 1;
+		flag = 1;
+		break;
+	default:
+		dev_dbg(pnw->dev, "PNW OTG Notify unknown notify message\n");
+		return NOTIFY_DONE;
+	}
+
+	if (flag)
+		penwell_update_transceiver();
+
+	return NOTIFY_OK;
+}
+
+/*
+ * penwell_otg_hnp_poll_work - poll the peer device's host-request flag.
+ *
+ * While acting as host (A_HOST/B_HOST), reads the OTG status of the
+ * device on root-hub port 1; when its host_request_flag is set, drops
+ * the local bus request to kick off HNP role switch, otherwise re-arms
+ * the poll.  HS electrical-test devices (VID 0x1A0A) are skipped.
+ *
+ * NOTE(review): 'data' is a stack variable passed to usb_control_msg();
+ * the USB core requires DMA-able (kmalloc'd) transfer buffers - confirm
+ * whether this works on these platforms or needs a heap buffer.
+ */
+static void penwell_otg_hnp_poll_work(struct work_struct *work)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	struct usb_device *udev;
+	int err = 0;
+	u8 data;
+
+	if (iotg->otg.otg->host && iotg->otg.otg->host->root_hub) {
+		udev = usb_hub_find_child(iotg->otg.otg->host->root_hub, 1);
+	} else {
+		dev_warn(pnw->dev, "no host or root_hub registered\n");
+		return;
+	}
+
+	if (iotg->otg.state != OTG_STATE_A_HOST
+		&& iotg->otg.state != OTG_STATE_B_HOST)
+		return;
+
+	if (!udev) {
+		dev_warn(pnw->dev,
+			"no usb dev connected, stop HNP polling\n");
+		return;
+	}
+
+	/* Skip HS Electrical Test Device */
+	if (le16_to_cpu(udev->descriptor.idVendor) == 0x1A0A &&
+		le16_to_cpu(udev->descriptor.idProduct) > 0x0100 &&
+		le16_to_cpu(udev->descriptor.idProduct) < 0x0109) {
+		return;
+	}
+
+	/* get host request flag from connected USB device */
+	err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+		USB_REQ_GET_STATUS, USB_DIR_IN, 0, 0xF000, &data, 1, 5000);
+
+	if (err < 0) {
+		dev_warn(pnw->dev,
+			"ERR in HNP polling = %d, stop HNP polling\n", err);
+		return;
+	}
+
+	if (data & HOST_REQUEST_FLAG) {
+		/* start HNP sequence to switch role */
+		dev_dbg(pnw->dev, "host_request_flag = 1\n");
+
+		if (iotg->hsm.id == ID_B) {
+			dev_dbg(pnw->dev,
+				"Device B host - start HNP - b_bus_req = 0\n");
+			iotg->hsm.b_bus_req = 0;
+		} else if (iotg->hsm.id == ID_A) {
+			dev_dbg(pnw->dev,
+				"Device A host - start HNP - a_bus_req = 0\n");
+			iotg->hsm.a_bus_req = 0;
+		}
+		penwell_update_transceiver();
+	} else {
+		dev_dbg(pnw->dev, "host_request_flag = 0\n");
+		penwell_otg_continue_hnp_poll(&pnw->iotg);
+	}
+}
+
+/*
+ * penwell_otg_ulpi_check - probe the ULPI interface for a hang.
+ *
+ * Tries a read of ULPI register 0x16 (the scratch register the poll
+ * logic writes - TODO confirm); on failure assumes the ULPI link hung
+ * and resets both the PHY and the controller via pnw_phy_ctrl_rst().
+ * Returns 0 when healthy, the read error after recovery otherwise.
+ */
+static int penwell_otg_ulpi_check(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	u8 data;
+	int retval;
+
+	retval = penwell_otg_ulpi_read(iotg, 0x16, &data);
+	if (retval) {
+		dev_err(pnw->dev,
+			"%s: [ ULPI hang ] detected\n"
+			"reset PHY & ctrl to recover\n",
+			__func__);
+		pnw_phy_ctrl_rst();
+		return retval;
+	}
+	return 0;
+}
+
+/*
+ * penwell_otg_ulpi_check_work - workqueue wrapper for the ULPI health check.
+ *
+ * Runtime-resumes the device, runs penwell_otg_ulpi_check() in the
+ * B_IDLE and B_PERIPHERAL states, and in the peripheral case restarts
+ * the client stack after a recovery reset so enumeration starts over.
+ */
+static void penwell_otg_ulpi_check_work(struct work_struct *work)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	int status;
+
+	status = pm_runtime_get_sync(pnw->dev);
+	if (status < 0) {
+		dev_err(pnw->dev, "%s: pm_runtime_get_sync FAILED err = %d\n",
+			__func__, status);
+		/* drop the usage count taken even by a failed get_sync */
+		pm_runtime_put_sync(pnw->dev);
+		return;
+	}
+
+	if (iotg->otg.state == OTG_STATE_B_IDLE) {
+		/* Before charger detection or charger detection done */
+		dev_dbg(pnw->dev, "ulpi_check health\n");
+		penwell_otg_ulpi_check();
+	} else if (iotg->otg.state == OTG_STATE_B_PERIPHERAL) {
+		/* After charger detection, SDP/CDP is detected */
+		dev_dbg(pnw->dev, "ulpi_check health\n");
+		status = penwell_otg_ulpi_check();
+		if (status) {
+			/* After phy rst then restart peripheral stack */
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver not support\n");
+
+			if (iotg->start_peripheral)
+				iotg->start_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver not support\n");
+		}
+	}
+
+	pm_runtime_put(pnw->dev);
+}
+
+/* Emit a KOBJ_CHANGE uevent flagging a bogus/spurious USB interrupt. */
+static void penwell_otg_uevent_work(struct work_struct *work)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	char *envp[2] = { "USB_INTR=BOGUS", NULL };
+
+	dev_info(pnw->dev, "%s: send uevent USB_INTR=BOGUS\n", __func__);
+	kobject_uevent_env(&pnw->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+/*
+ * Periodic ULPI poll while acting as a B-peripheral.
+ *
+ * Reads ULPI register 0x16 and expects 0x5A; a timeout or a wrong
+ * value marks hsm.ulpi_error and kicks the state machine, otherwise
+ * the healthy flag is kept and the next poll is scheduled.  Skipped
+ * outside B_PERIPHERAL and while in USB test mode.
+ */
+static void penwell_otg_ulpi_poll_work(struct work_struct *work)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	const char *failure = NULL;
+	u8 data;
+
+	if (iotg->otg.state != OTG_STATE_B_PERIPHERAL)
+		return;
+
+	if (iotg->hsm.in_test_mode)
+		return;
+
+	if (penwell_otg_ulpi_read(iotg, 0x16, &data))
+		failure = "ulpi read time out by polling\n";
+	else if (data != 0x5A)
+		failure = "ulpi read value incorrect by polling\n";
+
+	if (failure) {
+		dev_err(pnw->dev, "%s", failure);
+		iotg->hsm.ulpi_error = 1;
+		iotg->hsm.ulpi_polling = 0;
+		penwell_update_transceiver();
+	} else {
+		dev_dbg(pnw->dev, "ulpi fine by polling\n");
+		iotg->hsm.ulpi_error = 0;
+		iotg->hsm.ulpi_polling = 1;
+		penwell_otg_continue_ulpi_poll();
+	}
+}
+
+/*
+ * Workqueue handler that drains the queued charger events and publishes
+ * each one on the OTG notifier chain as USB_EVENT_CHARGER.
+ *
+ * chrg_evt_queue is protected by charger_lock; the lock is dropped
+ * while the dequeued event is translated and delivered, then re-taken
+ * before touching the list again.  The last published capability
+ * (pnw->psc_cap) is read and updated under cap_lock.
+ */
+static void penwell_otg_psc_notify_work(struct work_struct *work)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+	struct power_supply_cable_props psc_cap;
+	enum power_supply_charger_event chrg_event;
+	unsigned long flags;
+	struct otg_bc_event *event, *temp;
+
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+	list_for_each_entry_safe(event, temp, &pnw->chrg_evt_queue, node) {
+		list_del(&event->node);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		spin_lock_irqsave(&pnw->cap_lock, flags);
+		/* check_psc_event() compares the stored cap against the new
+		 * one; -1 apparently means "no change worth notifying" --
+		 * TODO confirm against its definition */
+		chrg_event = check_psc_event(pnw->psc_cap, event->cap);
+		if (chrg_event == -1)
+			dev_dbg(pnw->dev, "no need to notify\n");
+		else if (chrg_event == POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+			/* In Disconnect case, EM driver needs same chrg type
+			 * like Connect even, construct one here */
+			psc_cap = event->cap;
+			psc_cap.chrg_evt = chrg_event;
+			/* notification keeps the previous charger type */
+			psc_cap.chrg_type = pnw->psc_cap.chrg_type;
+			pnw->psc_cap = event->cap;
+			pnw->psc_cap.chrg_evt = chrg_event;
+		} else {
+			pnw->psc_cap = event->cap;
+			pnw->psc_cap.chrg_evt = chrg_event;
+			psc_cap = pnw->psc_cap;
+		}
+		spin_unlock_irqrestore(&pnw->cap_lock, flags);
+
+		if (chrg_event != -1) {
+			dev_dbg(pnw->dev, "ma = %d, evt = %d, type = %s\n",
+				psc_cap.ma, psc_cap.chrg_evt,
+				psc_string(psc_cap.chrg_type));
+
+			atomic_notifier_call_chain(&iotg->otg.notifier,
+					USB_EVENT_CHARGER, &psc_cap);
+		}
+
+		kfree(event);
+		/* re-acquire before the iterator reads the next entry */
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+	}
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+}
+
+/*
+ * Delayed check for an "invalid" SDP charger (MFLD only).
+ *
+ * If, after the scheduled delay, the charging capability is still the
+ * default 100mA SDP, the port is assumed to be a non-enumerating
+ * charger and is re-announced as CHRG_SDP_INVAL at CHRG_CURR_SDP_INVAL.
+ * Clovertrail uses a different interface and is skipped entirely.
+ */
+static void penwell_otg_sdp_check_work(struct work_struct *work)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct otg_bc_cap cap;
+
+	/* Need to handle MFLD/CLV differently per different interface */
+	if (is_clovertrail(to_pci_dev(pnw->dev)))
+		return;
+
+	if (penwell_otg_query_charging_cap(&cap)) {
+		dev_warn(pnw->dev, "SDP checking failed\n");
+		return;
+	}
+
+	/* If current charging cap is still 100ma SDP,
+	 * assume this is a invalid charger and do 500ma
+	 * charging */
+	if (cap.ma != 100 || cap.chrg_type != CHRG_SDP)
+		return;
+
+	dev_info(pnw->dev, "Notify invalid SDP at %dma\n", CHRG_CURR_SDP_INVAL);
+	penwell_otg_update_chrg_cap(CHRG_SDP_INVAL, CHRG_CURR_SDP_INVAL);
+}
+
+static void penwell_otg_work(struct work_struct *work)
+{
+ struct penwell_otg *pnw = container_of(work,
+ struct penwell_otg, work);
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ struct otg_hsm *hsm = &iotg->hsm;
+ enum usb_charger_type charger_type;
+ enum power_supply_charger_cable_type type;
+ int retval;
+ struct pci_dev *pdev;
+ unsigned long flags;
+
+ dev_dbg(pnw->dev,
+ "old state = %s\n", state_string(iotg->otg.state));
+
+ pm_runtime_get_sync(pnw->dev);
+
+ pdev = to_pci_dev(pnw->dev);
+
+ switch (iotg->otg.state) {
+ case OTG_STATE_UNDEFINED:
+ case OTG_STATE_B_IDLE:
+ if (hsm->id == ID_A || hsm->id == ID_ACA_A) {
+ /* Move to A_IDLE state, ID changes */
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TB_SRP_FAIL_TMR);
+
+ iotg->otg.otg->default_a = 1;
+ hsm->a_srp_det = 0;
+ set_host_mode();
+ penwell_otg_phy_low_power(0);
+
+ /* Always set a_bus_req to 1, in case no ADP */
+ hsm->a_bus_req = 1;
+
+ /* Prevent device enter D0i1 or S3*/
+ wake_lock(&pnw->wake_lock);
+ pm_runtime_get(pnw->dev);
+
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ penwell_update_transceiver();
+ } else if (hsm->b_adp_sense_tmout) {
+ hsm->b_adp_sense_tmout = 0;
+ } else if (hsm->b_srp_fail_tmout) {
+ hsm->b_srp_fail_tmr = 0;
+ hsm->b_srp_fail_tmout = 0;
+ hsm->b_bus_req = 0;
+ penwell_otg_nsf_msg(6);
+
+ penwell_update_transceiver();
+ } else if (hsm->b_sess_vld) {
+ /* Check if DCP is detected */
+ spin_lock_irqsave(&pnw->charger_lock, flags);
+ charger_type = pnw->charging_cap.chrg_type;
+ type = pnw->psc_cap.chrg_type;
+ if (charger_type == CHRG_DCP ||
+ type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP) {
+ spin_unlock_irqrestore(&pnw->charger_lock,
+ flags);
+ break;
+ }
+ spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+ penwell_otg_phy_low_power(0);
+
+ /* Clear power_up */
+ hsm->power_up = 0;
+
+ /* Move to B_PERIPHERAL state, Session Valid */
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TB_SRP_FAIL_TMR);
+
+ hsm->b_sess_end = 0;
+ hsm->a_bus_suspend = 0;
+
+ /* Start USB Battery charger detection flow */
+
+ /* We need new charger detection flow for Clovertrail.
+ * But for now(power-on), we just skip it.
+ * Later on we'll figure it out.
+ */
+ if (!is_clovertrail(pdev)) {
+ mutex_lock(&pnw->msic_mutex);
+ if (pdev->revision >= 0x8) {
+ retval = penwell_otg_manual_chrg_det();
+ if (retval < 0) {
+ /* if failed, reset controller
+ * and try charger detection
+ * flow again */
+ dev_warn(pnw->dev,
+ "detection failed, retry");
+ set_client_mode();
+ msleep(100);
+ penwell_otg_phy_low_power(0);
+ penwell_spi_reset_phy();
+ retval = penwell_otg_manual_chrg_det();
+ }
+ } else {
+ /* Enable data contact detection */
+ penwell_otg_data_contact_detect();
+ /* Enable charger detection */
+ penwell_otg_charger_detect();
+ retval =
+ penwell_otg_charger_type_detect();
+ }
+ mutex_unlock(&pnw->msic_mutex);
+ if (retval < 0) {
+ dev_warn(pnw->dev, "Charger detect failure\n");
+ break;
+ } else {
+ charger_type = retval;
+ }
+ } else {
+ /* Clovertrail charger detection flow */
+ retval = penwell_otg_charger_det_clt();
+ if (retval < 0) {
+ dev_warn(pnw->dev, "detect failed\n");
+ /* Reset PHY and redo the detection */
+ pnw_phy_ctrl_rst();
+ /* Restart charger detection */
+ retval = penwell_otg_charger_det_clt();
+ if (retval)
+ dev_warn(pnw->dev,
+ "detect fail again\n");
+ break;
+ } else
+ charger_type = retval;
+ }
+
+ /* This is a workaround for self-powered hub case,
+ * vbus valid event comes several ms before id change */
+ if (hsm->id == ID_A) {
+ dev_warn(pnw->dev, "ID changed\n");
+ break;
+ }
+
+ if (charger_type == CHRG_SE1) {
+ dev_info(pnw->dev, "SE1 detected\n");
+
+ /* SE1: set charger type, current, notify EM */
+ penwell_otg_update_chrg_cap(CHRG_SE1,
+ CHRG_CURR_SE1);
+ dev_info(pnw->dev,
+ "reset PHY via SPI if SE1 detected\n");
+
+ if (!is_clovertrail(pdev)) {
+ /* Reset PHY for MFLD only */
+ penwell_otg_msic_spi_access(true);
+ penwell_otg_msic_write(MSIC_FUNCTRLSET,
+ PHYRESET);
+ penwell_otg_msic_spi_access(false);
+ }
+ break;
+ } else if (charger_type == CHRG_DCP) {
+ dev_info(pnw->dev, "DCP detected\n");
+
+ /* DCP: set charger type, current, notify EM */
+ penwell_otg_update_chrg_cap(CHRG_DCP,
+ CHRG_CURR_DCP);
+ set_client_mode();
+ break;
+
+ } else if (charger_type == CHRG_ACA) {
+ dev_info(pnw->dev, "ACA detected\n");
+ if (hsm->id == ID_ACA_A) {
+ /* Move to A_IDLE state, ID changes */
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TB_SRP_FAIL_TMR);
+
+ iotg->otg.otg->default_a = 1;
+ hsm->a_srp_det = 0;
+ set_host_mode();
+ penwell_otg_phy_low_power(0);
+
+ /* Always set a_bus_req to 1,
+ * in case no ADP */
+ hsm->a_bus_req = 1;
+
+ /* Prevent device enter D0i1 or S3*/
+ wake_lock(&pnw->wake_lock);
+ pm_runtime_get(pnw->dev);
+
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ penwell_update_transceiver();
+ break;
+ } else if (hsm->id == ID_ACA_B) {
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+ break;
+ } else if (hsm->id == ID_ACA_C) {
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+ /* Clear HNP polling flag */
+ if (iotg->otg.otg->gadget)
+ iotg->otg.otg->gadget->
+ host_request_flag = 0;
+
+ penwell_otg_phy_low_power(0);
+ set_client_mode();
+
+ if (iotg->start_peripheral) {
+ iotg->start_peripheral(iotg);
+ } else {
+ dev_dbg(pnw->dev,
+ "client driver not support\n");
+ break;
+ }
+ }
+ } else if (charger_type == CHRG_CDP) {
+ dev_info(pnw->dev, "CDP detected\n");
+
+ /* MFLD WA: MSIC issue need disable phy intr */
+ if (!is_clovertrail(pdev)) {
+ dev_dbg(pnw->dev,
+ "MFLD WA: enable PHY int\n");
+ penwell_otg_phy_intr(0);
+ }
+
+ /* CDP: set charger type, current, notify EM */
+ penwell_otg_update_chrg_cap(CHRG_CDP,
+ CHRG_CURR_CDP);
+
+ /* Clear HNP polling flag */
+ if (iotg->otg.otg->gadget)
+ iotg->otg.otg->gadget->
+ host_request_flag = 0;
+
+ if (iotg->start_peripheral) {
+ iotg->start_peripheral(iotg);
+ } else {
+ dev_dbg(pnw->dev,
+ "client driver not support\n");
+ break;
+ }
+ } else if (charger_type == CHRG_SDP) {
+ dev_info(pnw->dev, "SDP detected\n");
+
+ /* MFLD WA: MSIC issue need disable phy intr */
+ if (!is_clovertrail(pdev)) {
+ dev_dbg(pnw->dev,
+ "MFLD WA: enable PHY int\n");
+ penwell_otg_phy_intr(0);
+ }
+
+ /* SDP: set charger type and 100ma by default */
+ penwell_otg_update_chrg_cap(CHRG_SDP, 100);
+
+ /* Clear HNP polling flag */
+ if (iotg->otg.otg->gadget)
+ iotg->otg.otg->gadget->
+ host_request_flag = 0;
+
+ penwell_otg_phy_low_power(0);
+ set_client_mode();
+
+ if (iotg->start_peripheral) {
+ iotg->start_peripheral(iotg);
+ } else {
+ dev_dbg(pnw->dev,
+ "client driver not support\n");
+ break;
+ }
+
+ /* Schedule the SDP checking after TIMEOUT */
+ queue_delayed_work(pnw->qwork,
+ &pnw->sdp_check_work,
+ INVALID_SDP_TIMEOUT);
+ } else if (charger_type == CHRG_UNKNOWN) {
+ dev_info(pnw->dev, "Unknown Charger Found\n");
+
+ /* Unknown: set charger type */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN, 0);
+ }
+
+ penwell_otg_eye_diagram_optimize();
+
+ /* MFLD WA for PHY issue */
+ iotg->hsm.in_test_mode = 0;
+ iotg->hsm.ulpi_error = 0;
+
+ if (!is_clovertrail(pdev))
+ penwell_otg_start_ulpi_poll();
+
+ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
+
+ } else if ((hsm->b_bus_req || hsm->power_up || hsm->adp_change
+ || hsm->otg_srp_reqd) && !hsm->b_srp_fail_tmr) {
+
+ penwell_otg_mon_bus();
+
+ if (hsm->b_ssend_srp && hsm->b_se0_srp) {
+
+ hsm->power_up = 0;
+ hsm->adp_change = 0;
+
+ /* clear the PHCD before start srp */
+ penwell_otg_phy_low_power(0);
+
+ /* Start SRP */
+ if (pnw->iotg.otg.otg->start_srp)
+ pnw->iotg.otg.otg->start_srp(
+ pnw->iotg.otg.otg);
+ penwell_otg_add_timer(TB_SRP_FAIL_TMR);
+
+ } else {
+ hsm->b_bus_req = 0;
+ dev_info(pnw->dev,
+ "BUS is active, try SRP later\n");
+ }
+
+ /* clear after SRP attemp */
+ if (hsm->otg_srp_reqd) {
+ dev_dbg(pnw->dev, "Test mode: SRP done\n");
+ hsm->otg_srp_reqd = 0;
+ }
+ } else if (!hsm->b_sess_vld && hsm->id == ID_B) {
+ spin_lock_irqsave(&pnw->charger_lock, flags);
+ charger_type = pnw->charging_cap.chrg_type;
+ type = pnw->psc_cap.chrg_type;
+ spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+ if (charger_type == CHRG_DCP) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+
+ retval = penwell_otg_ulpi_write(iotg,
+ ULPI_PWRCTRLCLR, DPVSRCEN);
+ if (retval)
+ dev_warn(pnw->dev, "ulpi failed\n");
+ penwell_otg_charger_hwdet(false);
+ } else if (charger_type == CHRG_SE1) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+
+ /* WA: on SE1 detach, reset PHY over SPI */
+ dev_info(pnw->dev,
+ "reset PHY over SPI if SE1 detached\n");
+ penwell_otg_msic_spi_access(true);
+ penwell_otg_msic_write(MSIC_FUNCTRLSET,
+ PHYRESET);
+ penwell_otg_msic_spi_access(false);
+ } else if (type == POWER_SUPPLY_CHARGER_TYPE_USB_ACA) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+ penwell_otg_charger_hwdet(false);
+ } else if (type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+
+ retval = penwell_otg_ulpi_write(iotg,
+ ULPI_PWRCTRLCLR, DPVSRCEN);
+ if (retval)
+ dev_warn(pnw->dev, "ulpi failed\n");
+ penwell_otg_charger_hwdet(false);
+ } else if (type == POWER_SUPPLY_CHARGER_TYPE_SE1) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+ }
+ }
+ break;
+
+ case OTG_STATE_B_PERIPHERAL:
+ /* FIXME: Check if ID_ACA_A event will happened in this state */
+ if (hsm->id == ID_A) {
+ iotg->otg.otg->default_a = 1;
+ hsm->a_srp_det = 0;
+
+ cancel_delayed_work_sync(&pnw->ulpi_poll_work);
+ cancel_delayed_work_sync(&pnw->sdp_check_work);
+ penwell_otg_charger_hwdet(false);
+
+ if (iotg->stop_peripheral)
+ iotg->stop_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver has been removed.\n");
+
+ set_host_mode();
+
+ /* Always set a_bus_req to 1, in case no ADP */
+ hsm->a_bus_req = 1;
+
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+
+ /* Prevent device enter D0i1 or S3*/
+ wake_lock(&pnw->wake_lock);
+ pm_runtime_get(pnw->dev);
+
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ penwell_update_transceiver();
+ } else if (hsm->ulpi_error && !hsm->in_test_mode) {
+ /* WA: try to recover once detected PHY issue */
+ hsm->ulpi_error = 0;
+
+ cancel_delayed_work_sync(&pnw->ulpi_poll_work);
+ cancel_delayed_work_sync(&pnw->sdp_check_work);
+
+ if (iotg->stop_peripheral)
+ iotg->stop_peripheral(iotg);
+
+ msleep(2000);
+
+ if (iotg->start_peripheral)
+ iotg->start_peripheral(iotg);
+
+ if (!is_clovertrail(pdev))
+ penwell_otg_start_ulpi_poll();
+
+ } else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
+ /* Move to B_IDLE state, VBUS off/ACA */
+
+ cancel_delayed_work(&pnw->ulpi_poll_work);
+ cancel_delayed_work_sync(&pnw->sdp_check_work);
+
+ hsm->b_bus_req = 0;
+
+ if (is_clovertrail(pdev)) {
+ queue_delayed_work(pnw->qwork,
+ &pnw->ulpi_check_work, HZ);
+ }
+
+ if (iotg->stop_peripheral)
+ iotg->stop_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver has been removed.\n");
+
+ /* MFLD WA: reenable it for unplug event */
+ if (!is_clovertrail(pdev)) {
+ dev_dbg(pnw->dev, "MFLD WA: disable PHY int\n");
+ penwell_otg_phy_intr(1);
+ }
+
+ if (hsm->id == ID_ACA_B)
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+ else if (hsm->id == ID_B) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+ penwell_otg_charger_hwdet(false);
+ }
+
+ iotg->otg.state = OTG_STATE_B_IDLE;
+ } else if (hsm->b_bus_req && hsm->a_bus_suspend
+ && iotg->otg.otg->gadget
+ && iotg->otg.otg->gadget->b_hnp_enable) {
+
+ penwell_otg_phy_low_power(0);
+ msleep(10);
+
+ if (iotg->stop_peripheral)
+ iotg->stop_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver has been removed.\n");
+
+ penwell_otg_phy_low_power(0);
+
+ hsm->a_conn = 0;
+ hsm->a_bus_resume = 0;
+
+ if (iotg->start_host) {
+ iotg->start_host(iotg);
+ hsm->test_device = 0;
+
+ /* FIXME: can we allow D3 and D0i3
+ * in B_WAIT_ACON?
+ * Now just disallow it
+ */
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_B_WAIT_ACON;
+ penwell_otg_add_timer(TB_ASE0_BRST_TMR);
+ } else
+ dev_dbg(pnw->dev, "host driver not loaded.\n");
+
+ } else if (hsm->id == ID_ACA_C) {
+ cancel_delayed_work_sync(&pnw->sdp_check_work);
+
+ /* Make sure current limit updated */
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+ } else if (hsm->id == ID_B) {
+ spin_lock_irqsave(&pnw->charger_lock, flags);
+ type = pnw->psc_cap.chrg_type;
+ spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+ if (type == POWER_SUPPLY_CHARGER_TYPE_USB_ACA) {
+ /* Notify EM charger ACA removal event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+ penwell_otg_charger_hwdet(false);
+ /* Set current when switch from ACA to SDP */
+ if (!hsm->a_bus_suspend && iotg->otg.set_power)
+ iotg->otg.set_power(&iotg->otg, 500);
+ }
+ }
+ break;
+
+ case OTG_STATE_B_WAIT_ACON:
+ if (hsm->id == ID_A) {
+ /* Move to A_IDLE state, ID changes */
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+ iotg->otg.otg->default_a = 1;
+ hsm->a_srp_det = 0;
+
+ penwell_otg_HAAR(0);
+
+ PNW_STOP_HOST(pnw);
+
+ set_host_mode();
+
+ /* Always set a_bus_req to 1, in case no ADP */
+ iotg->hsm.a_bus_req = 1;
+
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ penwell_update_transceiver();
+ } else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
+ /* Move to B_IDLE state, VBUS off/ACA */
+
+ if (hsm->id == ID_ACA_B)
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+ else if (hsm->id == ID_B) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+ }
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+ hsm->b_hnp_enable = 0;
+ hsm->b_bus_req = 0;
+ penwell_otg_HAAR(0);
+
+ PNW_STOP_HOST(pnw);
+
+ set_client_mode();
+
+ /* allow D3 and D0i3 */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_B_IDLE;
+ } else if (hsm->a_conn) {
+ /* Move to B_HOST state, A connected */
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+ penwell_otg_HAAR(0);
+
+ iotg->otg.state = OTG_STATE_B_HOST;
+ penwell_update_transceiver();
+ } else if (hsm->a_bus_resume || hsm->b_ase0_brst_tmout) {
+ /* Move to B_HOST state, A connected */
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+ penwell_otg_HAAR(0);
+ penwell_otg_nsf_msg(7);
+
+ PNW_STOP_HOST(pnw);
+
+ hsm->a_bus_suspend = 0;
+ hsm->b_bus_req = 0;
+
+ if (iotg->start_peripheral)
+ iotg->start_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev, "client driver not loaded\n");
+
+ /* allow D3 and D0i3 in A_WAIT_BCON */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
+ } else if (hsm->id == ID_ACA_C) {
+ /* Make sure current limit updated */
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+ } else if (hsm->id == ID_B) {
+#if 0
+ /* only set 2ma due to client function stopped */
+ if (iotg->otg.set_power)
+ iotg->otg.set_power(&iotg->otg, 2);
+#endif
+ }
+ break;
+
+ case OTG_STATE_B_HOST:
+ if (hsm->id == ID_A) {
+ iotg->otg.otg->default_a = 1;
+ hsm->a_srp_det = 0;
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ PNW_STOP_HOST(pnw);
+
+ set_host_mode();
+
+ /* Always set a_bus_req to 1, in case no ADP */
+ hsm->a_bus_req = 1;
+
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ penwell_update_transceiver();
+ } else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
+ /* Move to B_IDLE state, VBUS off/ACA */
+
+ if (hsm->id == ID_ACA_B)
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+ else if (hsm->id == ID_B) {
+ /* Notify EM charger remove event */
+ penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+ CHRG_CURR_DISCONN);
+ }
+
+ hsm->b_hnp_enable = 0;
+ hsm->b_bus_req = 0;
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ PNW_STOP_HOST(pnw);
+
+ set_client_mode();
+
+ /* allow D3 and D0i3 in A_WAIT_BCON */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_B_IDLE;
+ } else if (!hsm->b_bus_req || !hsm->a_conn
+ || hsm->test_device) {
+ hsm->b_bus_req = 0;
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ PNW_STOP_HOST(pnw);
+ hsm->a_bus_suspend = 0;
+
+ /* Clear HNP polling flag */
+ if (iotg->otg.otg->gadget)
+ iotg->otg.otg->gadget->host_request_flag = 0;
+
+ if (iotg->start_peripheral)
+ iotg->start_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver not loaded.\n");
+
+ /* allow D3 and D0i3 in A_WAIT_BCON */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
+ } else if (hsm->id == ID_ACA_C) {
+ /* Make sure current limit updated */
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+ }
+ break;
+
+ case OTG_STATE_A_IDLE:
+ if (hsm->id == ID_B || hsm->id == ID_ACA_B) {
+ pnw->iotg.otg.otg->default_a = 0;
+ hsm->b_bus_req = 0;
+
+ if (hsm->id == ID_ACA_B)
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+
+ hsm->b_bus_req = 0;
+
+ set_client_mode();
+
+ iotg->otg.state = OTG_STATE_B_IDLE;
+ penwell_update_transceiver();
+
+ /* Decrement the device usage counter */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ } else if (hsm->id == ID_ACA_A) {
+
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+ if (hsm->power_up)
+ hsm->power_up = 0;
+
+ if (hsm->adp_change)
+ hsm->adp_change = 0;
+
+ if (hsm->a_srp_det)
+ hsm->a_srp_det = 0;
+
+ hsm->b_conn = 0;
+ hsm->hnp_poll_enable = 0;
+
+ if (iotg->start_host)
+ iotg->start_host(iotg);
+ else {
+ dev_dbg(pnw->dev, "host driver not loaded.\n");
+ break;
+ }
+
+ /* allow D3 and D0i3 in A_WAIT_BCON */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+
+ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+
+ } else if (!hsm->a_bus_drop && (hsm->power_up || hsm->a_bus_req
+ || hsm->a_srp_det || hsm->adp_change)) {
+ /* power up / adp changes / srp detection should be
+ * cleared at once after handled. */
+ if (hsm->power_up)
+ hsm->power_up = 0;
+
+ if (hsm->adp_change)
+ hsm->adp_change = 0;
+
+ if (hsm->a_srp_det) {
+ hsm->a_srp_det = 0;
+ /* wait SRP done, then enable VBUS */
+ usleep_range(10000, 11000);
+ }
+
+ otg_set_vbus(iotg->otg.otg, true);
+
+ penwell_otg_add_timer(TA_WAIT_VRISE_TMR);
+
+ iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
+
+ penwell_update_transceiver();
+ } else if (hsm->b_sess_end || hsm->a_sess_vld ||
+ hsm->a_srp_det || !hsm->b_sess_vld) {
+ hsm->a_srp_det = 0;
+ dev_dbg(pnw->dev, "reconfig...\n");
+ }
+ break;
+
+ case OTG_STATE_A_WAIT_VRISE:
+ if (hsm->a_bus_drop ||
+ hsm->id == ID_B || hsm->id == ID_ACA_B) {
+ /* Move to A_WAIT_VFALL, over current/user request */
+
+ /* Delete current timer */
+ penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (hsm->a_vbus_vld || hsm->a_wait_vrise_tmout
+ || hsm->id == ID_ACA_A) {
+ /* Move to A_WAIT_BCON state, a vbus vld */
+ /* Delete current timer and clear flags */
+ penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
+
+ if (!hsm->a_vbus_vld) {
+ dev_warn(pnw->dev, "vbus can't rise to vbus vld, overcurrent!\n");
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ }
+
+ if (hsm->id == ID_ACA_A) {
+ otg_set_vbus(iotg->otg.otg, false);
+
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+ }
+
+ hsm->a_bus_req = 1;
+ hsm->b_conn = 0;
+ hsm->hnp_poll_enable = 0;
+
+ penwell_otg_eye_diagram_optimize();
+
+ if (iotg->start_host) {
+ dev_dbg(pnw->dev, "host_ops registered!\n");
+ iotg->start_host(iotg);
+ } else {
+ dev_dbg(pnw->dev, "host driver not loaded.\n");
+ break;
+ }
+
+ penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+
+ /* allow D3 and D0i3 in A_WAIT_BCON */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ /* at least give some time to USB HOST to enumerate
+ * devices before trying to suspend the system*/
+ wake_lock_timeout(&pnw->wake_lock, 5 * HZ);
+
+ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (hsm->id == ID_B || hsm->id == ID_ACA_B || hsm->a_bus_drop ||
+ hsm->a_wait_bcon_tmout) {
+ /* Move to A_WAIT_VFALL state, user request */
+
+ /* Delete current timer and clear flags for B-Device */
+ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+
+ hsm->b_bus_req = 0;
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (!hsm->a_vbus_vld) {
+ /* Move to A_VBUS_ERR state, over-current detected */
+
+ /* CTP SW Workaround, add 300ms debouce on VBUS drop */
+ if (is_clovertrail(pdev)) {
+ msleep(300);
+ if (hsm->a_vbus_vld)
+ break;
+ }
+
+ /* Notify user space for vbus invalid event */
+ penwell_otg_notify_warning(USB_WARNING_VBUS_INVALID);
+
+ /* Delete current timer and disable host function */
+ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS and enter PHY low power mode */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+ } else if (hsm->b_conn) {
+ /* Move to A_HOST state, device connected */
+
+ /* Delete current timer and disable host function */
+ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+
+ /* Start HNP polling */
+ if (iotg->start_hnp_poll)
+ iotg->start_hnp_poll(iotg);
+
+ if (!hsm->a_bus_req)
+ hsm->a_bus_req = 1;
+
+ if (hsm->test_device)
+ penwell_otg_add_timer(TTST_MAINT_TMR);
+
+ iotg->otg.state = OTG_STATE_A_HOST;
+ } else if (hsm->id == ID_ACA_A) {
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+ }
+ break;
+
+ case OTG_STATE_A_HOST:
+ if (hsm->id == ID_B || hsm->id == ID_ACA_B || hsm->a_bus_drop) {
+ /* Move to A_WAIT_VFALL state, timeout/user request */
+
+ /* Delete current timer and clear flags */
+ if (hsm->test_device) {
+ hsm->test_device = 0;
+ penwell_otg_del_timer(TTST_MAINT_TMR);
+ }
+
+ if (hsm->id == ID_ACA_B)
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ penwell_otg_phy_low_power(0);
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (hsm->test_device && hsm->tst_maint_tmout) {
+
+ hsm->test_device = 0;
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ penwell_otg_phy_low_power(0);
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ /* Clear states and wait for SRP */
+ hsm->a_srp_det = 0;
+ hsm->a_bus_req = 0;
+
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ } else if (!hsm->a_vbus_vld) {
+ /* Move to A_VBUS_ERR state */
+
+ /* CTP SW Workaround, add 300ms debouce on VBUS drop */
+ if (is_clovertrail(pdev)) {
+ msleep(300);
+ if (hsm->a_vbus_vld)
+ break;
+ }
+
+ /* Notify user space for vbus invalid event */
+ penwell_otg_notify_warning(USB_WARNING_VBUS_INVALID);
+
+ /* Delete current timer and clear flags */
+ if (hsm->test_device) {
+ hsm->test_device = 0;
+ penwell_otg_del_timer(TTST_MAINT_TMR);
+ }
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+ } else if (!hsm->a_bus_req &&
+ iotg->otg.otg->host->b_hnp_enable) {
+ /* Move to A_SUSPEND state */
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ /* According to Spec 7.1.5 */
+ penwell_otg_add_timer(TA_AIDL_BDIS_TMR);
+
+ /* Set HABA to enable hardware assistance to
+ * signal A-connect after receiver B-disconnect
+ * Hardware will then set client mode and
+ * enable URE, SLE and PCE after the assistance
+ * otg_dummy_irq is used to clean these ints
+ * when client driver is not resumed.
+ */
+ if (request_irq(pdev->irq, otg_dummy_irq,
+ IRQF_SHARED, driver_name,
+ iotg->base) != 0) {
+ dev_dbg(pnw->dev,
+ "request interrupt %d failed\n",
+ pdev->irq);
+ }
+ penwell_otg_HABA(1);
+
+ penwell_otg_loc_sof(0);
+ penwell_otg_phy_low_power(0);
+
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_SUSPEND;
+ } else if (!hsm->b_conn && hsm->test_device
+ && hsm->otg_vbus_off) {
+ /* If it is a test device with otg_vbus_off bit set,
+ * turn off VBUS on disconnect event and stay for
+ * TTST_NOADP without ADP */
+
+ penwell_otg_del_timer(TTST_MAINT_TMR);
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ penwell_otg_phy_low_power(0);
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ penwell_otg_add_timer(TTST_NOADP_TMR);
+
+ /* disallow D3 or D0i3 */
+ pm_runtime_get(pnw->dev);
+ wake_lock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+
+ } else if (!hsm->b_conn) {
+
+ /* Delete current timer and clear flags */
+ if (hsm->test_device) {
+ hsm->test_device = 0;
+ penwell_otg_del_timer(TTST_MAINT_TMR);
+ }
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ /* add kernel timer */
+ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+ } else if (hsm->id == ID_ACA_A) {
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+ } else if (hsm->id == ID_A) {
+ /* Turn on VBUS */
+ otg_set_vbus(iotg->otg.otg, true);
+ }
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ if (hsm->id == ID_B || hsm->id == ID_ACA_B ||
+ hsm->a_bus_drop || hsm->a_aidl_bdis_tmout) {
+ /* Move to A_WAIT_VFALL state, timeout/user request */
+ penwell_otg_HABA(0);
+ free_irq(pdev->irq, iotg->base);
+
+ /* Delete current timer and clear HW assist */
+ if (hsm->a_aidl_bdis_tmout)
+ hsm->a_aidl_bdis_tmout = 0;
+ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+ if (hsm->id == ID_ACA_B)
+ penwell_otg_update_chrg_cap(CHRG_ACA,
+ CHRG_CURR_ACA);
+
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (!hsm->a_vbus_vld) {
+ /* Move to A_VBUS_ERR state, Over-current */
+ penwell_otg_HABA(0);
+ free_irq(pdev->irq, iotg->base);
+
+ /* Delete current timer and clear flags */
+ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+ PNW_STOP_HOST(pnw);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+ } else if (!hsm->b_conn &&
+ !pnw->iotg.otg.otg->host->b_hnp_enable) {
+ /* Move to A_WAIT_BCON */
+
+ /* delete current timer */
+ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+ /* add kernel timer */
+ penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+
+ /* allow D3 and D0i3 in A_WAIT_BCON */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+ } else if (!hsm->b_conn &&
+ pnw->iotg.otg.otg->host->b_hnp_enable) {
+ /* Move to A_PERIPHERAL state, HNP */
+ penwell_otg_HABA(0);
+ free_irq(pdev->irq, iotg->base);
+
+ /* Delete current timer and clear flags */
+ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+ penwell_otg_phy_low_power(0);
+
+ PNW_STOP_HOST(pnw);
+
+ penwell_otg_phy_low_power(0);
+ hsm->b_bus_suspend = 0;
+
+ /* Clear HNP polling flag */
+ if (iotg->otg.otg->gadget)
+ iotg->otg.otg->gadget->host_request_flag = 0;
+
+ penwell_otg_phy_low_power(0);
+
+ if (iotg->start_peripheral)
+ iotg->start_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver not loaded.\n");
+
+ penwell_otg_add_timer(TA_BIDL_ADIS_TMR);
+ iotg->otg.state = OTG_STATE_A_PERIPHERAL;
+ } else if (hsm->a_bus_req) {
+ /* Move to A_HOST state, user request */
+ penwell_otg_HABA(0);
+ free_irq(pdev->irq, iotg->base);
+
+ /* Delete current timer and clear flags */
+ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+ penwell_otg_loc_sof(1);
+
+ /* Start HNP polling */
+ if (iotg->start_hnp_poll)
+ iotg->start_hnp_poll(iotg);
+
+ /* allow D3 and D0i3 in A_HOST */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_HOST;
+ } else if (hsm->id == ID_ACA_A) {
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+ }
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ if (hsm->id == ID_B || hsm->a_bus_drop) {
+ /* Move to A_WAIT_VFALL state */
+
+ /* Delete current timer and clear flags */
+ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+
+ if (iotg->stop_peripheral)
+ iotg->stop_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver has been removed.\n");
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+ set_host_mode();
+
+ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (!hsm->a_vbus_vld) {
+ /* Move to A_VBUS_ERR state, over-current detected */
+
+ /* Delete current timer and disable client function */
+ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+
+ if (iotg->stop_peripheral)
+ iotg->stop_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver has been removed.\n");
+
+ /* Turn off the VBUS and enter PHY low power mode */
+ otg_set_vbus(iotg->otg.otg, false);
+
+ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+ } else if (hsm->a_bidl_adis_tmout) {
+ /* Move to A_WAIT_BCON state */
+ hsm->a_bidl_adis_tmr = 0;
+
+ msleep(10);
+ penwell_otg_phy_low_power(0);
+
+ /* Disable client function and switch to host mode */
+ if (iotg->stop_peripheral)
+ iotg->stop_peripheral(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "client driver has been removed.\n");
+
+ hsm->hnp_poll_enable = 0;
+ hsm->b_conn = 0;
+
+ penwell_otg_phy_low_power(0);
+
+ if (iotg->start_host)
+ iotg->start_host(iotg);
+ else
+ dev_dbg(pnw->dev,
+ "host driver not loaded.\n");
+
+ penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+
+ /* allow D3 and D0i3 in A_WAIT_BCON */
+ pm_runtime_put(pnw->dev);
+ wake_unlock(&pnw->wake_lock);
+ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+ } else if (hsm->id == ID_A && hsm->b_bus_suspend) {
+ if (!timer_pending(&pnw->hsm_timer))
+ penwell_otg_add_timer(TA_BIDL_ADIS_TMR);
+ } else if (hsm->id == ID_A && !hsm->b_bus_suspend) {
+ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+ } else if (hsm->id == ID_ACA_A) {
+ penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+ /* Turn off VBUS */
+ otg_set_vbus(iotg->otg.otg, false);
+ }
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ if (hsm->id == ID_B || hsm->id == ID_ACA_B ||
+ hsm->id == ID_ACA_A || hsm->a_bus_drop ||
+ hsm->a_clr_err) {
+ if (hsm->a_clr_err)
+ hsm->a_clr_err = 0;
+
+ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ if (hsm->a_wait_vfall_tmout) {
+ hsm->a_srp_det = 0;
+ hsm->a_wait_vfall_tmout = 0;
+
+ /* Move to A_IDLE state, vbus falls */
+ /* Always set a_bus_req to 1, in case no ADP */
+ hsm->a_bus_req = 1;
+
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ penwell_update_transceiver();
+ } else if (hsm->test_device && hsm->otg_vbus_off
+ && hsm->tst_noadp_tmout) {
+ /* After noadp timeout, switch back to normal mode */
+ hsm->test_device = 0;
+ hsm->otg_vbus_off = 0;
+ hsm->tst_noadp_tmout = 0;
+
+ hsm->a_bus_req = 1;
+
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ penwell_update_transceiver();
+ }
+ break;
+ default:
+ break;
+ ;
+ }
+
+ pm_runtime_put_sync(pnw->dev);
+
+ dev_dbg(pnw->dev,
+ "new state = %s\n", state_string(iotg->otg.state));
+}
+
+static ssize_t
+show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ pm_runtime_get_sync(pnw->dev);
+
+ t = scnprintf(next, size,
+ "\n"
+ "USBCMD = 0x%08x\n"
+ "USBSTS = 0x%08x\n"
+ "USBINTR = 0x%08x\n"
+ "ASYNCLISTADDR = 0x%08x\n"
+ "PORTSC1 = 0x%08x\n"
+ "HOSTPC1 = 0x%08x\n"
+ "OTGSC = 0x%08x\n"
+ "USBMODE = 0x%08x\n",
+ readl(pnw->iotg.base + 0x30),
+ readl(pnw->iotg.base + 0x34),
+ readl(pnw->iotg.base + 0x38),
+ readl(pnw->iotg.base + 0x48),
+ readl(pnw->iotg.base + 0x74),
+ readl(pnw->iotg.base + 0xb4),
+ readl(pnw->iotg.base + 0xf4),
+ readl(pnw->iotg.base + 0xf8)
+ );
+
+ pm_runtime_put_sync(pnw->dev);
+
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
+
+static ssize_t
+show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ char *next;
+ unsigned size, t;
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ if (iotg->otg.otg->host)
+ iotg->hsm.a_set_b_hnp_en = iotg->otg.otg->host->b_hnp_enable;
+
+ if (iotg->otg.otg->gadget)
+ iotg->hsm.b_hnp_enable = iotg->otg.otg->gadget->b_hnp_enable;
+
+ t = scnprintf(next, size,
+ "\n"
+ "current state = %s\n"
+ "a_bus_resume = \t%d\n"
+ "a_bus_suspend = \t%d\n"
+ "a_conn = \t%d\n"
+ "a_sess_vld = \t%d\n"
+ "a_srp_det = \t%d\n"
+ "a_vbus_vld = \t%d\n"
+ "b_bus_suspend = \t%d\n"
+ "b_conn = \t%d\n"
+ "b_se0_srp = \t%d\n"
+ "b_ssend_srp = \t%d\n"
+ "b_sess_end = \t%d\n"
+ "b_sess_vld = \t%d\n"
+ "id = \t%d\n"
+ "power_up = \t%d\n"
+ "adp_change = \t%d\n"
+ "test_device = \t%d\n"
+ "a_set_b_hnp_en = \t%d\n"
+ "b_srp_done = \t%d\n"
+ "b_hnp_enable = \t%d\n"
+ "hnp_poll_enable = \t%d\n"
+ "a_wait_vrise_tmout = \t%d\n"
+ "a_wait_bcon_tmout = \t%d\n"
+ "a_aidl_bdis_tmout = \t%d\n"
+ "a_bidl_adis_tmout = \t%d\n"
+ "a_bidl_adis_tmr = \t%d\n"
+ "a_wait_vfall_tmout = \t%d\n"
+ "b_ase0_brst_tmout = \t%d\n"
+ "b_srp_fail_tmout = \t%d\n"
+ "b_srp_fail_tmr = \t%d\n"
+ "b_adp_sense_tmout = \t%d\n"
+ "tst_maint_tmout = \t%d\n"
+ "tst_noadp_tmout = \t%d\n"
+ "a_bus_drop = \t%d\n"
+ "a_bus_req = \t%d\n"
+ "a_clr_err = \t%d\n"
+ "b_bus_req = \t%d\n"
+ "ulpi_error = \t%d\n"
+ "ulpi_polling = \t%d\n",
+ state_string(iotg->otg.state),
+ iotg->hsm.a_bus_resume,
+ iotg->hsm.a_bus_suspend,
+ iotg->hsm.a_conn,
+ iotg->hsm.a_sess_vld,
+ iotg->hsm.a_srp_det,
+ iotg->hsm.a_vbus_vld,
+ iotg->hsm.b_bus_suspend,
+ iotg->hsm.b_conn,
+ iotg->hsm.b_se0_srp,
+ iotg->hsm.b_ssend_srp,
+ iotg->hsm.b_sess_end,
+ iotg->hsm.b_sess_vld,
+ iotg->hsm.id,
+ iotg->hsm.power_up,
+ iotg->hsm.adp_change,
+ iotg->hsm.test_device,
+ iotg->hsm.a_set_b_hnp_en,
+ iotg->hsm.b_srp_done,
+ iotg->hsm.b_hnp_enable,
+ iotg->hsm.hnp_poll_enable,
+ iotg->hsm.a_wait_vrise_tmout,
+ iotg->hsm.a_wait_bcon_tmout,
+ iotg->hsm.a_aidl_bdis_tmout,
+ iotg->hsm.a_bidl_adis_tmout,
+ iotg->hsm.a_bidl_adis_tmr,
+ iotg->hsm.a_wait_vfall_tmout,
+ iotg->hsm.b_ase0_brst_tmout,
+ iotg->hsm.b_srp_fail_tmout,
+ iotg->hsm.b_srp_fail_tmr,
+ iotg->hsm.b_adp_sense_tmout,
+ iotg->hsm.tst_maint_tmout,
+ iotg->hsm.tst_noadp_tmout,
+ iotg->hsm.a_bus_drop,
+ iotg->hsm.a_bus_req,
+ iotg->hsm.a_clr_err,
+ iotg->hsm.b_bus_req,
+ iotg->hsm.ulpi_error,
+ iotg->hsm.ulpi_polling
+ );
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
+
+static ssize_t
+show_chargers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ char *next;
+ unsigned size, t;
+ enum usb_charger_type type;
+ unsigned int ma;
+ unsigned long flags;
+ struct pci_dev *pdev;
+ struct power_supply_cable_props psc_cap;
+
+ pdev = to_pci_dev(pnw->dev);
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ if (!is_clovertrail(pdev)) {
+ spin_lock_irqsave(&pnw->charger_lock, flags);
+ type = pnw->charging_cap.chrg_type;
+ ma = pnw->charging_cap.ma;
+ spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+ t = scnprintf(next, size,
+ "USB Battery Charging Capability\n"
+ "\tUSB Charger Type: %s\n"
+ "\tMax Charging Current: %u\n",
+ charger_string(type),
+ ma
+ );
+ } else {
+ spin_lock_irqsave(&pnw->charger_lock, flags);
+ psc_cap = pnw->psc_cap;
+ spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+ t = scnprintf(next, size,
+ "USB Battery Charging Capability(CLV)\n"
+ "\tUSB Charger Type: %s\n"
+ "\tMax Charging Current: %u\n",
+ psc_string(psc_cap.chrg_type),
+ psc_cap.ma
+ );
+
+ }
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(chargers, S_IRUGO, show_chargers, NULL);
+
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ char *next;
+ unsigned size, t;
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size, "%d", pnw->iotg.hsm.a_bus_req);
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+
+	if (!iotg->otg.otg->default_a)
+		return -EINVAL;	/* a_bus_req only valid for A-device */
+	if (count > 2)
+		return -EINVAL;	/* expect "0" or "1" (plus newline) */
+
+	if (buf[0] == '0') {
+		iotg->hsm.a_bus_req = 0;
+		dev_dbg(pnw->dev, "a_bus_req = 0\n");
+	} else if (buf[0] == '1') {
+		/* If a_bus_drop is TRUE, a_bus_req can't be set */
+		if (iotg->hsm.a_bus_drop)
+			return -EBUSY;	/* bus drop pending, request refused */
+		iotg->hsm.a_bus_req = 1;
+		dev_dbg(pnw->dev, "a_bus_req = 1\n");
+		if (iotg->otg.state == OTG_STATE_A_PERIPHERAL) {
+			dev_warn(pnw->dev, "Role switch will be "
+				"performed soon, if connected OTG device "
+				"supports role switch request.\n");
+			dev_warn(pnw->dev, "It may cause data "
+				"corruption during data transfer\n");
+		}
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR | S_IWGRP,
+ get_a_bus_req, set_a_bus_req);
+
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size, "%d", pnw->iotg.hsm.a_bus_drop);
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+
+	if (!iotg->otg.otg->default_a)
+		return -EINVAL;	/* a_bus_drop only valid for A-device */
+	if (count > 2)
+		return -EINVAL;	/* expect "0" or "1" (plus newline) */
+
+	if (buf[0] == '0') {
+		iotg->hsm.a_bus_drop = 0;
+		dev_dbg(pnw->dev, "a_bus_drop = 0\n");
+	} else if (buf[0] == '1') {
+		iotg->hsm.a_bus_drop = 1;
+		iotg->hsm.a_bus_req = 0;
+		dev_dbg(pnw->dev, "a_bus_drop = 1, so a_bus_req = 0\n");
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR | S_IWGRP,
+ get_a_bus_drop, set_a_bus_drop);
+
+static ssize_t
+get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size, "%d", pnw->iotg.hsm.b_bus_req);
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_b_bus_req(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+
+	if (iotg->otg.otg->default_a)
+		return -EINVAL;	/* b_bus_req only valid for B-device */
+
+	if (count > 2)
+		return -EINVAL;	/* expect "0" or "1" (plus newline) */
+
+	if (buf[0] == '0') {
+		iotg->hsm.b_bus_req = 0;
+		dev_dbg(pnw->dev, "b_bus_req = 0\n");
+
+		if (iotg->otg.otg->gadget)
+			iotg->otg.otg->gadget->host_request_flag = 0;
+	} else if (buf[0] == '1') {
+		iotg->hsm.b_bus_req = 1;
+		dev_dbg(pnw->dev, "b_bus_req = 1\n");
+
+		if (iotg->otg.state == OTG_STATE_B_PERIPHERAL) {
+			if (iotg->otg.otg->gadget)
+				iotg->otg.otg->gadget->host_request_flag = 1;
+
+			dev_warn(pnw->dev, "Role switch will be "
+				"performed soon, if connected OTG device "
+				"supports role switch request.\n");
+			dev_warn(pnw->dev, "It may cause data "
+				"corruption during data transfer\n");
+		}
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR | S_IWGRP,
+ get_b_bus_req, set_b_bus_req);
+
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+
+	if (!iotg->otg.otg->default_a)
+		return -EINVAL;	/* a_clr_err only valid for A-device */
+	if (iotg->otg.state != OTG_STATE_A_VBUS_ERR)
+		return -EINVAL;	/* only meaningful in A_VBUS_ERR state */
+	if (count > 2)
+		return -EINVAL;	/* expect "1" (plus newline) */
+
+	if (buf[0] == '1') {
+		iotg->hsm.a_clr_err = 1;
+		dev_dbg(pnw->dev, "a_clr_err = 1\n");
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(a_clr_err, S_IRUGO | S_IWUSR | S_IWGRP, NULL, set_a_clr_err);
+
+static ssize_t
+set_ulpi_err(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+
+ dev_dbg(pnw->dev, "trigger ulpi error manually\n");
+
+ iotg->hsm.ulpi_error = 1;
+
+ penwell_update_transceiver();
+
+ return count;
+}
+static DEVICE_ATTR(ulpi_err, S_IRUGO | S_IWUSR | S_IWGRP, NULL, set_ulpi_err);
+
+static struct attribute *inputs_attrs[] = {
+ &dev_attr_a_bus_req.attr,
+ &dev_attr_a_bus_drop.attr,
+ &dev_attr_b_bus_req.attr,
+ &dev_attr_a_clr_err.attr,
+ &dev_attr_ulpi_err.attr,
+ NULL,
+};
+
+static struct attribute_group debug_dev_attr_group = {
+ .name = "inputs",
+ .attrs = inputs_attrs,
+};
+
+static int penwell_otg_aca_enable(void)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ int retval = 0;
+ struct pci_dev *pdev;
+
+ dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+ pdev = to_pci_dev(pnw->dev);
+
+ if (!is_clovertrail(pdev)) {
+ penwell_otg_msic_spi_access(true);
+
+ retval = intel_scu_ipc_update_register(SPI_TI_VS4,
+ TI_ACA_DET_EN, TI_ACA_DET_EN);
+ if (retval)
+ goto done;
+
+ retval = intel_scu_ipc_update_register(SPI_TI_VS5,
+ TI_ID_FLOAT_EN | TI_ID_RES_EN,
+ TI_ID_FLOAT_EN | TI_ID_RES_EN);
+ if (retval)
+ goto done;
+ } else {
+ retval = penwell_otg_ulpi_write(iotg, ULPI_VS4SET,
+ ACADET);
+ if (retval)
+ goto done;
+
+ retval = penwell_otg_ulpi_write(iotg, ULPI_VS5SET,
+ IDFLOAT_EN | IDRES_EN);
+ }
+done:
+ if (!is_clovertrail(pdev))
+ penwell_otg_msic_spi_access(false);
+
+ if (retval)
+ dev_warn(pnw->dev, "Failed to enable ACA device detection\n");
+
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+ return retval;
+}
+
+static int penwell_otg_aca_disable(void)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ int retval = 0;
+
+ dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+ retval = penwell_otg_ulpi_write(iotg, ULPI_VS5CLR,
+ IDFLOAT_EN | IDRES_EN);
+ if (retval)
+ goto done;
+
+ retval = penwell_otg_ulpi_write(iotg, ULPI_VS4CLR,
+ ACADET);
+
+done:
+ if (retval)
+ dev_warn(pnw->dev, "Failed to disable ACA device detection\n");
+
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+ return retval;
+}
+
+static void penwell_spi_reset_phy(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "Reset Phy over SPI\n");
+	penwell_otg_msic_spi_access(true);	/* open MSIC SPI access window */
+	penwell_otg_msic_write(MSIC_FUNCTRLSET, PHYRESET);	/* assert PHY reset bit */
+	penwell_otg_msic_spi_access(false);	/* close MSIC SPI access window */
+	dev_dbg(pnw->dev, "Reset Phy over SPI Done\n");
+}
+
+static int penwell_otg_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ unsigned long resource, len;
+ void __iomem *base = NULL;
+ int retval;
+ u32 val32;
+ struct penwell_otg *pnw;
+ char qname[] = "penwell_otg_queue";
+ char chrg_qname[] = "penwell_otg_chrg_queue";
+
+ retval = 0;
+
+ dev_info(&pdev->dev, "Intel OTG2.0 controller is detected.\n");
+ dev_info(&pdev->dev, "Driver version: " DRIVER_VERSION "\n");
+
+ if (pci_enable_device(pdev) < 0) {
+ retval = -ENODEV;
+ goto done;
+ }
+
+ pnw = kzalloc(sizeof(*pnw), GFP_KERNEL);
+ if (pnw == NULL) {
+ retval = -ENOMEM;
+ goto done;
+ }
+ the_transceiver = pnw;
+
+ /* control register: BAR 0 */
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!request_mem_region(resource, len, driver_name)) {
+ retval = -EBUSY;
+ goto err;
+ }
+ pnw->region = 1;
+
+ base = ioremap_nocache(resource, len);
+ if (base == NULL) {
+ retval = -EFAULT;
+ goto err;
+ }
+ pnw->iotg.base = base;
+
+ if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
+ retval = -EBUSY;
+ goto err;
+ }
+ pnw->cfg_region = 1;
+
+ if (!pdev->irq) {
+ dev_dbg(&pdev->dev, "No IRQ.\n");
+ retval = -ENODEV;
+ goto err;
+ }
+
+ pnw->qwork = create_singlethread_workqueue(qname);
+ if (!pnw->qwork) {
+ dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname);
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ pnw->chrg_qwork = create_singlethread_workqueue(chrg_qname);
+ if (!pnw->chrg_qwork) {
+ dev_dbg(&pdev->dev, "cannot create workqueue %s\n", chrg_qname);
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&pnw->chrg_evt_queue);
+ INIT_WORK(&pnw->work, penwell_otg_work);
+ INIT_WORK(&pnw->psc_notify, penwell_otg_psc_notify_work);
+ INIT_WORK(&pnw->hnp_poll_work, penwell_otg_hnp_poll_work);
+ INIT_WORK(&pnw->uevent_work, penwell_otg_uevent_work);
+ INIT_DELAYED_WORK(&pnw->ulpi_poll_work, penwell_otg_ulpi_poll_work);
+ INIT_DELAYED_WORK(&pnw->ulpi_check_work, penwell_otg_ulpi_check_work);
+ INIT_DELAYED_WORK(&pnw->sdp_check_work, penwell_otg_sdp_check_work);
+
+ /* OTG common part */
+ pnw->dev = &pdev->dev;
+ pnw->iotg.otg.dev = &pdev->dev;
+ pnw->iotg.otg.label = driver_name;
+ pnw->iotg.otg.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL);
+ if (!pnw->iotg.otg.otg) {
+ retval = -ENOMEM;
+ goto err;
+ }
+ pnw->iotg.otg.otg->set_host = penwell_otg_set_host;
+ pnw->iotg.otg.otg->set_peripheral = penwell_otg_set_peripheral;
+ pnw->iotg.otg.set_power = penwell_otg_set_power;
+ pnw->iotg.otg.otg->set_vbus = penwell_otg_set_vbus;
+ pnw->iotg.otg.otg->start_srp = penwell_otg_start_srp;
+ pnw->iotg.otg.get_chrg_status = penwell_otg_get_chrg_status;
+ pnw->iotg.set_adp_probe = NULL;
+ pnw->iotg.set_adp_sense = NULL;
+ pnw->iotg.start_hnp_poll = NULL;
+ pnw->iotg.stop_hnp_poll = NULL;
+ pnw->iotg.otg.state = OTG_STATE_UNDEFINED;
+ pnw->rt_resuming = 0;
+ pnw->rt_quiesce = 0;
+ pnw->queue_stop = 0;
+ pnw->phy_power_state = 1;
+ if (usb_add_phy(&pnw->iotg.otg, USB_PHY_TYPE_USB2)) {
+ dev_err(pnw->dev, "can't set transceiver\n");
+ retval = -EBUSY;
+ goto err;
+ }
+
+ pnw->iotg.ulpi_ops.read = penwell_otg_ulpi_read;
+ pnw->iotg.ulpi_ops.write = penwell_otg_ulpi_write;
+
+ spin_lock_init(&pnw->iotg.hnp_poll_lock);
+ spin_lock_init(&pnw->lock);
+
+ wake_lock_init(&pnw->wake_lock, WAKE_LOCK_SUSPEND, "pnw_wake_lock");
+
+ init_timer(&pnw->hsm_timer);
+ init_timer(&pnw->bus_mon_timer);
+ init_timer(&pnw->hnp_poll_timer);
+ init_completion(&pnw->adp.adp_comp);
+
+ /* Battery Charging part */
+ spin_lock_init(&pnw->charger_lock);
+ spin_lock_init(&pnw->cap_lock);
+ pnw->charging_cap.ma = CHRG_CURR_DISCONN;
+ pnw->charging_cap.chrg_type = CHRG_UNKNOWN;
+ pnw->charging_cap.current_event = USBCHRG_EVENT_DISCONN;
+ pnw->psc_cap.ma = CHRG_CURR_DISCONN;
+ pnw->psc_cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+ pnw->psc_cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&pnw->iotg.iotg_notifier);
+ /* For generic otg notifications */
+ ATOMIC_INIT_NOTIFIER_HEAD(&pnw->iotg.otg.notifier);
+
+ pnw->iotg_notifier.notifier_call = penwell_otg_iotg_notify;
+ if (intel_mid_otg_register_notifier(&pnw->iotg, &pnw->iotg_notifier)) {
+ dev_dbg(pnw->dev, "Failed to register notifier\n");
+ retval = -EBUSY;
+ goto err;
+ }
+ if (register_pm_notifier(&pnw_sleep_pm_notifier)) {
+ dev_dbg(pnw->dev, "Fail to register PM notifier\n");
+ retval = -EBUSY;
+ goto err;
+ }
+
+ /* listen usb core events */
+ usb_register_notify(&otg_nb);
+
+ pnw->otg_pdata = pdev->dev.platform_data;
+ if (pnw->otg_pdata == NULL) {
+ dev_err(pnw->dev, "Failed to get OTG platform data.\n");
+ retval = -ENODEV;
+ goto err;
+ }
+
+ if (pnw->otg_pdata->hnp_poll_support) {
+ pnw->iotg.start_hnp_poll = penwell_otg_start_hnp_poll;
+ pnw->iotg.stop_hnp_poll = penwell_otg_stop_hnp_poll;
+ }
+
+ /* FIXME: Reads Charging compliance bit from scu mip.
+ * This snippet needs to be cleaned up after EM inteface is ready
+ */
+ if (is_clovertrail(pdev)) {
+ u8 smip_data = 0;
+ if (!intel_scu_ipc_read_mip(&smip_data, 1, 0x2e7, 1)) {
+ pnw->otg_pdata->charging_compliance =
+ !(smip_data & 0x40);
+ dev_info(pnw->dev, "charging_compliance = %d\n",
+ pnw->otg_pdata->charging_compliance);
+ } else
+ dev_err(pnw->dev, "scu mip read error\n");
+ }
+
+ if (!is_clovertrail(pdev)) {
+ if (pnw->otg_pdata->gpio_vbus) {
+ retval = gpio_request(pnw->otg_pdata->gpio_vbus,
+ "usb_otg_phy_reset");
+ if (retval < 0) {
+ dev_err(pnw->dev, "request gpio(%d) failed\n",
+ pnw->otg_pdata->gpio_vbus);
+ retval = -ENODEV;
+ goto err;
+ }
+ }
+ }
+
+ if (is_clovertrail(pdev)) {
+ /* Set up gpio for Clovertrail */
+ retval = gpio_request(pnw->otg_pdata->gpio_reset,
+ "usb_otg_phy_reset");
+ if (retval < 0) {
+ dev_err(pnw->dev, "request phy reset gpio(%d) failed\n",
+ pnw->otg_pdata->gpio_reset);
+ retval = -ENODEV;
+ goto err;
+ }
+ retval = gpio_request(pnw->otg_pdata->gpio_cs,
+ "usb_otg_phy_cs");
+ if (retval < 0) {
+ dev_err(pnw->dev, "request phy cs gpio(%d) failed\n",
+ pnw->otg_pdata->gpio_cs);
+ gpio_free(pnw->otg_pdata->gpio_reset);
+ retval = -ENODEV;
+ goto err;
+ }
+ }
+
+ penwell_otg_phy_power(1);
+ penwell_otg_phy_reset();
+
+ mutex_init(&pnw->msic_mutex);
+ pnw->msic = penwell_otg_check_msic();
+
+ penwell_otg_phy_low_power(0);
+
+ if (!is_clovertrail(pdev)) {
+ /* Workaround for ULPI lockup issue, need turn off PHY 4ms */
+ penwell_otg_phy_enable(0);
+ usleep_range(4000, 4500);
+ penwell_otg_phy_enable(1);
+ /* reset phy */
+ dev_dbg(pnw->dev, "Reset Phy over SPI\n");
+ penwell_otg_msic_spi_access(true);
+ penwell_otg_msic_write(MSIC_FUNCTRLSET, PHYRESET);
+ penwell_otg_msic_spi_access(false);
+ dev_dbg(pnw->dev, "Reset Phy over SPI Done\n");
+ }
+
+ /* Enable ID pullup immediately after reeable PHY */
+ val32 = readl(pnw->iotg.base + CI_OTGSC);
+ writel(val32 | OTGSC_IDPU, pnw->iotg.base + CI_OTGSC);
+
+ /* Wait correct value to be synced */
+ set_host_mode();
+ usleep_range(2000, 3000);
+ penwell_otg_phy_low_power(1);
+ msleep(100);
+
+ /* enable ACA device detection for CTP */
+ if (is_clovertrail(pdev))
+ penwell_otg_aca_enable();
+
+ reset_otg();
+ init_hsm();
+
+ /* we need to set active early or the first irqs will be ignored */
+ pm_runtime_set_active(&pdev->dev);
+
+ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+ driver_name, pnw) != 0) {
+ dev_dbg(pnw->dev,
+ "request interrupt %d failed\n", pdev->irq);
+ retval = -EBUSY;
+ goto err;
+ }
+
+ /* enable OTGSC int */
+ val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
+ OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
+ writel(val32, pnw->iotg.base + CI_OTGSC);
+
+ retval = device_create_file(&pdev->dev, &dev_attr_registers);
+ if (retval < 0) {
+ dev_dbg(pnw->dev,
+ "Can't register sysfs attribute: %d\n", retval);
+ goto err;
+ }
+
+ retval = device_create_file(&pdev->dev, &dev_attr_hsm);
+ if (retval < 0) {
+ dev_dbg(pnw->dev,
+ "Can't hsm sysfs attribute: %d\n", retval);
+ goto err;
+ }
+
+ retval = device_create_file(&pdev->dev, &dev_attr_chargers);
+ if (retval < 0) {
+ dev_dbg(pnw->dev,
+ "Can't chargers sysfs attribute: %d\n", retval);
+ goto err;
+ }
+
+ retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
+ if (retval < 0) {
+ dev_dbg(pnw->dev,
+ "Can't register sysfs attr group: %d\n", retval);
+ goto err;
+ }
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ penwell_update_transceiver();
+
+ return 0;
+
+err:
+ if (the_transceiver)
+ penwell_otg_remove(pdev);
+done:
+ return retval;
+}
+
+static void penwell_otg_remove(struct pci_dev *pdev)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct otg_bc_event *evt, *tmp;
+
+	/* ACA device detection disable */
+	penwell_otg_aca_disable();
+
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+
+	if (pnw->qwork) {
+		flush_workqueue(pnw->qwork);
+		destroy_workqueue(pnw->qwork);
+	}
+
+	if (pnw->chrg_qwork) {
+		flush_workqueue(pnw->chrg_qwork);
+		destroy_workqueue(pnw->chrg_qwork);
+		list_for_each_entry_safe(evt, tmp, &pnw->chrg_evt_queue, node) {
+			list_del(&evt->node);
+			kfree(evt);
+		}
+	}
+
+	/* disable OTGSC interrupt (skip if BAR0 was never mapped) */
+	if (pnw->iotg.base)
+		writel(0, pnw->iotg.base + CI_OTGSC);
+	wake_lock_destroy(&pnw->wake_lock);
+
+	if (pdev->irq)
+		free_irq(pdev->irq, pnw);
+	if (pnw->cfg_region)
+		release_mem_region(USBCFG_ADDR, USBCFG_LEN);
+	if (pnw->iotg.base)
+		iounmap(pnw->iotg.base);
+	kfree(pnw->iotg.otg.otg);
+	if (pnw->region)
+		release_mem_region(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+
+	usb_remove_phy(&pnw->iotg.otg);
+	pci_disable_device(pdev);
+	sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
+	device_remove_file(&pdev->dev, &dev_attr_chargers);
+	device_remove_file(&pdev->dev, &dev_attr_hsm);
+	device_remove_file(&pdev->dev, &dev_attr_registers);
+	usb_unregister_notify(&otg_nb);
+	kfree(pnw);
+	the_transceiver = NULL;	/* clear global; a dead local NULL is useless */
+}
+
+void penwell_otg_shutdown(struct pci_dev *pdev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+
+ dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+ if (!is_clovertrail(pdev)) {
+ /* Disable MSIC Interrupt Notifications */
+ penwell_otg_msic_spi_access(true);
+
+ penwell_otg_msic_write(MSIC_INT_EN_RISE_CLR, 0x1F);
+ penwell_otg_msic_write(MSIC_INT_EN_FALL_CLR, 0x1F);
+
+ penwell_otg_msic_spi_access(false);
+ }
+
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+
+static int penwell_otg_suspend_noirq(struct device *dev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ int ret = 0;
+ unsigned long flags;
+
+ dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+ switch (iotg->otg.state) {
+ case OTG_STATE_A_VBUS_ERR:
+ set_host_mode();
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ penwell_otg_del_timer(TA_WAIT_VFALL_TMR);
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_B_IDLE:
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
+ iotg->hsm.a_srp_det = 0;
+
+ /* Turn off VBus */
+ otg_set_vbus(iotg->otg.otg, false);
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ if (pnw->iotg.suspend_noirq_host)
+ ret = pnw->iotg.suspend_noirq_host(&pnw->iotg);
+ goto done;
+ break;
+ case OTG_STATE_A_SUSPEND:
+ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+ penwell_otg_HABA(0);
+ PNW_STOP_HOST(pnw);
+ iotg->hsm.a_srp_det = 0;
+
+ penwell_otg_phy_vbus_wakeup(false);
+
+ /* Turn off VBus */
+ otg_set_vbus(iotg->otg.otg, false);
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+
+ if (pnw->iotg.stop_peripheral)
+ pnw->iotg.stop_peripheral(&pnw->iotg);
+ else
+ dev_dbg(pnw->dev, "client driver has been stopped.\n");
+
+ /* Turn off VBus */
+ otg_set_vbus(iotg->otg.otg, false);
+ iotg->hsm.a_srp_det = 0;
+ iotg->otg.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Stop HNP polling */
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+
+ PNW_STOP_HOST(pnw);
+ iotg->hsm.b_bus_req = 0;
+ iotg->otg.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ dev_dbg(pnw->dev, "don't suspend, client still alive\n");
+ ret = -EBUSY;
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+ penwell_otg_HAAR(0);
+
+ PNW_STOP_HOST(pnw);
+ iotg->hsm.b_bus_req = 0;
+ iotg->otg.state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ dev_dbg(pnw->dev, "error state before suspend\n");
+ break;
+ }
+
+
+ if (ret) {
+ spin_lock_irqsave(&pnw->lock, flags);
+ pnw->queue_stop = 0;
+ spin_unlock_irqrestore(&pnw->lock, flags);
+
+ penwell_update_transceiver();
+ } else {
+ penwell_otg_phy_low_power(1);
+ penwell_otg_vusb330_low_power(1);
+#ifdef CONFIG_USB_PENWELL_OTG_PHY_OFF
+ if (iotg->otg.state == OTG_STATE_B_IDLE) {
+ penwell_otg_phy_power(0);
+ pnw->phy_power_state = 0;
+ }
+#endif
+ }
+
+done:
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+ return ret;
+}
+
+static int penwell_otg_suspend(struct device *dev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ int ret = 0;
+ unsigned long flags;
+
+ dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+ if (iotg->otg.state == OTG_STATE_B_PERIPHERAL) {
+ dev_dbg(pnw->dev, "still alive, don't suspend\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /* quiesce any work scheduled */
+ spin_lock_irqsave(&pnw->lock, flags);
+ pnw->queue_stop = 1;
+ spin_unlock_irqrestore(&pnw->lock, flags);
+ flush_workqueue(pnw->qwork);
+ if (delayed_work_pending(&pnw->ulpi_check_work)) {
+ spin_lock_irqsave(&pnw->lock, flags);
+ pnw->queue_stop = 0;
+ spin_unlock_irqrestore(&pnw->lock, flags);
+ ret = -EBUSY;
+ goto done;
+ } else
+ flush_delayed_work_sync(&pnw->ulpi_check_work);
+
+ switch (iotg->otg.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+ iotg->hsm.a_srp_det = 0;
+ if (iotg->suspend_host)
+ ret = iotg->suspend_host(iotg);
+ break;
+ case OTG_STATE_A_HOST:
+ if (iotg->stop_hnp_poll)
+ iotg->stop_hnp_poll(iotg);
+ if (iotg->suspend_host)
+ ret = iotg->suspend_host(iotg);
+ break;
+ default:
+ break;
+ }
+
+done:
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+ return ret;
+}
+
+static void penwell_otg_dump_bogus_wake(void)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ int addr = 0x2C8, retval;
+ u8 val;
+
+ /* Enable SPI access */
+ penwell_otg_msic_spi_access(true);
+
+ retval = penwell_otg_msic_read(addr, &val);
+ if (retval) {
+ dev_err(pnw->dev, "msic read failed\n");
+ goto out;
+ }
+ dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+
+ for (addr = 0x340; addr <= 0x348; addr++) {
+ retval = penwell_otg_msic_read(addr, &val);
+ if (retval) {
+ dev_err(pnw->dev, "msic read failed\n");
+ goto out;
+ }
+ dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+ }
+
+ for (addr = 0x394; addr <= 0x3BF; addr++) {
+ retval = penwell_otg_msic_read(addr, &val);
+ if (retval) {
+ dev_err(pnw->dev, "msic read failed\n");
+ goto out;
+ }
+ dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+ }
+
+ addr = 0x192;
+ retval = penwell_otg_msic_read(addr, &val);
+ if (retval) {
+ dev_err(pnw->dev, "msic read failed\n");
+ goto out;
+ }
+
+ dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+out:
+ penwell_otg_msic_spi_access(false);
+}
+
+static int penwell_otg_resume_noirq(struct device *dev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ struct pci_dev *pdev;
+ int ret = 0;
+ u32 val;
+
+ dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+ pdev = to_pci_dev(pnw->dev);
+
+ /* If USB PHY is in OFF state, power on it and do basic init work */
+ if (!pnw->phy_power_state) {
+ penwell_otg_phy_power(1);
+ /* Change phy_power_state to 1 again */
+ pnw->phy_power_state = 1;
+ penwell_otg_phy_reset();
+
+ /* Reset controller and clear PHY low power mode setting */
+ reset_otg();
+ penwell_otg_phy_low_power(0);
+
+ /* Wait ID value to be synced */
+ msleep(60);
+ }
+
+ /* add delay in case controller is back to D0, controller
+ * needs time to sync/latch value for OTGSC register */
+ usleep_range(2000, 2500);
+
+ if (mid_pmu_is_wake_source(PMU_OTG_WAKE_SOURCE)) {
+ /* dump OTGSC register for wakeup event */
+ val = readl(pnw->iotg.base + CI_OTGSC);
+ dev_info(pnw->dev, "%s: CI_OTGSC=0x%x\n", __func__, val);
+ if (val & OTGSC_IDIS)
+ dev_info(pnw->dev, "%s: id change\n", __func__);
+ if (val & OTGSC_DPIS)
+ dev_info(pnw->dev, "%s: data pulse\n", __func__);
+ if (val & OTGSC_BSEIS)
+ dev_info(pnw->dev, "%s: b sess end\n", __func__);
+ if (val & OTGSC_BSVIS)
+ dev_info(pnw->dev, "%s: b sess valid\n", __func__);
+ if (val & OTGSC_ASVIS)
+ dev_info(pnw->dev, "%s: a sess valid\n", __func__);
+ if (val & OTGSC_AVVIS)
+ dev_info(pnw->dev, "%s: a vbus valid\n", __func__);
+
+ if (!(val & OTGSC_INTSTS_MASK)) {
+ static bool uevent_reported;
+ dev_info(pnw->dev,
+ "%s: waking up from USB source, but not a OTG wakeup event\n",
+ __func__);
+ if (!uevent_reported) {
+ if (!is_clovertrail(pdev))
+ penwell_otg_dump_bogus_wake();
+ queue_work(pnw->qwork, &pnw->uevent_work);
+ uevent_reported = true;
+ }
+ }
+ }
+
+ if (iotg->otg.state != OTG_STATE_A_WAIT_BCON &&
+ iotg->otg.state != OTG_STATE_A_HOST) {
+ penwell_otg_vusb330_low_power(0);
+ penwell_otg_phy_low_power(0);
+ }
+
+ /* D3->D0 controller will be reset, so reset work mode and PHY state
+ * which is cleared by the reset */
+
+ switch (pnw->iotg.otg.state) {
+ case OTG_STATE_B_IDLE:
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ if (iotg->resume_noirq_host)
+ ret = iotg->resume_noirq_host(iotg);
+ break;
+ default:
+ break;
+ }
+
+
+ /* We didn't disable otgsc interrupt, to prevent intr from happening
+ * before penwell_otg_resume, intr is disabled here, and can be enabled
+ * by penwell_otg_resume
+ */
+ penwell_otg_intr(0);
+
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+ return ret;
+}
+
+static int penwell_otg_resume(struct device *dev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
+ int ret = 0;
+ unsigned long flags;
+
+ dev_dbg(pnw->dev, "%s --->\n", __func__);
+ switch (iotg->otg.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if (iotg->resume_host)
+ ret = iotg->resume_host(iotg);
+ penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+ break;
+ case OTG_STATE_A_HOST:
+ if (iotg->resume_host)
+ ret = iotg->resume_host(iotg);
+
+ /* FIXME: Ideally here should re-start HNP polling,
+ * no start HNP here, because it blocks the resume
+ */
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* allow queue work from notifier */
+ spin_lock_irqsave(&pnw->lock, flags);
+ pnw->queue_stop = 0;
+ spin_unlock_irqrestore(&pnw->lock, flags);
+
+ penwell_otg_intr(1);
+
+	/* If a plug-in or plug-out event happens during D3,
+	 * we will miss the interrupt, so check OTGSC here to check
+ * if any ID change and update hsm correspondingly
+ */
+ update_hsm();
+ penwell_update_transceiver();
+
+ dev_dbg(pnw->dev, "%s <---\n", __func__);
+ return ret;
+}
+
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM */
+static int penwell_otg_runtime_suspend(struct device *dev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret = 0;
+ u32 val;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s --->\n", __func__);
+
+ pnw->rt_quiesce = 1;
+
+ /* Flush any pending otg irq on local or any other CPUs.
+ *
+ * Host mode or Device mode irq should be synchronized by itself in
+ * their runtime_suspend handler. In fact, Host mode does so. For
+ * device mode, we don't care as its runtime PM is disabled.
+ *
+ * As device's runtime_status is already RPM_SUSPENDING, after flushing,
+ * any new irq handling will be rejected (otg irq handler only continues
+ * if runtime_status is RPM_ACTIVE).
+ * Thus, now it's safe to put PHY into low power mode and gate the
+ * fabric later in pci_set_power_state().
+ */
+ synchronize_irq(pdev->irq);
+
+ switch (pnw->iotg.otg.state) {
+ case OTG_STATE_A_IDLE:
+ break;
+ case OTG_STATE_B_IDLE:
+ val = readl(pnw->iotg.base + CI_USBMODE);
+ if (!(val & USBMODE_CM)) {
+ /* Controller needs to reset & set mode */
+ dev_dbg(dev, "reset to client mode\n");
+ set_client_mode();
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ if (pnw->iotg.runtime_suspend_host)
+ ret = pnw->iotg.runtime_suspend_host(&pnw->iotg);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ case OTG_STATE_B_PERIPHERAL:
+ if (pnw->iotg.runtime_suspend_peripheral)
+ ret = pnw->iotg.runtime_suspend_peripheral(&pnw->iotg);
+ break;
+ default:
+ break;
+ }
+
+ if (ret) {
+ spin_lock_irqsave(&pnw->lock, flags);
+ pnw->rt_quiesce = 0;
+ if (pnw->rt_resuming) {
+ pnw->rt_resuming = 0;
+ pm_runtime_put(pnw->dev);
+ }
+ spin_unlock_irqrestore(&pnw->lock, flags);
+ goto DONE;
+ }
+
+ penwell_otg_phy_low_power(1);
+
+ msleep(2);
+
+ penwell_otg_vusb330_low_power(1);
+
+DONE:
+ dev_dbg(dev, "%s <---: ret = %d\n", __func__, ret);
+ return ret;
+}
+
+static int penwell_otg_runtime_resume(struct device *dev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+ int ret = 0;
+ u32 val;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s --->\n", __func__);
+
+ penwell_otg_phy_low_power(0);
+ penwell_otg_vusb330_low_power(0);
+ /* waiting for hardware stable */
+ usleep_range(2000, 4000);
+
+ switch (pnw->iotg.otg.state) {
+ case OTG_STATE_A_IDLE:
+ break;
+ case OTG_STATE_B_IDLE:
+ val = readl(pnw->iotg.base + CI_USBMODE);
+ if (!(val & USBMODE_CM)) {
+ /* Controller needs to reset & set mode */
+ dev_dbg(dev, "reset to client mode\n");
+ set_client_mode();
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ if (pnw->iotg.runtime_resume_host)
+ ret = pnw->iotg.runtime_resume_host(&pnw->iotg);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ case OTG_STATE_B_PERIPHERAL:
+ if (pnw->iotg.runtime_resume_peripheral)
+ ret = pnw->iotg.runtime_resume_peripheral(&pnw->iotg);
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&pnw->lock, flags);
+ pnw->rt_quiesce = 0;
+ if (pnw->rt_resuming) {
+ pnw->rt_resuming = 0;
+ pm_runtime_put(pnw->dev);
+ }
+ spin_unlock_irqrestore(&pnw->lock, flags);
+
+ dev_dbg(dev, "%s <---\n", __func__);
+
+ return ret;
+}
+
+static int penwell_otg_runtime_idle(struct device *dev)
+{
+ struct penwell_otg *pnw = the_transceiver;
+
+ dev_dbg(dev, "%s --->\n", __func__);
+
+ switch (pnw->iotg.otg.state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_VFALL:
+ case OTG_STATE_A_VBUS_ERR:
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_HOST:
+ dev_dbg(dev, "Keep in active\n");
+ dev_dbg(dev, "%s <---\n", __func__);
+ return -EBUSY;
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ /* Schedule runtime_suspend without delay */
+ pm_schedule_suspend(dev, 0);
+ dev_dbg(dev, "%s <---\n", __func__);
+ return -EBUSY;
+ default:
+ break;
+ }
+
+ /* some delay for stability */
+ pm_schedule_suspend(dev, 500);
+
+ dev_dbg(dev, "%s <---\n", __func__);
+
+ return -EBUSY;
+}
+
+#else
+
+#define penwell_otg_runtime_suspend NULL
+#define penwell_otg_runtime_resume NULL
+#define penwell_otg_runtime_idle NULL
+
+#endif
+
+/*----------------------------------------------------------*/
+
+DEFINE_PCI_DEVICE_TABLE(pci_ids) = {{
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0x20),
+ .class_mask = ~0,
+ .vendor = 0x8086,
+ .device = 0x0829,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* Cloverview */
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0x20),
+ .class_mask = ~0,
+ .vendor = 0x8086,
+ .device = 0xE006,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* end: all zeroes */ }
+};
+
+static const struct dev_pm_ops penwell_otg_pm_ops = {
+ .runtime_suspend = penwell_otg_runtime_suspend,
+ .runtime_resume = penwell_otg_runtime_resume,
+ .runtime_idle = penwell_otg_runtime_idle,
+ .suspend = penwell_otg_suspend,
+ .suspend_noirq = penwell_otg_suspend_noirq,
+ .resume = penwell_otg_resume,
+ .resume_noirq = penwell_otg_resume_noirq,
+};
+
+static struct pci_driver otg_pci_driver = {
+ .name = (char *) driver_name,
+ .id_table = pci_ids,
+
+ .probe = penwell_otg_probe,
+ .remove = penwell_otg_remove,
+ .shutdown = penwell_otg_shutdown,
+ .driver = {
+ .pm = &penwell_otg_pm_ops
+ },
+};
+
+static int __init penwell_otg_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ pm_sss0_base = ioremap_nocache(0xFF11D030, 0x100);
+#endif
+ return pci_register_driver(&otg_pci_driver);
+}
+module_init(penwell_otg_init);
+
+static void __exit penwell_otg_cleanup(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ iounmap(pm_sss0_base);
+#endif
+ pci_unregister_driver(&otg_pci_driver);
+}
+module_exit(penwell_otg_cleanup);
To compile this driver as a module, choose M here.
+config INTEL_SCU_WATCHDOG_EVO
+ bool "Intel SCU Watchdog Evolution for Mobile Platforms"
+ depends on X86_INTEL_MID
+ ---help---
+ Hardware driver evolution for the watchdog timer built into the Intel
+ SCU for Intel Mobile Platforms.
+
+ This driver supports the watchdog evolution implementation in SCU,
+ available for Merrifield generation.
+
+ To compile this driver as a module, choose M here.
+
+config DISABLE_SCU_WATCHDOG
+ bool "De-activate Intel SCU Watchdog by cmdline for Mobile Platforms"
+ depends on INTEL_SCU_WATCHDOG || INTEL_SCU_WATCHDOG_EVO
+ ---help---
+ De-activate the watchdog by cmdline for Intel Mobile Platforms.
+ This allows to use breakpoints without resetting.
+
+ Only for debug purpose.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
+obj-$(CONFIG_INTEL_SCU_WATCHDOG_EVO) += intel_scu_watchdog_evo.o
# M32R Architecture
+++ /dev/null
-/*
- * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
- * for Intel part #(s):
- * - AF82MP20 PCH
- *
- * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General
- * Public License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the Free
- * Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- * The full GNU General Public License is included in this
- * distribution in the file called COPYING.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/fs.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/sfi.h>
-#include <asm/irq.h>
-#include <linux/atomic.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
-#include <asm/mrst.h>
-
-#include "intel_scu_watchdog.h"
-
-/* Bounds number of times we will retry loading time count */
-/* This retry is a work around for a silicon bug. */
-#define MAX_RETRY 16
-
-#define IPC_SET_WATCHDOG_TIMER 0xF8
-
-static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
-module_param(timer_margin, int, 0);
-MODULE_PARM_DESC(timer_margin,
- "Watchdog timer margin"
- "Time between interrupt and resetting the system"
- "The range is from 1 to 160"
- "This is the time for all keep alives to arrive");
-
-static int timer_set = DEFAULT_TIME;
-module_param(timer_set, int, 0);
-MODULE_PARM_DESC(timer_set,
- "Default Watchdog timer setting"
- "Complete cycle time"
- "The range is from 1 to 170"
- "This is the time for all keep alives to arrive");
-
-/* After watchdog device is closed, check force_boot. If:
- * force_boot == 0, then force boot on next watchdog interrupt after close,
- * force_boot == 1, then force boot immediately when device is closed.
- */
-static int force_boot;
-module_param(force_boot, int, 0);
-MODULE_PARM_DESC(force_boot,
- "A value of 1 means that the driver will reboot"
- "the system immediately if the /dev/watchdog device is closed"
- "A value of 0 means that when /dev/watchdog device is closed"
- "the watchdog timer will be refreshed for one more interval"
- "of length: timer_set. At the end of this interval, the"
- "watchdog timer will reset the system."
- );
-
-/* there is only one device in the system now; this can be made into
- * an array in the future if we have more than one device */
-
-static struct intel_scu_watchdog_dev watchdog_device;
-
-/* Forces restart, if force_reboot is set */
-static void watchdog_fire(void)
-{
- if (force_boot) {
- pr_crit("Initiating system reboot\n");
- emergency_restart();
- pr_crit("Reboot didn't ?????\n");
- }
-
- else {
- pr_crit("Immediate Reboot Disabled\n");
- pr_crit("System will reset when watchdog timer times out!\n");
- }
-}
-
-static int check_timer_margin(int new_margin)
-{
- if ((new_margin < MIN_TIME_CYCLE) ||
- (new_margin > MAX_TIME - timer_set)) {
- pr_debug("value of new_margin %d is out of the range %d to %d\n",
- new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * IPC operations
- */
-static int watchdog_set_ipc(int soft_threshold, int threshold)
-{
- u32 *ipc_wbuf;
- u8 cbuf[16] = { '\0' };
- int ipc_ret = 0;
-
- ipc_wbuf = (u32 *)&cbuf;
- ipc_wbuf[0] = soft_threshold;
- ipc_wbuf[1] = threshold;
-
- ipc_ret = intel_scu_ipc_command(
- IPC_SET_WATCHDOG_TIMER,
- 0,
- ipc_wbuf,
- 2,
- NULL,
- 0);
-
- if (ipc_ret != 0)
- pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
-
- return ipc_ret;
-};
-
-/*
- * Intel_SCU operations
- */
-
-/* timer interrupt handler */
-static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
-{
- int int_status;
- int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
-
- pr_debug("irq, int_status: %x\n", int_status);
-
- if (int_status != 0)
- return IRQ_NONE;
-
- /* has the timer been started? If not, then this is spurious */
- if (watchdog_device.timer_started == 0) {
- pr_debug("spurious interrupt received\n");
- return IRQ_HANDLED;
- }
-
- /* temporarily disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
- /* set the timer to the threshold */
- iowrite32(watchdog_device.threshold,
- watchdog_device.timer_load_count_addr);
-
- /* allow the timer to run */
- iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
- return IRQ_HANDLED;
-}
-
-static int intel_scu_keepalive(void)
-{
-
- /* read eoi register - clears interrupt */
- ioread32(watchdog_device.timer_clear_interrupt_addr);
-
- /* temporarily disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
- /* set the timer to the soft_threshold */
- iowrite32(watchdog_device.soft_threshold,
- watchdog_device.timer_load_count_addr);
-
- /* allow the timer to run */
- iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
- return 0;
-}
-
-static int intel_scu_stop(void)
-{
- iowrite32(0, watchdog_device.timer_control_addr);
- return 0;
-}
-
-static int intel_scu_set_heartbeat(u32 t)
-{
- int ipc_ret;
- int retry_count;
- u32 soft_value;
- u32 hw_pre_value;
- u32 hw_value;
-
- watchdog_device.timer_set = t;
- watchdog_device.threshold =
- timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
- watchdog_device.soft_threshold =
- (watchdog_device.timer_set - timer_margin)
- * watchdog_device.timer_tbl_ptr->freq_hz;
-
- pr_debug("set_heartbeat: timer freq is %d\n",
- watchdog_device.timer_tbl_ptr->freq_hz);
- pr_debug("set_heartbeat: timer_set is %x (hex)\n",
- watchdog_device.timer_set);
- pr_debug("set_hearbeat: timer_margin is %x (hex)\n", timer_margin);
- pr_debug("set_heartbeat: threshold is %x (hex)\n",
- watchdog_device.threshold);
- pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
- watchdog_device.soft_threshold);
-
- /* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
- /* watchdog timing come out right. */
- watchdog_device.threshold =
- watchdog_device.threshold / FREQ_ADJUSTMENT;
- watchdog_device.soft_threshold =
- watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
-
- /* temporarily disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
- /* send the threshold and soft_threshold via IPC to the processor */
- ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
- watchdog_device.threshold);
-
- if (ipc_ret != 0) {
- /* Make sure the watchdog timer is stopped */
- intel_scu_stop();
- return ipc_ret;
- }
-
- /* Soft Threshold set loop. Early versions of silicon did */
- /* not always set this count correctly. This loop checks */
- /* the value and retries if it was not set correctly. */
-
- retry_count = 0;
- soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
- do {
-
- /* Make sure timer is stopped */
- intel_scu_stop();
-
- if (MAX_RETRY < retry_count++) {
- /* Unable to set timer value */
- pr_err("Unable to set timer\n");
- return -ENODEV;
- }
-
- /* set the timer to the soft threshold */
- iowrite32(watchdog_device.soft_threshold,
- watchdog_device.timer_load_count_addr);
-
- /* read count value before starting timer */
- hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
- hw_pre_value = hw_pre_value & 0xFFFF0000;
-
- /* Start the timer */
- iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
- /* read the value the time loaded into its count reg */
- hw_value = ioread32(watchdog_device.timer_load_count_addr);
- hw_value = hw_value & 0xFFFF0000;
-
-
- } while (soft_value != hw_value);
-
- watchdog_device.timer_started = 1;
-
- return 0;
-}
-
-/*
- * /dev/watchdog handling
- */
-
-static int intel_scu_open(struct inode *inode, struct file *file)
-{
-
- /* Set flag to indicate that watchdog device is open */
- if (test_and_set_bit(0, &watchdog_device.driver_open))
- return -EBUSY;
-
- /* Check for reopen of driver. Reopens are not allowed */
- if (watchdog_device.driver_closed)
- return -EPERM;
-
- return nonseekable_open(inode, file);
-}
-
-static int intel_scu_release(struct inode *inode, struct file *file)
-{
- /*
- * This watchdog should not be closed, after the timer
- * is started with the WDIPC_SETTIMEOUT ioctl
- * If force_boot is set watchdog_fire() will cause an
- * immediate reset. If force_boot is not set, the watchdog
- * timer is refreshed for one more interval. At the end
- * of that interval, the watchdog timer will reset the system.
- */
-
- if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
- pr_debug("intel_scu_release, without open\n");
- return -ENOTTY;
- }
-
- if (!watchdog_device.timer_started) {
- /* Just close, since timer has not been started */
- pr_debug("closed, without starting timer\n");
- return 0;
- }
-
- pr_crit("Unexpected close of /dev/watchdog!\n");
-
- /* Since the timer was started, prevent future reopens */
- watchdog_device.driver_closed = 1;
-
- /* Refresh the timer for one more interval */
- intel_scu_keepalive();
-
- /* Reboot system (if force_boot is set) */
- watchdog_fire();
-
- /* We should only reach this point if force_boot is not set */
- return 0;
-}
-
-static ssize_t intel_scu_write(struct file *file,
- char const *data,
- size_t len,
- loff_t *ppos)
-{
-
- if (watchdog_device.timer_started)
- /* Watchdog already started, keep it alive */
- intel_scu_keepalive();
- else
- /* Start watchdog with timer value set by init */
- intel_scu_set_heartbeat(watchdog_device.timer_set);
-
- return len;
-}
-
-static long intel_scu_ioctl(struct file *file,
- unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- u32 __user *p = argp;
- u32 new_margin;
-
-
- static const struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT
- | WDIOF_KEEPALIVEPING,
- .firmware_version = 0, /* @todo Get from SCU via
- ipc_get_scu_fw_version()? */
- .identity = "Intel_SCU IOH Watchdog" /* len < 32 */
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp,
- &ident,
- sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_KEEPALIVE:
- intel_scu_keepalive();
-
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, p))
- return -EFAULT;
-
- if (check_timer_margin(new_margin))
- return -EINVAL;
-
- if (intel_scu_set_heartbeat(new_margin))
- return -EINVAL;
- return 0;
- case WDIOC_GETTIMEOUT:
- return put_user(watchdog_device.soft_threshold, p);
-
- default:
- return -ENOTTY;
- }
-}
-
-/*
- * Notifier for system down
- */
-static int intel_scu_notify_sys(struct notifier_block *this,
- unsigned long code,
- void *another_unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- /* Turn off the watchdog timer. */
- intel_scu_stop();
- return NOTIFY_DONE;
-}
-
-/*
- * Kernel Interfaces
- */
-static const struct file_operations intel_scu_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = intel_scu_write,
- .unlocked_ioctl = intel_scu_ioctl,
- .open = intel_scu_open,
- .release = intel_scu_release,
-};
-
-static int __init intel_scu_watchdog_init(void)
-{
- int ret;
- u32 __iomem *tmp_addr;
-
- /*
- * We don't really need to check this as the SFI timer get will fail
- * but if we do so we can exit with a clearer reason and no noise.
- *
- * If it isn't an intel MID device then it doesn't have this watchdog
- */
- if (!mrst_identify_cpu())
- return -ENODEV;
-
- /* Check boot parameters to verify that their initial values */
- /* are in range. */
- /* Check value of timer_set boot parameter */
- if ((timer_set < MIN_TIME_CYCLE) ||
- (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
- pr_err("value of timer_set %x (hex) is out of range from %x to %x (hex)\n",
- timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
- return -EINVAL;
- }
-
- /* Check value of timer_margin boot parameter */
- if (check_timer_margin(timer_margin))
- return -EINVAL;
-
- watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
-
- if (watchdog_device.timer_tbl_ptr == NULL) {
- pr_debug("timer is not available\n");
- return -ENODEV;
- }
- /* make sure the timer exists */
- if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
- pr_debug("timer %d does not have valid physical memory\n",
- sfi_mtimer_num);
- return -ENODEV;
- }
-
- if (watchdog_device.timer_tbl_ptr->irq == 0) {
- pr_debug("timer %d invalid irq\n", sfi_mtimer_num);
- return -ENODEV;
- }
-
- tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
- 20);
-
- if (tmp_addr == NULL) {
- pr_debug("timer unable to ioremap\n");
- return -ENOMEM;
- }
-
- watchdog_device.timer_load_count_addr = tmp_addr++;
- watchdog_device.timer_current_value_addr = tmp_addr++;
- watchdog_device.timer_control_addr = tmp_addr++;
- watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
- watchdog_device.timer_interrupt_status_addr = tmp_addr++;
-
- /* Set the default time values in device structure */
-
- watchdog_device.timer_set = timer_set;
- watchdog_device.threshold =
- timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
- watchdog_device.soft_threshold =
- (watchdog_device.timer_set - timer_margin)
- * watchdog_device.timer_tbl_ptr->freq_hz;
-
-
- watchdog_device.intel_scu_notifier.notifier_call =
- intel_scu_notify_sys;
-
- ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
- if (ret) {
- pr_err("cannot register notifier %d)\n", ret);
- goto register_reboot_error;
- }
-
- watchdog_device.miscdev.minor = WATCHDOG_MINOR;
- watchdog_device.miscdev.name = "watchdog";
- watchdog_device.miscdev.fops = &intel_scu_fops;
-
- ret = misc_register(&watchdog_device.miscdev);
- if (ret) {
- pr_err("cannot register miscdev %d err =%d\n",
- WATCHDOG_MINOR, ret);
- goto misc_register_error;
- }
-
- ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
- watchdog_timer_interrupt,
- IRQF_SHARED, "watchdog",
- &watchdog_device.timer_load_count_addr);
- if (ret) {
- pr_err("error requesting irq %d\n", ret);
- goto request_irq_error;
- }
- /* Make sure timer is disabled before returning */
- intel_scu_stop();
- return 0;
-
-/* error cleanup */
-
-request_irq_error:
- misc_deregister(&watchdog_device.miscdev);
-misc_register_error:
- unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
-register_reboot_error:
- intel_scu_stop();
- iounmap(watchdog_device.timer_load_count_addr);
- return ret;
-}
-
-static void __exit intel_scu_watchdog_exit(void)
-{
-
- misc_deregister(&watchdog_device.miscdev);
- unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
- /* disable the timer */
- iowrite32(0x00000002, watchdog_device.timer_control_addr);
- iounmap(watchdog_device.timer_load_count_addr);
-}
-
-late_initcall(intel_scu_watchdog_init);
-module_exit(intel_scu_watchdog_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_VERSION(WDT_VER);
--- /dev/null
+/*
+ * Intel_SCU 0.3: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* See Documentation/watchdog/intel-scu-watchdog.txt */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/rpmsg.h>
+#include <linux/nmi.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+
+#include "intel_scu_watchdog_evo.h"
+
+/* Adjustment flags */
+#define CONFIG_INTEL_SCU_SOFT_LOCKUP
+#define CONFIG_DEBUG_WATCHDOG
+
+/* Defines */
+#define STRING_RESET_TYPE_MAX_LEN 11
+#define STRING_COLD_OFF "COLD_OFF"
+#define STRING_COLD_RESET "COLD_RESET"
+#define STRING_COLD_BOOT "COLD_BOOT"
+
+#define EXT_TIMER0_MSI 12
+
+#define IPC_WATCHDOG 0xF8
+
+enum {
+ SCU_WATCHDOG_START = 0,
+ SCU_WATCHDOG_STOP,
+ SCU_WATCHDOG_KEEPALIVE,
+ SCU_WATCHDOG_SET_ACTION_ON_TIMEOUT
+};
+
+enum {
+ SCU_COLD_OFF_ON_TIMEOUT = 0,
+ SCU_COLD_RESET_ON_TIMEOUT,
+ SCU_COLD_BOOT_ON_TIMEOUT,
+ SCU_DO_NOTHING_ON_TIMEOUT
+};
+
+#ifdef CONFIG_DEBUG_FS
+#define SECURITY_WATCHDOG_ADDR 0xFF222230
+#define STRING_NONE "NONE"
+#endif
+
+/* Statics */
+static int reset_type_to_string(int reset_type, char *string);
+static int string_to_reset_type(const char *string, int *reset_type);
+static struct intel_scu_watchdog_dev watchdog_device;
+static unsigned char osnib_reset = OSNIB_WRITE_VALUE;
+
+/* Module params */
+static bool kicking_active = true;
+#ifdef CONFIG_DEBUG_WATCHDOG
+module_param(kicking_active, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kicking_active,
+ "Deactivate the kicking will result in a cold reset"
+ "after a while");
+#endif
+
+static bool disable_kernel_watchdog = false;
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+/*
+ * Please note that we are using a config CONFIG_DISABLE_SCU_WATCHDOG
+ * because this boot parameter should only be settable in a development
+ * environment.
+ */
+module_param(disable_kernel_watchdog, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_kernel_watchdog,
+ "Disable kernel watchdog"
+ "Set to 0, watchdog started at boot"
+ "and left running; Set to 1; watchdog"
+ "is not started until user space"
+ "watchdog daemon is started; also if the"
+ "timer is started by the iafw firmware, it"
+ "will be disabled upon initialization of this"
+ "driver if disable_kernel_watchdog is set");
+#endif
+
+static int pre_timeout = DEFAULT_PRETIMEOUT;
+
+static int timeout = DEFAULT_TIMEOUT;
+module_param(timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(timeout,
+ "Default Watchdog timer setting"
+ "Complete cycle time"
+ "The range is from 35 to 170"
+ "This is the time for all keep alives to arrive");
+
+static bool reset_on_release = true;
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+/*
+ * heartbeats: cpu last kstat.system times
+ * beattime : jiffies at the sample time of heartbeats.
+ * SOFT_LOCK_TIME : some time out in sec after warning interrupt.
+ * dump_softlock_debug : called on SOFT_LOCK_TIME time out after scu
+ * interrupt to log data to logbuffer and emmc-panic code,
+ * SOFT_LOCK_TIME needs to be < SCU warn to reset time
+ * which is currently 15 sec.
+ *
+ * The soft lock works by taking a snapshot of kstat_cpu(i).cpustat.system at
+ * the time of the warning interrupt for each cpu. Then at SOFT_LOCK_TIME the
+ * amount of time spent in system is computed and if it is within 10 ms of the
+ * total SOFT_LOCK_TIME on any cpu it will dump the stack on that cpu and then
+ * calls panic.
+ *
+ */
+static u64 heartbeats[NR_CPUS];
+static u64 beattime;
+#define SOFT_LOCK_TIME 10
+static void dump_softlock_debug(unsigned long data);
+DEFINE_TIMER(softlock_timer, dump_softlock_debug, 0, 0);
+
+static struct rpmsg_instance *watchdog_instance;
+
+/* time is about to run out and the scu will reset soon. quickly
+ * dump debug data to logbuffer and emmc by calling panic before lights
+ * go out.
+ */
+static void smp_dumpstack(void *info)
+{
+ dump_stack();
+}
+
+static void dump_softlock_debug(unsigned long data)
+{
+ int i, reboot;
+ u64 system[NR_CPUS], num_jifs;
+
+ memset(system, 0, NR_CPUS*sizeof(u64));
+
+ num_jifs = jiffies - beattime;
+ for_each_possible_cpu(i) {
+ system[i] = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM] -
+ heartbeats[i];
+ }
+
+ reboot = 0;
+
+ for_each_possible_cpu(i) {
+ if ((num_jifs - cputime_to_jiffies(system[i])) <
+ msecs_to_jiffies(10)) {
+ WARN(1, "cpu %d wedged\n", i);
+ smp_call_function_single(i, smp_dumpstack, NULL, 1);
+ reboot = 1;
+ }
+ }
+
+ if (reboot) {
+ panic_timeout = 10;
+ trigger_all_cpu_backtrace();
+ panic("Soft lock on CPUs\n");
+ }
+}
+#endif /* CONFIG_INTEL_SCU_SOFT_LOCKUP */
+
+/* Check current timeouts */
+/* Timeout bounds come from the MODULE_PARAM_DESC description */
+static int check_timeouts(int pre_timeout_time, int timeout_time)
+{
+ if (pre_timeout_time >= timeout_time)
+ return -EINVAL;
+ if (pre_timeout_time > 155 || pre_timeout_time < 1)
+ return -EINVAL;
+ if (timeout_time > 170 || timeout_time < 35)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Set the different timeouts needed by the SCU FW and start the
+ * kernel watchdog */
+static int watchdog_set_timeouts_and_start(int pretimeout,
+ int timeout)
+{
+ int ret, error = 0;
+ struct ipc_wd_start {
+ u32 pretimeout;
+ u32 timeout;
+ } ipc_wd_start = { pretimeout, timeout };
+
+ ret = rpmsg_send_command(watchdog_instance, IPC_WATCHDOG,
+ SCU_WATCHDOG_START, (u8 *)&ipc_wd_start,
+ NULL, sizeof(ipc_wd_start), 0);
+ if (ret) {
+ pr_crit("Error configuring and starting watchdog: %d\n",
+ ret);
+ error = -EIO;
+ }
+
+ return error;
+}
+
+/* Provisioning function for future enhancement : allow to fine tune timing
+ according to watchdog action settings */
+static int watchdog_set_appropriate_timeouts(void)
+{
+ pr_debug("Setting shutdown timeouts\n");
+ return watchdog_set_timeouts_and_start(pre_timeout, timeout);
+}
+
+/* Keep alive.
+ * Kicks the SCU watchdog via SCU_WATCHDOG_KEEPALIVE.  When kicking is
+ * disabled (kicking_active false, a test hook), the kick is silently
+ * swallowed and 0 is returned so the watchdog will eventually expire.
+ * Returns 0 on success, -EIO on IPC failure. */
+static int watchdog_keepalive(void)
+{
+	int ret, error = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (unlikely(!kicking_active)) {
+		/* Close our eyes */
+		pr_err("Transparent kicking\n");
+		return 0;
+	}
+
+	/* Really kick it */
+	ret = rpmsg_send_simple_command(watchdog_instance, IPC_WATCHDOG,
+					SCU_WATCHDOG_KEEPALIVE);
+	if (ret) {
+		pr_crit("Error executing keepalive: %x\n", ret);
+		error = -EIO;
+	}
+
+	return error;
+}
+
+/* stops the timer.
+ * Sends SCU_WATCHDOG_STOP to the firmware and marks the device stopped.
+ * Note: started is cleared even if the IPC failed, mirroring the
+ * firmware-stop attempt; returns 0 or -EIO. */
+static int watchdog_stop(void)
+{
+	int ret = 0;
+	int error = 0;
+
+	pr_crit("%s\n", __func__);
+
+	ret = rpmsg_send_simple_command(watchdog_instance, IPC_WATCHDOG,
+					SCU_WATCHDOG_STOP);
+	if (ret) {
+		pr_crit("Error stopping watchdog: %x\n", ret);
+		error = -EIO;
+	}
+
+	watchdog_device.started = false;
+
+	return error;
+}
+
+/* warning interrupt handler.
+ * Fired by the SCU at pre-timeout: dump backtraces on all CPUs and
+ * panic so the platform resets with diagnostic data.  panic() does not
+ * return, so IRQ_HANDLED below is unreachable in practice. */
+static irqreturn_t watchdog_warning_interrupt(int irq, void *dev_id)
+{
+	if (unlikely(!kicking_active))
+		pr_warn("[SHTDWN] WATCHDOG TIMEOUT for test!, %s\n", __func__);
+
+	else
+		pr_warn("[SHTDWN] %s, WATCHDOG TIMEOUT!\n", __func__);
+
+	/* Let's reset the platform after dumping some data */
+	trigger_all_cpu_backtrace();
+	panic("Kernel Watchdog");
+
+	/* This code should not be reached */
+	return IRQ_HANDLED;
+}
+
+/* Program and starts the timer.
+ * Updates the module-level timeout/pre_timeout, arms the SCU watchdog
+ * with them and flags the device started.  On IPC failure the watchdog
+ * is explicitly stopped and the error is propagated. */
+static int watchdog_config_and_start(u32 newtimeout, u32 newpretimeout)
+{
+	int ret;
+
+	timeout = newtimeout;
+	pre_timeout = newpretimeout;
+
+	pr_debug("timeout=%ds, pre_timeout=%ds\n", timeout, pre_timeout);
+
+	/* Configure the watchdog */
+	ret = watchdog_set_timeouts_and_start(pre_timeout, timeout);
+	if (ret) {
+		pr_err("%s: Cannot configure the watchdog\n", __func__);
+
+		/* Make sure the watchdog timer is stopped */
+		watchdog_stop();
+		return ret;
+	}
+
+	watchdog_device.started = true;
+
+	return 0;
+}
+
+/* Open.
+ * Single-open device: the first opener atomically claims bit 0 of
+ * driver_open; any concurrent opener gets -EBUSY.  Once the device has
+ * been closed after the timer started (driver_closed set), reopening is
+ * permanently forbidden with -EPERM. */
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+	/* Set flag to indicate that watchdog device is open */
+	if (test_and_set_bit(0, &watchdog_device.driver_open)) {
+		pr_err("watchdog device is busy\n");
+		return -EBUSY;
+	}
+
+	/* Check for reopen of driver. Reopens are not allowed */
+	if (watchdog_device.driver_closed) {
+		pr_err("watchdog device has been closed\n");
+		/* Drop the open bit taken above, otherwise every later
+		 * open attempt would wrongly report -EBUSY instead of
+		 * -EPERM. */
+		clear_bit(0, &watchdog_device.driver_open);
+		return -EPERM;
+	}
+
+	return nonseekable_open(inode, file);
+}
+
+/* Release */
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+	/*
+	 * This watchdog should not be closed, after the timer
+	 * is started with the WDIPC_SETTIMEOUT ioctl
+	 * If reset_on_release is set this will cause an
+	 * immediate reset. If reset_on_release is not set, the watchdog
+	 * timer is refreshed for one more interval. At the end
+	 * of that interval, the watchdog timer will reset the system.
+	 */
+
+	if (!test_bit(0, &watchdog_device.driver_open)) {
+		pr_err("intel_scu_release, without open\n");
+		return -ENOTTY;
+	}
+
+	if (!watchdog_device.started) {
+		/* Just close, since timer has not been started */
+		pr_err("Closed, without starting timer\n");
+		return 0;
+	}
+
+	pr_crit("Unexpected close of /dev/watchdog!\n");
+
+	/* Since the timer was started, prevent future reopens */
+	watchdog_device.driver_closed = 1;
+
+	/* Refresh the timer for one more interval */
+	watchdog_keepalive();
+
+	/* Reboot system if requested */
+	if (reset_on_release) {
+		pr_crit("Initiating system reboot.\n");
+		emergency_restart();
+	}
+
+	/* Otherwise leave the timer armed: the system resets when it
+	 * next expires, since no one can kick it any more. */
+	pr_crit("Immediate Reboot Disabled\n");
+	pr_crit("System will reset when watchdog timer expire!\n");
+
+	return 0;
+}
+
+/* Write.
+ * Any write to /dev/watchdog counts as a kick: the payload is ignored
+ * and the full length is always claimed consumed.  Kicks are dropped
+ * once shutdown_flag is set (system going down) or when the timer has
+ * not been started yet. */
+static ssize_t intel_scu_write(struct file *file, char const *data, size_t len,
+			      loff_t *ppos)
+{
+	pr_debug("watchdog %s\n", __func__);
+
+	if (watchdog_device.shutdown_flag == true)
+		/* do nothing if we are shutting down */
+		return len;
+
+	if (watchdog_device.started) {
+		/* Watchdog already started, keep it alive */
+		watchdog_keepalive();
+	}
+
+	return len;
+}
+
+/* ioctl.
+ * Implements the standard Linux watchdog ioctl set (WDIOC_*).
+ * SETTIMEOUT derives the pre-timeout as timeout - 15s and validates the
+ * pair; SETOPTIONS maps WDIOS_DISABLECARD/ENABLECARD onto stop/start. */
+static long intel_scu_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	u32 __user *p = argp;
+	u32 val;
+	u32 new_pre_timeout;
+	int options;
+
+	static const struct watchdog_info ident = {
+		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+		/* @todo Get from SCU via ipc_get_scu_fw_version()? */
+		.firmware_version = 0,
+		/* len < 32 */
+		.identity = "Intel_SCU IOH Watchdog"
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident,
+				    sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_KEEPALIVE:
+		pr_debug("%s: KeepAlive ioctl\n", __func__);
+		if (!watchdog_device.started)
+			return -EINVAL;
+
+		watchdog_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		pr_debug("%s: SetTimeout ioctl\n", __func__);
+
+		/* Timeouts may only be changed while the timer is off */
+		if (watchdog_device.started)
+			return -EBUSY;
+
+		if (get_user(val, p))
+			return -EFAULT;
+		/* Fixed 15s warning margin before the hard timeout */
+		new_pre_timeout = val-15;
+		if (check_timeouts(new_pre_timeout, val)) {
+			pr_warn("%s: Invalid timeout thresholds (timeout: %d, pretimeout: %d) \n", __func__, val, new_pre_timeout);
+			return -EINVAL;
+		}
+
+		pre_timeout = new_pre_timeout;
+		timeout = val;
+		return 0;
+	case WDIOC_GETTIMEOUT:
+		return put_user(timeout, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(options, p))
+			return -EFAULT;
+
+		if (options & WDIOS_DISABLECARD) {
+			pr_debug("%s: Stopping the watchdog\n", __func__);
+			watchdog_stop();
+			return 0;
+		}
+
+		if (options & WDIOS_ENABLECARD) {
+			pr_debug("%s: Starting the watchdog\n", __func__);
+
+			if (watchdog_device.started)
+				return -EBUSY;
+
+			if (check_timeouts(pre_timeout, timeout)) {
+				pr_warn("%s: Invalid thresholds\n",
+					__func__);
+				return -EINVAL;
+			}
+			if (watchdog_config_and_start(timeout, pre_timeout))
+				return -EINVAL;
+			return 0;
+		}
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
+
+/* Tell the SCU firmware what to do when the watchdog expires
+ * (cold boot / cold reset / cold off / do nothing).  On success the
+ * current choice is cached in normal_wd_action; returns 0 or -EIO. */
+static int watchdog_set_reset_type(int reset_type)
+{
+	int ret;
+	struct ipc_wd_on_timeout {
+		u32 reset_type;
+	} ipc_wd_on_timeout = { reset_type };
+
+	ret = rpmsg_send_command(watchdog_instance, IPC_WATCHDOG,
+				 SCU_WATCHDOG_SET_ACTION_ON_TIMEOUT,
+				 (u8 *)&ipc_wd_on_timeout, NULL,
+				 sizeof(ipc_wd_on_timeout), 0);
+	if (ret) {
+		pr_crit("Error setting watchdog action: %d\n", ret);
+		return -EIO;
+	}
+
+	watchdog_device.normal_wd_action = reset_type;
+
+	return 0;
+}
+
+/* Reboot notifier.
+ * On restart/halt/power-off: reprogram the shutdown timeouts, select
+ * the reset action matching the transition, kick once more and then
+ * block further keep-alives so the watchdog covers the shutdown path. */
+static int reboot_notifier(struct notifier_block *this,
+			   unsigned long code,
+			   void *another_unused)
+{
+	int ret;
+
+	if (code == SYS_RESTART || code == SYS_HALT || code == SYS_POWER_OFF) {
+		pr_warn("Reboot notifier\n");
+
+		if (watchdog_set_appropriate_timeouts())
+			pr_crit("reboot notifier cant set time\n");
+
+		/* The outer if() guarantees code is one of the three
+		 * values below, so ret is always assigned. */
+		switch (code) {
+		case SYS_RESTART:
+			ret = watchdog_set_reset_type(
+				watchdog_device.reboot_wd_action);
+			break;
+
+		case SYS_HALT:
+		case SYS_POWER_OFF:
+			ret = watchdog_set_reset_type(
+				watchdog_device.shutdown_wd_action);
+			break;
+		}
+		if (ret)
+			pr_err("%s: could not set reset type\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+		/* debugfs entry to generate a BUG during
+		any shutdown/reboot call */
+		if (watchdog_device.panic_reboot_notifier)
+			BUG();
+#endif
+		/* Don't do instant reset on close */
+		reset_on_release = false;
+
+		/* Kick once again */
+		if (disable_kernel_watchdog == false) {
+			ret = watchdog_keepalive();
+			if (ret)
+				pr_warn("%s: no keep alive\n", __func__);
+
+			/* Don't allow any more keep-alives */
+			watchdog_device.shutdown_flag = true;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* This code triggers a Security Watchdog.
+ * Opening the debugfs file maps the security watchdog register and
+ * reads it; the read itself is expected to reset the platform, so the
+ * code after readl() should never run.  If it does run (trigger did
+ * not fire), unmap the register before returning. */
+int open_security(struct inode *i, struct file *f)
+{
+	int ret = 0;
+	u64 *ptr;
+	u32 value;
+
+	ptr = ioremap_nocache(SECURITY_WATCHDOG_ADDR, sizeof(u32));
+
+	if (!ptr) {
+		pr_err("cannot open secwd's debugfile\n");
+		ret = -ENODEV;
+		goto error;
+	}
+	value = readl(ptr); /* trigger */
+
+	pr_err("%s: This code should never be reached but it got %x\n",
+	       __func__, (unsigned int)value);
+
+	/* Platform survived the trigger: release the mapping so we do
+	 * not leak an ioremap on every open. */
+	iounmap(ptr);
+
+error:
+	return ret;
+}
+
+static const struct file_operations security_watchdog_fops = {
+ .open = open_security,
+};
+
+/* debugfs "trigger" write handler: any write panics the kernel to
+ * exercise the watchdog path.  Signature matches
+ * file_operations.write (ssize_t return, __user buffer) so the
+ * assignment in kwd_trigger_fops is type-correct. */
+static ssize_t kwd_trigger_write(struct file *file, const char __user *buff,
+				 size_t count, loff_t *ppos)
+{
+	pr_debug("kwd_trigger_write\n");
+	panic("Kernel watchdog triggered\n");
+	return 0;
+}
+
+static const struct file_operations kwd_trigger_fops = {
+ .open = nonseekable_open,
+ .write = kwd_trigger_write,
+ .llseek = no_llseek,
+};
+
+/* release handler for the reset_type debugfs file: nothing to free */
+static int kwd_reset_type_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* Read back the current watchdog reset action as a text string
+ * ("COLD_RESET", "COLD_BOOT", ...) followed by a newline.  A failing
+ * copy_to_user now reports -EFAULT instead of claiming success. */
+static ssize_t kwd_reset_type_read(struct file *file, char __user *buff,
+				size_t count, loff_t *ppos)
+{
+	ssize_t len;
+	int ret;
+	char str[STRING_RESET_TYPE_MAX_LEN + 1];
+
+	pr_debug("reading reset_type of %x\n",
+			watchdog_device.normal_wd_action);
+
+	/* Single-shot read: subsequent reads return EOF */
+	if (*ppos > 0)
+		return 0;
+
+	ret = reset_type_to_string(watchdog_device.normal_wd_action, str);
+	if (ret)
+		return -EINVAL;
+
+	/* Measure the string and append a newline in place of the NUL */
+	for (len = 0; len < (STRING_RESET_TYPE_MAX_LEN - 1)
+			&& str[len] != '\0'; len++)
+		;
+	str[len++] = '\n';
+
+	/* Propagate a user-copy fault instead of silently ignoring it */
+	if (copy_to_user(buff, str, len))
+		return -EFAULT;
+
+	*ppos += len;
+	return len;
+}
+
+/* Parse a reset-action name written to the debugfs file and program it
+ * into the SCU firmware.  At most STRING_RESET_TYPE_MAX_LEN-1 payload
+ * characters are considered (buffer stays NUL-terminated). */
+static ssize_t kwd_reset_type_write(struct file *file, const char __user *buff,
+				size_t count, loff_t *ppos)
+{
+	char str[STRING_RESET_TYPE_MAX_LEN];
+	unsigned long res;
+	int ret, reset_type;
+
+	if (count > STRING_RESET_TYPE_MAX_LEN) {
+		/* count is size_t: use %zu, not %d */
+		pr_err("Invalid size: count=%zu\n", count);
+		return -EINVAL;
+	}
+
+	memset(str, 0x00, STRING_RESET_TYPE_MAX_LEN);
+
+	/* Copy at most count-1 bytes, leaving the final NUL in place */
+	res = copy_from_user((void *)str,
+		(void __user *)buff,
+		(unsigned long)min((unsigned long)(count-1),
+		(unsigned long)(STRING_RESET_TYPE_MAX_LEN-1)));
+
+	if (res) {
+		pr_err("%s: copy from user failed\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("writing reset_type of %s\n", str);
+
+	ret = string_to_reset_type(str, &reset_type);
+	if (ret) {
+		pr_err("Invalid value\n");
+		return -EINVAL;
+	}
+
+	ret = watchdog_set_reset_type(reset_type);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations kwd_reset_type_fops = {
+ .open = nonseekable_open,
+ .release = kwd_reset_type_release,
+ .read = kwd_reset_type_read,
+ .write = kwd_reset_type_write,
+ .llseek = no_llseek,
+};
+
+/* Report the panic_reboot_notifier flag as "1\n" or "0\n".
+ * Single-shot read (EOF on subsequent reads via *ppos). */
+static ssize_t kwd_panic_reboot_read(struct file *file, char __user *buff,
+				size_t count, loff_t *ppos)
+{
+	# define RET_SIZE 3 /* prints only 2 chars : '0' or '1', plus '\n' */
+	char str[RET_SIZE];
+
+	int res;
+
+	if (*ppos > 0)
+		return 0;
+
+	strcpy(str, watchdog_device.panic_reboot_notifier ? "1\n" : "0\n");
+
+	/* Copies the trailing NUL too (RET_SIZE bytes), but only the
+	 * two visible characters are accounted for in the return. */
+	res = copy_to_user(buff, str, RET_SIZE);
+	if (res) {
+		pr_err("%s: copy to user failed\n", __func__);
+		return -EINVAL;
+	}
+
+	*ppos += RET_SIZE-1;
+	return RET_SIZE-1;
+}
+
+
+/* Arm the debug BUG()-on-reboot hook: any write, regardless of
+ * content, latches the flag (there is no way to clear it here). */
+static ssize_t kwd_panic_reboot_write(struct file *file,
+		const char __user *buff, size_t count, loff_t *ppos)
+{
+	/* whatever is written, simply set flag to TRUE */
+	watchdog_device.panic_reboot_notifier = true;
+
+	return count;
+}
+
+
+static const struct file_operations kwd_panic_reboot_fops = {
+ .open = nonseekable_open,
+ .read = kwd_panic_reboot_read,
+ .write = kwd_panic_reboot_write,
+ .llseek = no_llseek,
+};
+
+/* Tear down the whole /sys/kernel/debug/watchdog tree in one call;
+ * debugfs_remove_recursive() tolerates a NULL root. */
+static int remove_debugfs_entries(void)
+{
+	struct intel_scu_watchdog_dev *dev = &watchdog_device;
+
+	/* /sys/kernel/debug/watchdog */
+	debugfs_remove_recursive(dev->dfs_wd);
+
+	return 0;
+}
+
+/* Build the /sys/kernel/debug/watchdog hierarchy:
+ *   watchdog/security_watchdog/trigger
+ *   watchdog/kernel_watchdog/{trigger,reset_type,panic_reboot_notifier}
+ * On any failure, everything created so far is removed and 1 is
+ * returned (0 on success). */
+static int create_debugfs_entries(void)
+{
+	struct intel_scu_watchdog_dev *dev = &watchdog_device;
+
+	/* /sys/kernel/debug/watchdog */
+	dev->dfs_wd = debugfs_create_dir("watchdog", NULL);
+	if (!dev->dfs_wd) {
+		pr_err("%s: Error, cannot create main dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/security_watchdog */
+	dev->dfs_secwd = debugfs_create_dir("security_watchdog", dev->dfs_wd);
+	if (!dev->dfs_secwd) {
+		pr_err("%s: Error, cannot create sec dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/security_watchdog/trigger */
+	dev->dfs_secwd_trigger = debugfs_create_file("trigger",
+				    S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				    dev->dfs_secwd, NULL,
+				    &security_watchdog_fops);
+
+	if (!dev->dfs_secwd_trigger) {
+		pr_err("%s: Error, cannot create sec file\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog */
+	dev->dfs_kwd = debugfs_create_dir("kernel_watchdog", dev->dfs_wd);
+	if (!dev->dfs_kwd) {
+		pr_err("%s: Error, cannot create kwd dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/trigger */
+	dev->dfs_kwd_trigger = debugfs_create_file("trigger",
+				    S_IFREG | S_IWUSR | S_IWGRP,
+				    dev->dfs_kwd, NULL,
+				    &kwd_trigger_fops);
+
+	if (!dev->dfs_kwd_trigger) {
+		pr_err("%s: Error, cannot create kwd trigger file\n",
+			__func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/reset_type
+	 * Store in the dedicated dfs_kwd_reset_type field; the previous
+	 * code clobbered dfs_kwd_trigger here. */
+	dev->dfs_kwd_reset_type = debugfs_create_file("reset_type",
+				    S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				    dev->dfs_kwd, NULL,
+				    &kwd_reset_type_fops);
+
+	if (!dev->dfs_kwd_reset_type) {
+		pr_err("%s: Error, cannot create kwd reset_type file\n",
+			__func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/panic_reboot_notifier */
+	dev->dfs_kwd_panic_reboot = debugfs_create_file("panic_reboot_notifier",
+				    S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				    dev->dfs_kwd, NULL,
+				    &kwd_panic_reboot_fops);
+
+	if (!dev->dfs_kwd_panic_reboot) {
+		pr_err("%s: Error, cannot create kwd panic_reboot_notifier file\n",
+			__func__);
+		goto error;
+	}
+
+
+	return 0;
+error:
+	remove_debugfs_entries();
+	return 1;
+}
+#endif /* CONFIG_DEBUG_FS*/
+
+/* Kernel Interfaces */
+/* file operations for the /dev/watchdog misc device; the same ioctl
+ * handler serves native and compat callers (all args are 32-bit). */
+static const struct file_operations intel_scu_fops = {
+	.owner          = THIS_MODULE,
+	.llseek         = no_llseek,
+	.write          = intel_scu_write,
+	.unlocked_ioctl = intel_scu_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = intel_scu_ioctl,
+#endif
+	.open           = intel_scu_open,
+	.release        = intel_scu_release,
+};
+
+/* sysfs entry to disable watchdog */
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+/* sysfs "disable" store: "1"/"true" stops the watchdog, "0"/"false"
+ * reconfigures and restarts it with the current timeouts.  Anything
+ * strtobool rejects yields -EINVAL. */
+static ssize_t disable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	if (!strtobool(buf, &disable_kernel_watchdog)) {
+		if (disable_kernel_watchdog) {
+			ret = watchdog_stop();
+			if (ret)
+				pr_err("cannot disable the timer\n");
+		} else {
+			ret = watchdog_config_and_start(timeout, pre_timeout);
+			if (ret)
+				return -EINVAL;
+		}
+	} else {
+		pr_err("got invalid value\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+/* sysfs "disable" show: report the current disable flag as "1\n"
+ * or "0\n". */
+static ssize_t disable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	pr_debug("%s\n", __func__);
+
+	return sprintf(buf, disable_kernel_watchdog ? "1\n" : "0\n");
+}
+
+static DEVICE_ATTR(disable, S_IWUSR | S_IRUGO,
+ disable_show, disable_store);
+
+#endif
+
+/* Watchdog counter lives in bits 7:4 of the OSNIB byte */
+#define OSNIB_WDOG_COUNTER_MASK 0xF0
+#define OSNIB_WDOG_COUNTER_SHIFT 4
+#define WDOG_COUNTER_MAX_VALUE 3
+/* sysfs "counter" store: parse a small unsigned value, clamp it to
+ * WDOG_COUNTER_MAX_VALUE, shift it into the OSNIB counter field and
+ * persist it via the SCU OSNIB write. */
+static ssize_t counter_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = sscanf(buf, "%hhu", &osnib_reset);
+	if (ret != 1) {
+		pr_err(PFX "cannot get counter value\n");
+		if (ret == 0)
+			ret = -EINVAL;
+		return ret;
+	}
+	if (osnib_reset > WDOG_COUNTER_MAX_VALUE)
+		osnib_reset = WDOG_COUNTER_MAX_VALUE;
+	osnib_reset = ((osnib_reset << OSNIB_WDOG_COUNTER_SHIFT) &
+			OSNIB_WDOG_COUNTER_MASK);
+	ret = intel_scu_ipc_write_osnib_wd(&osnib_reset);
+
+	if (ret != 0) {
+		pr_err(PFX "cannot write OSNIB\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+
+/* sysfs "counter" show: read the OSNIB byte from the SCU and extract
+ * the watchdog counter field (bits 7:4). */
+static ssize_t counter_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char osnib_read = (unsigned char)0;
+	int ret;
+	pr_debug("%s\n", __func__);
+
+	ret = intel_scu_ipc_read_osnib_wd(&osnib_read);
+
+	if (ret != 0)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", (int)((osnib_read & OSNIB_WDOG_COUNTER_MASK)
+						>> OSNIB_WDOG_COUNTER_SHIFT));
+}
+
+/* Map an SCU reset-action constant to its display string.
+ * 'string' must hold at least STRING_RESET_TYPE_MAX_LEN bytes.
+ * Returns 0 on success, 1 for an unknown reset_type. */
+static int reset_type_to_string(int reset_type, char *string)
+{
+	switch (reset_type) {
+	case SCU_COLD_BOOT_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_BOOT);
+		break;
+	case SCU_COLD_RESET_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_RESET);
+		break;
+	case SCU_COLD_OFF_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_OFF);
+		break;
+#ifdef CONFIG_DEBUG_FS
+	case SCU_DO_NOTHING_ON_TIMEOUT:
+		/* The IPC command DONOTHING is provided */
+		/* for debug purpose only. */
+		strcpy(string, STRING_NONE);
+		break;
+#endif
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Inverse of reset_type_to_string(): parse a reset-action name
+ * (prefix match, so a trailing newline is tolerated) into the SCU
+ * constant.  Returns 0 on success, 1 on NULL args or unknown name. */
+static int string_to_reset_type(const char *string, int *reset_type)
+{
+	if (!reset_type || !string)
+		return 1;
+
+	if (strncmp(string, STRING_COLD_RESET,
+			sizeof(STRING_COLD_RESET) - 1) == 0) {
+		*reset_type = SCU_COLD_RESET_ON_TIMEOUT;
+		return 0;
+	}
+	if (strncmp(string, STRING_COLD_BOOT,
+			sizeof(STRING_COLD_BOOT) - 1) == 0) {
+		*reset_type = SCU_COLD_BOOT_ON_TIMEOUT;
+		return 0;
+	}
+	if (strncmp(string, STRING_COLD_OFF,
+			sizeof(STRING_COLD_OFF) - 1) == 0) {
+		*reset_type = SCU_COLD_OFF_ON_TIMEOUT;
+		return 0;
+	}
+#ifdef CONFIG_DEBUG_FS
+	if (strncmp(string, STRING_NONE,
+			sizeof(STRING_NONE) - 1) == 0) {
+		*reset_type = SCU_DO_NOTHING_ON_TIMEOUT;
+		return 0;
+	}
+#endif
+	/* We should not be here, this is an error case */
+	pr_debug("Invalid reset type value\n");
+	return 1;
+}
+
+/* sysfs hook written by userspace when a reboot begins: reprogram the
+ * watchdog timeouts and select the reboot reset action.  The reset-type
+ * result is now actually captured in ret (previously the stale value
+ * from the timeout call was tested). */
+static ssize_t reboot_ongoing_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	/* reprogram timeouts. if error : continue */
+	ret = watchdog_set_appropriate_timeouts();
+	if (ret)
+		pr_err("%s: could not set timeouts\n", __func__);
+
+	/* restore reset type */
+	ret = watchdog_set_reset_type(watchdog_device.reboot_wd_action);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+/* sysfs hook written by userspace when a shutdown begins: reprogram the
+ * watchdog timeouts and select the shutdown reset action.  The
+ * reset-type result is now actually captured in ret (previously the
+ * stale value from the timeout call was tested). */
+static ssize_t shutdown_ongoing_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	/* reprogram timeouts. if error : continue */
+	ret = watchdog_set_appropriate_timeouts();
+	if (ret)
+		pr_err("%s: could not set timeouts\n", __func__);
+
+	/* restore reset type */
+	ret = watchdog_set_reset_type(watchdog_device.shutdown_wd_action);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+/* sysfs show: normal-operation reset action as text + newline */
+static ssize_t normal_config_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.normal_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+/* sysfs store: parse and immediately program the normal-operation
+ * reset action into the SCU firmware */
+static ssize_t normal_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.normal_wd_action) != 0)
+		return -EINVAL;
+	if (watchdog_set_reset_type(watchdog_device.normal_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+/* sysfs show: reset action that will be used during reboot */
+static ssize_t reboot_config_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.reboot_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+/* sysfs store: only caches the reboot reset action; it is programmed
+ * later by the reboot notifier / reboot_ongoing hook */
+static ssize_t reboot_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.reboot_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+/* sysfs show: reset action that will be used during halt/power-off */
+static ssize_t shutdown_config_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.shutdown_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+/* sysfs store: only caches the shutdown reset action; it is programmed
+ * later by the reboot notifier / shutdown_ongoing hook */
+static ssize_t shutdown_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.shutdown_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+/* Watchdog behavior depending on system phase */
+static DEVICE_ATTR(normal_config, S_IWUSR | S_IRUGO,
+ normal_config_show, normal_config_store);
+static DEVICE_ATTR(reboot_config, S_IWUSR | S_IRUGO,
+ reboot_config_show, reboot_config_store);
+static DEVICE_ATTR(shutdown_config, S_IWUSR | S_IRUGO,
+ shutdown_config_show, shutdown_config_store);
+static DEVICE_ATTR(reboot_ongoing, S_IWUSR,
+ NULL, reboot_ongoing_store);
+static DEVICE_ATTR(shutdown_ongoing, S_IWUSR,
+ NULL, shutdown_ongoing_store);
+
+/* Reset counter watchdog entry */
+static DEVICE_ATTR(counter, S_IWUSR | S_IRUGO,
+ counter_show, counter_store);
+
+
+/* Create all sysfs attributes on the misc device.
+ * NOTE(review): on a mid-sequence failure the attributes created so
+ * far are not removed here; callers rely on remove_watchdog_sysfs_files
+ * (device_remove_file tolerates missing attributes). */
+int create_watchdog_sysfs_files(void)
+{
+	int ret;
+
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_disable);
+	if (ret) {
+		pr_warn("cant register dev file for disable\n");
+		return ret;
+	}
+#endif
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_normal_config);
+	if (ret) {
+		pr_warn("cant register dev file for normal_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_config);
+	if (ret) {
+		pr_warn("cant register dev file for reboot_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_config);
+	if (ret) {
+		pr_warn("cant register dev file for shutdown_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_counter);
+	if (ret) {
+		pr_warn("cant register dev file for counter\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_ongoing);
+	if (ret) {
+		pr_warn("cant register dev file for reboot_ongoing\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_ongoing);
+	if (ret) {
+		pr_warn("cant register dev file for shutdown_ongoing\n");
+		return ret;
+	}
+	return 0;
+}
+
+/* Remove every sysfs attribute created by create_watchdog_sysfs_files;
+ * safe to call even if some attributes were never created. */
+int remove_watchdog_sysfs_files(void)
+{
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_disable);
+#endif
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_normal_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_counter);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_ongoing);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_ongoing);
+	return 0;
+}
+
+/* Route the given interrupt through the IOAPIC (level trigger, active
+ * high), so the SCU warning MSI can be requested with request_irq().
+ * Returns 0 on success, -EINVAL when no IOAPIC serves that irq. */
+static int handle_mrfl_dev_ioapic(int irq)
+{
+	int ret = 0;
+	int ioapic;
+	struct io_apic_irq_attr irq_attr;
+
+	ioapic = mp_find_ioapic(irq);
+	if (ioapic >= 0) {
+		irq_attr.ioapic = ioapic;
+		irq_attr.ioapic_pin = irq;
+		irq_attr.trigger = 1;
+		irq_attr.polarity = 0; /* Active high */
+		io_apic_set_pci_routing(NULL, irq, &irq_attr);
+	} else {
+		pr_warn("can not find interrupt %d in ioapic\n", irq);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Init code.
+ * Sets default reset actions, validates (possibly module-param
+ * supplied) timeouts, registers the reboot notifier, the /dev/watchdog
+ * misc device (unless disabled), the pre-timeout warning IRQ, debugfs
+ * and sysfs entries.  Error paths unwind in reverse order.
+ * Fixes vs. previous version: misc_deregister is no longer called on
+ * the error path when the misc device was never registered, and a
+ * sysfs-creation failure now also removes the debugfs tree. */
+static int intel_scu_watchdog_init(void)
+{
+	int ret = 0;
+
+	watchdog_device.normal_wd_action   = SCU_COLD_RESET_ON_TIMEOUT;
+	watchdog_device.reboot_wd_action   = SCU_COLD_RESET_ON_TIMEOUT;
+	watchdog_device.shutdown_wd_action = SCU_COLD_OFF_ON_TIMEOUT;
+
+#ifdef CONFIG_DEBUG_FS
+	watchdog_device.panic_reboot_notifier = false;
+#endif /* CONFIG_DEBUG_FS */
+
+	/* Initially, we are not in shutdown mode */
+	watchdog_device.shutdown_flag = false;
+
+	/* Since timeout can be set by MODULE_PARAM, need to reset pre_timeout */
+	pre_timeout = timeout-15;
+
+	/* Check timeouts boot parameter */
+	if (check_timeouts(pre_timeout, timeout)) {
+		pr_err("%s: Invalid timeouts\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Reboot notifier */
+	watchdog_device.reboot_notifier.notifier_call = reboot_notifier;
+	watchdog_device.reboot_notifier.priority = 1;
+	ret = register_reboot_notifier(&watchdog_device.reboot_notifier);
+	if (ret) {
+		pr_crit("cannot register reboot notifier %d\n", ret);
+		goto error_stop_timer;
+	}
+
+	/* Do not publish the watchdog device when disable (TO BE REMOVED) */
+	if (!disable_kernel_watchdog) {
+		watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+		watchdog_device.miscdev.name = "watchdog";
+		watchdog_device.miscdev.fops = &intel_scu_fops;
+
+		ret = misc_register(&watchdog_device.miscdev);
+		if (ret) {
+			pr_crit("Cannot register miscdev %d err =%d\n",
+				WATCHDOG_MINOR, ret);
+			goto error_reboot_notifier;
+		}
+	}
+
+	/* MSI #12 handler to dump registers */
+	handle_mrfl_dev_ioapic(EXT_TIMER0_MSI);
+	ret = request_irq((unsigned int)EXT_TIMER0_MSI,
+		watchdog_warning_interrupt,
+		IRQF_SHARED|IRQF_NO_SUSPEND, "watchdog",
+		&watchdog_device);
+	if (ret) {
+		pr_err("error requesting warning irq %d\n",
+		       EXT_TIMER0_MSI);
+		pr_err("error value returned is %d\n", ret);
+		goto error_misc_register;
+	}
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+	init_timer(&softlock_timer);
+#endif
+
+	if (disable_kernel_watchdog) {
+		pr_err("%s: Disable kernel watchdog\n", __func__);
+
+		/* Make sure timer is stopped */
+		ret = watchdog_stop();
+		if (ret != 0)
+			pr_debug("cant disable timer\n");
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	ret = create_debugfs_entries();
+	if (ret) {
+		pr_err("%s: Error creating debugfs entries\n", __func__);
+		goto error_debugfs_entry;
+	}
+#endif
+
+	watchdog_device.started = false;
+
+	ret = create_watchdog_sysfs_files();
+	if (ret) {
+		pr_err("%s: Error creating sysfs entries\n", __func__);
+		goto error_sysfs_entry;
+	}
+
+	return ret;
+
+error_sysfs_entry:
+#ifdef CONFIG_DEBUG_FS
+	/* debugfs tree was fully created at this point: tear it down.
+	 * (create_debugfs_entries cleans up after itself on its own
+	 * failure, so nothing more is needed at the label below.) */
+	remove_debugfs_entries();
+
+error_debugfs_entry:
+#endif
+
+error_misc_register:
+	/* Only deregister what was actually registered */
+	if (!disable_kernel_watchdog)
+		misc_deregister(&watchdog_device.miscdev);
+
+error_reboot_notifier:
+	unregister_reboot_notifier(&watchdog_device.reboot_notifier);
+
+error_stop_timer:
+	watchdog_stop();
+
+	return ret;
+}
+
+/* Teardown: mirror of intel_scu_watchdog_init, in reverse order.
+ * NOTE(review): misc_deregister is called unconditionally even though
+ * init skips misc_register when disable_kernel_watchdog is set —
+ * confirm against the module parameter semantics. */
+static void intel_scu_watchdog_exit(void)
+{
+	int ret = 0;
+
+	remove_watchdog_sysfs_files();
+#ifdef CONFIG_DEBUG_FS
+	remove_debugfs_entries();
+#endif
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+	del_timer_sync(&softlock_timer);
+#endif
+
+	ret = watchdog_stop();
+	if (ret != 0)
+		pr_err("cant disable timer\n");
+
+	misc_deregister(&watchdog_device.miscdev);
+	unregister_reboot_notifier(&watchdog_device.reboot_notifier);
+}
+
+/* rpmsg probe: allocate and initialise the rpmsg instance, then run the
+ * full watchdog init.  A NULL instance after allocation now reports
+ * -ENOMEM instead of possibly returning 0 (probe "success" without an
+ * instance). */
+static int watchdog_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed watchdog rpmsg device\n");
+
+	/* Allocate rpmsg instance for watchdog*/
+	ret = alloc_rpmsg_instance(rpdev, &watchdog_instance);
+	if (!watchdog_instance) {
+		dev_err(&rpdev->dev, "kzalloc watchdog instance failed\n");
+		if (!ret)
+			ret = -ENOMEM;
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(watchdog_instance);
+	/* Init scu watchdog */
+	ret = intel_scu_watchdog_init();
+
+	if (ret)
+		free_rpmsg_instance(rpdev, &watchdog_instance);
+out:
+	return ret;
+}
+
+/* rpmsg remove: undo probe — full watchdog teardown, then release the
+ * rpmsg instance */
+static void watchdog_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	intel_scu_watchdog_exit();
+	free_rpmsg_instance(rpdev, &watchdog_instance);
+	dev_info(&rpdev->dev, "Removed watchdog rpmsg device\n");
+}
+
+/* rpmsg callback: no unsolicited messages are expected on this channel;
+ * log and hex-dump whatever arrives for debugging */
+static void watchdog_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+static struct rpmsg_device_id watchdog_rpmsg_id_table[] = {
+ { .name = "rpmsg_watchdog" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, watchdog_rpmsg_id_table);
+
+static struct rpmsg_driver watchdog_rpmsg = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = watchdog_rpmsg_id_table,
+ .probe = watchdog_rpmsg_probe,
+ .callback = watchdog_rpmsg_cb,
+ .remove = watchdog_rpmsg_remove,
+};
+
+/* Module entry: this driver is only valid on Tangier (Merrifield)
+ * silicon; bail out with -ENODEV everywhere else. */
+static int __init watchdog_rpmsg_init(void)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) {
+		pr_err("%s: watchdog driver: bad platform\n", __func__);
+		return -ENODEV;
+	}
+
+	return register_rpmsg_driver(&watchdog_rpmsg);
+}
+
+#ifdef MODULE
+module_init(watchdog_rpmsg_init);
+#else
+rootfs_initcall(watchdog_rpmsg_init);
+#endif
+
+/* Module exit: unregister the rpmsg driver.  Returning a void
+ * expression from a void function is a C constraint violation (GNU
+ * extension), so call it as a plain statement. */
+static void __exit watchdog_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&watchdog_rpmsg);
+}
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_AUTHOR("mark.a.allyn@intel.com");
+MODULE_AUTHOR("yannx.puech@intel.com");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
--- /dev/null
+/*
+ * Intel_SCU 0.3: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+#define PFX "intel_scu_watchdog: "
+#define WDT_VER "0.3"
+
+#define DEFAULT_PRETIMEOUT 75
+#define DEFAULT_TIMEOUT 90
+
+/* Value 0 to reset the reset counter */
+#define OSNIB_WRITE_VALUE 0
+
+/* Per-driver state for the SCU watchdog (single global instance). */
+struct intel_scu_watchdog_dev {
+	ulong driver_open;		/* bit 0: device currently open */
+	ulong driver_closed;		/* set once closed after start; bars reopen */
+	bool started;			/* SCU timer armed */
+	struct notifier_block reboot_notifier;
+	struct miscdevice miscdev;	/* /dev/watchdog */
+	bool shutdown_flag;		/* true: drop further keep-alives */
+	int reset_type;
+	int normal_wd_action;		/* SCU action on timeout (running) */
+	int reboot_wd_action;		/* SCU action used during reboot */
+	int shutdown_wd_action;		/* SCU action used during halt/off */
+#ifdef CONFIG_DEBUG_FS
+	bool panic_reboot_notifier;	/* debug: BUG() in reboot notifier */
+	struct dentry *dfs_wd;		/* debugfs roots and files below */
+	struct dentry *dfs_secwd;
+	struct dentry *dfs_secwd_trigger;
+	struct dentry *dfs_kwd;
+	struct dentry *dfs_kwd_trigger;
+	struct dentry *dfs_kwd_reset_type;
+	struct dentry *dfs_kwd_panic_reboot;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+#endif /* __INTEL_SCU_WATCHDOG_H */
fw-shipped-$(CONFIG_USB_VICAM) += vicam/firmware.fw
fw-shipped-$(CONFIG_VIDEO_CPIA2) += cpia2/stv0672_vp4.bin
fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
+fw-shipped-$(CONFIG_INTEL_MID_REMOTEPROC) += intel_mid/intel_mid_remoteproc.fw
fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
--- /dev/null
+:100000007F454C4601010100000000000000000097
+:100010000200030001000000240000003400000082
+:10002000F010000000000000340020000100280053
+:1000300003000200040000000000000000000000B7
+:1000400000000000000000000000000007000000A9
+:10005000040000000000000000000000000000009C
+:100060000000000000000000000000000000000090
+:100070000000000000000000000000000000000080
+:100080000000000000000000000000000000000070
+:100090000000000000000000000000000000000060
+:1000A0000000000000000000000000000000000050
+:1000B0000000000000000000000000000000000040
+:1000C0000000000000000000000000000000000030
+:1000D0000000000000000000000000000000000020
+:1000E0000000000000000000000000000000000010
+:1000F0000000000000000000000000000000000000
+:1001000000000000000000000000000000000000EF
+:1001100000000000000000000000000000000000DF
+:1001200000000000000000000000000000000000CF
+:1001300000000000000000000000000000000000BF
+:1001400000000000000000000000000000000000AF
+:10015000000000000000000000000000000000009F
+:10016000000000000000000000000000000000008F
+:10017000000000000000000000000000000000007F
+:10018000000000000000000000000000000000006F
+:10019000000000000000000000000000000000005F
+:1001A000000000000000000000000000000000004F
+:1001B000000000000000000000000000000000003F
+:1001C000000000000000000000000000000000002F
+:1001D000000000000000000000000000000000001F
+:1001E000000000000000000000000000000000000F
+:1001F00000000000000000000000000000000000FF
+:1002000000000000000000000000000000000000EE
+:1002100000000000000000000000000000000000DE
+:1002200000000000000000000000000000000000CE
+:1002300000000000000000000000000000000000BE
+:1002400000000000000000000000000000000000AE
+:10025000000000000000000000000000000000009E
+:10026000000000000000000000000000000000008E
+:10027000000000000000000000000000000000007E
+:10028000000000000000000000000000000000006E
+:10029000000000000000000000000000000000005E
+:1002A000000000000000000000000000000000004E
+:1002B000000000000000000000000000000000003E
+:1002C000000000000000000000000000000000002E
+:1002D000000000000000000000000000000000001E
+:1002E000000000000000000000000000000000000E
+:1002F00000000000000000000000000000000000FE
+:1003000000000000000000000000000000000000ED
+:1003100000000000000000000000000000000000DD
+:1003200000000000000000000000000000000000CD
+:1003300000000000000000000000000000000000BD
+:1003400000000000000000000000000000000000AD
+:10035000000000000000000000000000000000009D
+:10036000000000000000000000000000000000008D
+:10037000000000000000000000000000000000007D
+:10038000000000000000000000000000000000006D
+:10039000000000000000000000000000000000005D
+:1003A000000000000000000000000000000000004D
+:1003B000000000000000000000000000000000003D
+:1003C000000000000000000000000000000000002D
+:1003D000000000000000000000000000000000001D
+:1003E000000000000000000000000000000000000D
+:1003F00000000000000000000000000000000000FD
+:1004000000000000000000000000000000000000EC
+:1004100000000000000000000000000000000000DC
+:1004200000000000000000000000000000000000CC
+:1004300000000000000000000000000000000000BC
+:1004400000000000000000000000000000000000AC
+:10045000000000000000000000000000000000009C
+:10046000000000000000000000000000000000008C
+:10047000000000000000000000000000000000007C
+:10048000000000000000000000000000000000006C
+:10049000000000000000000000000000000000005C
+:1004A000000000000000000000000000000000004C
+:1004B000000000000000000000000000000000003C
+:1004C000000000000000000000000000000000002C
+:1004D000000000000000000000000000000000001C
+:1004E000000000000000000000000000000000000C
+:1004F00000000000000000000000000000000000FC
+:1005000000000000000000000000000000000000EB
+:1005100000000000000000000000000000000000DB
+:1005200000000000000000000000000000000000CB
+:1005300000000000000000000000000000000000BB
+:1005400000000000000000000000000000000000AB
+:10055000000000000000000000000000000000009B
+:10056000000000000000000000000000000000008B
+:10057000000000000000000000000000000000007B
+:10058000000000000000000000000000000000006B
+:10059000000000000000000000000000000000005B
+:1005A000000000000000000000000000000000004B
+:1005B000000000000000000000000000000000003B
+:1005C000000000000000000000000000000000002B
+:1005D000000000000000000000000000000000001B
+:1005E000000000000000000000000000000000000B
+:1005F00000000000000000000000000000000000FB
+:1006000000000000000000000000000000000000EA
+:1006100000000000000000000000000000000000DA
+:1006200000000000000000000000000000000000CA
+:1006300000000000000000000000000000000000BA
+:1006400000000000000000000000000000000000AA
+:10065000000000000000000000000000000000009A
+:10066000000000000000000000000000000000008A
+:10067000000000000000000000000000000000007A
+:10068000000000000000000000000000000000006A
+:10069000000000000000000000000000000000005A
+:1006A000000000000000000000000000000000004A
+:1006B000000000000000000000000000000000003A
+:1006C000000000000000000000000000000000002A
+:1006D000000000000000000000000000000000001A
+:1006E000000000000000000000000000000000000A
+:1006F00000000000000000000000000000000000FA
+:1007000000000000000000000000000000000000E9
+:1007100000000000000000000000000000000000D9
+:1007200000000000000000000000000000000000C9
+:1007300000000000000000000000000000000000B9
+:1007400000000000000000000000000000000000A9
+:100750000000000000000000000000000000000099
+:100760000000000000000000000000000000000089
+:100770000000000000000000000000000000000079
+:100780000000000000000000000000000000000069
+:100790000000000000000000000000000000000059
+:1007A0000000000000000000000000000000000049
+:1007B0000000000000000000000000000000000039
+:1007C0000000000000000000000000000000000029
+:1007D0000000000000000000000000000000000019
+:1007E0000000000000000000000000000000000009
+:1007F00000000000000000000000000000000000F9
+:1008000000000000000000000000000000000000E8
+:1008100000000000000000000000000000000000D8
+:1008200000000000000000000000000000000000C8
+:1008300000000000000000000000000000000000B8
+:1008400000000000000000000000000000000000A8
+:100850000000000000000000000000000000000098
+:100860000000000000000000000000000000000088
+:100870000000000000000000000000000000000078
+:100880000000000000000000000000000000000068
+:100890000000000000000000000000000000000058
+:1008A0000000000000000000000000000000000048
+:1008B0000000000000000000000000000000000038
+:1008C0000000000000000000000000000000000028
+:1008D0000000000000000000000000000000000018
+:1008E0000000000000000000000000000000000008
+:1008F00000000000000000000000000000000000F8
+:1009000000000000000000000000000000000000E7
+:1009100000000000000000000000000000000000D7
+:1009200000000000000000000000000000000000C7
+:1009300000000000000000000000000000000000B7
+:1009400000000000000000000000000000000000A7
+:100950000000000000000000000000000000000097
+:100960000000000000000000000000000000000087
+:100970000000000000000000000000000000000077
+:100980000000000000000000000000000000000067
+:100990000000000000000000000000000000000057
+:1009A0000000000000000000000000000000000047
+:1009B0000000000000000000000000000000000037
+:1009C0000000000000000000000000000000000027
+:1009D0000000000000000000000000000000000017
+:1009E0000000000000000000000000000000000007
+:1009F00000000000000000000000000000000000F7
+:100A000000000000000000000000000000000000E6
+:100A100000000000000000000000000000000000D6
+:100A200000000000000000000000000000000000C6
+:100A300000000000000000000000000000000000B6
+:100A400000000000000000000000000000000000A6
+:100A50000000000000000000000000000000000096
+:100A60000000000000000000000000000000000086
+:100A70000000000000000000000000000000000076
+:100A80000000000000000000000000000000000066
+:100A90000000000000000000000000000000000056
+:100AA0000000000000000000000000000000000046
+:100AB0000000000000000000000000000000000036
+:100AC0000000000000000000000000000000000026
+:100AD0000000000000000000000000000000000016
+:100AE0000000000000000000000000000000000006
+:100AF00000000000000000000000000000000000F6
+:100B000000000000000000000000000000000000E5
+:100B100000000000000000000000000000000000D5
+:100B200000000000000000000000000000000000C5
+:100B300000000000000000000000000000000000B5
+:100B400000000000000000000000000000000000A5
+:100B50000000000000000000000000000000000095
+:100B60000000000000000000000000000000000085
+:100B70000000000000000000000000000000000075
+:100B80000000000000000000000000000000000065
+:100B90000000000000000000000000000000000055
+:100BA0000000000000000000000000000000000045
+:100BB0000000000000000000000000000000000035
+:100BC0000000000000000000000000000000000025
+:100BD0000000000000000000000000000000000015
+:100BE0000000000000000000000000000000000005
+:100BF00000000000000000000000000000000000F5
+:100C000000000000000000000000000000000000E4
+:100C100000000000000000000000000000000000D4
+:100C200000000000000000000000000000000000C4
+:100C300000000000000000000000000000000000B4
+:100C400000000000000000000000000000000000A4
+:100C50000000000000000000000000000000000094
+:100C60000000000000000000000000000000000084
+:100C70000000000000000000000000000000000074
+:100C80000000000000000000000000000000000064
+:100C90000000000000000000000000000000000054
+:100CA0000000000000000000000000000000000044
+:100CB0000000000000000000000000000000000034
+:100CC0000000000000000000000000000000000024
+:100CD0000000000000000000000000000000000014
+:100CE0000000000000000000000000000000000004
+:100CF00000000000000000000000000000000000F4
+:100D000000000000000000000000000000000000E3
+:100D100000000000000000000000000000000000D3
+:100D200000000000000000000000000000000000C3
+:100D300000000000000000000000000000000000B3
+:100D400000000000000000000000000000000000A3
+:100D50000000000000000000000000000000000093
+:100D60000000000000000000000000000000000083
+:100D70000000000000000000000000000000000073
+:100D80000000000000000000000000000000000063
+:100D90000000000000000000000000000000000053
+:100DA0000000000000000000000000000000000043
+:100DB0000000000000000000000000000000000033
+:100DC0000000000000000000000000000000000023
+:100DD0000000000000000000000000000000000013
+:100DE0000000000000000000000000000000000003
+:100DF00000000000000000000000000000000000F3
+:100E000000000000000000000000000000000000E2
+:100E100000000000000000000000000000000000D2
+:100E200000000000000000000000000000000000C2
+:100E300000000000000000000000000000000000B2
+:100E400000000000000000000000000000000000A2
+:100E50000000000000000000000000000000000092
+:100E60000000000000000000000000000000000082
+:100E70000000000000000000000000000000000072
+:100E80000000000000000000000000000000000062
+:100E90000000000000000000000000000000000052
+:100EA0000000000000000000000000000000000042
+:100EB0000000000000000000000000000000000032
+:100EC0000000000000000000000000000000000022
+:100ED0000000000000000000000000000000000012
+:100EE0000000000000000000000000000000000002
+:100EF00000000000000000000000000000000000F2
+:100F000000000000000000000000000000000000E1
+:100F100000000000000000000000000000000000D1
+:100F200000000000000000000000000000000000C1
+:100F300000000000000000000000000000000000B1
+:100F400000000000000000000000000000000000A1
+:100F50000000000000000000000000000000000091
+:100F60000000000000000000000000000000000081
+:100F70000000000000000000000000000000000071
+:100F80000000000000000000000000000000000061
+:100F90000000000000000000000000000000000051
+:100FA0000000000000000000000000000000000041
+:100FB0000000000000000000000000000000000031
+:100FC0000000000000000000000000000000000021
+:100FD0000000000000000000000000000000000011
+:100FE0000000000000000000000000000000000001
+:100FF00000000000000000000000000000000000F1
+:1010000000000000000000000000000000000000E0
+:1010100000000000000000000000000000000000D0
+:1010200000000000000000000000000000000000C0
+:1010300000000000000000000000000000000000B0
+:10104000010000000200000000000000000000009D
+:10105000180000005C000000030000000700000012
+:10106000010000000100000001000000000000007D
+:10107000000200000000000001000000000100006C
+:10108000000000000000000000000000010000005F
+:10109000000100000000000000000000000000004F
+:1010A000000000000000000000020000000000003E
+:1010B0000000000000000000000000000000000030
+:1010C0000000000000000000000000000000000020
+:1010D00000000000002E7368737472746162002E49
+:1010E0007265736F757263655F7461626C65000031
+:1010F00000000000000000000000000000000000F0
+:1011000000000000000000000000000000000000DF
+:1011100000000000000000000B00000007000000BD
+:101120000300000040000000401000009400000098
+:10113000000000000000000020000000000000008F
+:10114000010000000300000000000000000000009B
+:10115000D41000001B000000000000000000000090
+:08116000010000000000000086
+:00000001FF
"to the inode key sigs; rc = [%d]\n", rc);
goto out;
}
+ /* TODO: Investigate side effect of truncating name if too long */
cipher_name_len =
- strlen(mount_crypt_stat->global_default_cipher_name);
+ min(strlen(mount_crypt_stat->global_default_cipher_name),
+ sizeof(crypt_stat->cipher)-1);
memcpy(crypt_stat->cipher,
mount_crypt_stat->global_default_cipher_name,
cipher_name_len);
int (*set_debounce)(struct gpio_chip *chip,
unsigned offset, unsigned debounce);
+ void (*set_pinmux)(int gpio, int alt);
+ int (*get_pinmux)(int gpio);
+
void (*set)(struct gpio_chip *chip,
unsigned offset, int value);
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
int *processed, unsigned int scale);
+int iio_read_channel_all_raw(struct iio_channel *chan, int *val);
+int iio_channel_get_name(const struct iio_channel *chan, char **chan_name);
+int iio_channel_get_num(const struct iio_channel *chan);
#endif
int *val2,
long mask);
+ int (*read_all_raw)(struct iio_channel *chan,
+ int *val);
+
int (*write_raw)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
IIO_ALTVOLTAGE,
IIO_CCT,
IIO_PRESSURE,
+ IIO_RESISTANCE,
};
enum iio_modifier {
#define DMA_PREP_CIRCULAR_LIST (1 << 10)
+#define SST_MAX_DMA_LEN 4095
+#define SST_MAX_DMA_LEN_MRFLD 131071 /* 2^17 - 1 */
+
+
/*DMA mode configurations*/
enum intel_mid_dma_mode {
LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
struct dma_slave_config dma_slave;
};
+struct device *intel_mid_get_acpi_dma(const char *hid);
+dma_addr_t intel_dma_get_src_addr(struct dma_chan *chan);
+dma_addr_t intel_dma_get_dst_addr(struct dma_chan *chan);
#endif /*__INTEL_MID_DMA_H__*/
--- /dev/null
+/*
+ * intel_mid_pm.h
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/errno.h>
+
+#ifndef INTEL_MID_PM_H
+#define INTEL_MID_PM_H
+
+#include <asm/intel-mid.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+
+/* Chip ID of Intel Atom SOC*/
+#define INTEL_ATOM_MRST 0x26
+#define INTEL_ATOM_MFLD 0x27
+#define INTEL_ATOM_CLV 0x35
+#define INTEL_ATOM_MRFLD 0x4a
+#define INTEL_ATOM_BYT 0x37
+
+static inline int platform_is(u8 model)
+{
+ return (boot_cpu_data.x86_model == model);
+}
+
+/* Register Type definitions */
+#define OSPM_REG_TYPE 0x0
+#define APM_REG_TYPE 0x1
+#define OSPM_MAX_POWER_ISLANDS 16
+#define OSPM_ISLAND_UP 0x0
+#define OSPM_ISLAND_DOWN 0x1
+/*Soft reset*/
+#define OSPM_ISLAND_SR 0x2
+
+/* North complex power islands definitions for APM block*/
+#define APM_GRAPHICS_ISLAND 0x1
+#define APM_VIDEO_DEC_ISLAND 0x2
+#define APM_VIDEO_ENC_ISLAND 0x4
+#define APM_GL3_CACHE_ISLAND 0x8
+#define APM_ISP_ISLAND 0x10
+#define APM_IPH_ISLAND 0x20
+
+/* North complex power islands definitions for OSPM block*/
+#define OSPM_DISPLAY_A_ISLAND 0x2
+#define OSPM_DISPLAY_B_ISLAND 0x80
+#define OSPM_DISPLAY_C_ISLAND 0x100
+#define OSPM_MIPI_ISLAND 0x200
+
+/* North Complex power islands definitions for Tangier */
+#define TNG_ISP_ISLAND 0x1
+/* North Complex Register definitions for Tangier */
+#define ISP_SS_PM0 0x39
+
+#define C4_STATE_IDX 3
+#define C6_STATE_IDX 4
+#define S0I1_STATE_IDX 5
+#define LPMP3_STATE_IDX 6
+#define S0I3_STATE_IDX 7
+
+#define C4_HINT (0x30)
+#define C6_HINT (0x52)
+
+#define CSTATE_EXIT_LATENCY_C1 1
+#define CSTATE_EXIT_LATENCY_C2 20
+#define CSTATE_EXIT_LATENCY_C4 100
+#define CSTATE_EXIT_LATENCY_C6 140
+
+/* Since entry latency is substantial
+ * put exit_latency = entry+exit latency
+ */
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+#define CSTATE_EXIT_LATENCY_S0i1 1200
+#define CSTATE_EXIT_LATENCY_S0i2 2000
+#define CSTATE_EXIT_LATENCY_S0i3 10000
+#else
+#define CSTATE_EXIT_LATENCY_LPMP3 1040
+#define CSTATE_EXIT_LATENCY_S0i1 1040
+#define CSTATE_EXIT_LATENCY_S0i3 2800
+#endif
+#define BYT_S0I1_STATE 0x60
+#define BYT_S0I2_STATE 0x62
+#define BYT_LPMP3_STATE 0x62
+#define BYT_S0I3_STATE 0x64
+
+enum s3_parts {
+ PROC_FRZ,
+ DEV_SUS,
+ NB_CPU_OFF,
+ NB_CPU_ON,
+ DEV_RES,
+ PROC_UNFRZ,
+ MAX_S3_PARTS
+};
+
+#ifdef CONFIG_ATOM_SOC_POWER
+#define LOG_PMU_EVENTS
+
+/* Error codes for pmu */
+#define PMU_SUCCESS 0
+#define PMU_FAILED -1
+#define PMU_BUSY_STATUS 0
+#define PMU_MODE_ID 1
+#define SET_MODE 1
+#define SET_AOAC_S0i1 2
+#define SET_AOAC_S0i3 3
+#define SET_LPAUDIO 4
+#define SET_AOAC_S0i2 7
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+#define MID_S0I1_STATE 0x60
+#define MID_S0I2_STATE 0x62
+#define MID_LPMP3_STATE 0x62
+#define MID_S0I3_STATE 0x64
+#else
+#define MID_S0I1_STATE 0x1
+#define MID_LPMP3_STATE 0x3
+#define MID_S0I2_STATE 0x7
+#define MID_S0I3_STATE 0x7
+#endif
+
+#define MID_S0IX_STATE 0xf
+#define MID_S3_STATE 0x1f
+#define MID_FAST_ON_OFF_STATE 0x3f
+
+/* combinations */
+#define MID_LPI1_STATE 0x1f
+#define MID_LPI3_STATE 0x7f
+#define MID_I1I3_STATE 0xff
+
+#define REMOVE_LP_FROM_LPIX 4
+
+/* Power number for MID_POWER */
+#define C0_POWER_USAGE 450
+#define C6_POWER_USAGE 200
+#define LPMP3_POWER_USAGE 130
+#define S0I1_POWER_USAGE 50
+#define S0I3_POWER_USAGE 31
+
+extern unsigned int enable_s3;
+extern unsigned int enable_s0ix;
+
+extern void pmu_s0ix_demotion_stat(int req_state, int grant_state);
+extern unsigned int pmu_get_new_cstate(unsigned int cstate, int *index);
+extern int get_target_platform_state(unsigned long *eax);
+extern int mid_s0ix_enter(int);
+extern int pmu_set_devices_in_d0i0(void);
+extern int pmu_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t pmu_pci_choose_state(struct pci_dev *pdev);
+
+extern void time_stamp_in_suspend_flow(int mark, bool start);
+extern void time_stamp_for_sleep_state_latency(int sleep_state,
+ bool start, bool entry);
+extern int mid_state_to_sys_state(int mid_state);
+extern void pmu_power_off(void);
+extern void pmu_set_s0ix_complete(void);
+extern bool pmu_is_s0ix_in_progress(void);
+extern int pmu_nc_set_power_state
+ (int islands, int state_type, int reg_type);
+extern int pmu_nc_get_power_state(int island, int reg_type);
+extern int pmu_set_emmc_to_d0i0_atomic(void);
+
+#ifdef LOG_PMU_EVENTS
+extern void pmu_log_ipc(u32 command);
+extern void pmu_log_ipc_irq(void);
+#else
+static inline void pmu_log_ipc(u32 command) { return; };
+static inline void pmu_log_ipc_irq(void) { return; };
+#endif
+extern void dump_nc_power_history(void);
+
+extern bool mid_pmu_is_wake_source(u32 lss_number);
+
+extern void (*nc_report_power_state) (u32, int);
+#else
+
+/*
+ * If CONFIG_ATOM_SOC_POWER is not defined
+ * fall back to C6
+ */
+
+#define MID_S0I1_STATE C6_HINT
+#define MID_LPMP3_STATE C6_HINT
+#define MID_S0I3_STATE C6_HINT
+#define MID_S3_STATE C6_HINT
+#define MID_FAST_ON_OFF_STATE C6_HINT
+
+/* Power usage unknown if MID_POWER not defined */
+#define C0_POWER_USAGE 0
+#define C6_POWER_USAGE 0
+#define LPMP3_POWER_USAGE 0
+#define S0I1_POWER_USAGE 0
+#define S0I3_POWER_USAGE 0
+
+#define TEMP_DTS_ID 43
+
+static inline int pmu_nc_set_power_state
+ (int islands, int state_type, int reg_type) { return 0; }
+static inline int pmu_nc_get_power_state(int island, int reg_type) { return 0; }
+
+static inline void pmu_set_s0ix_complete(void) { return; }
+static inline bool pmu_is_s0ix_in_progress(void) { return false; };
+static inline unsigned int pmu_get_new_cstate
+ (unsigned int cstate, int *index) { return cstate; };
+
+/* No-op stubs used when CONFIG_ATOM_SOC_POWER is not enabled */
+static inline void time_stamp_in_suspend_flow(int mark, bool start) {}
+static inline void time_stamp_for_sleep_state_latency(int sleep_state,
+ bool start, bool entry) {}
+static inline int mid_state_to_sys_state(int mid_state) { return 0; }
+
+static inline int pmu_set_devices_in_d0i0(void) { return 0; }
+static inline void pmu_log_ipc(u32 command) { return; };
+static inline void pmu_log_ipc_irq(void) { return; };
+static inline int pmu_set_emmc_to_d0i0_atomic(void) { return -ENOSYS; }
+static inline void pmu_power_off(void) { return; }
+static inline bool mid_pmu_is_wake_source(u32 lss_number) { return false; }
+#endif /* #ifdef CONFIG_ATOM_SOC_POWER */
+
+#endif /* #ifndef INTEL_MID_PM_H */
--- /dev/null
+/*
+ * include/linux/intel_pidv_acpi.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Vincent Tinelli (vincent.tinelli@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _INTEL_PIDV_ACPI_H
+#define _INTEL_PIDV_ACPI_H
+
+#include <linux/acpi.h>
+#ifdef CONFIG_ACPI
+#define ACPI_SIG_PIDV "PIDV"
+
+#define pidv_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0440, \
+ }, \
+ .show = _name##_show, \
+}
+
+struct platform_id {
+ u8 part_number[32];
+ u8 ext_id_1[32];
+ u8 ext_id_2[32];
+ u8 uuid[16];
+ u8 iafw_major;
+ u8 iafw_minor;
+ u8 secfw_major;
+ u8 secfw_minor;
+};
+
+struct acpi_table_pidv {
+ struct acpi_table_header header;
+ struct platform_id pidv;
+};
+
+#endif
+#endif
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_CHAINED = (1 << 18),
};
#define IRQF_MODIFY_MASK \
return desc->action != NULL;
}
+/* Test to see if the IRQ is chained */
+static inline int irq_is_chained(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ return desc->status_use_accessors & IRQ_CHAINED;
+}
+
/* caller has locked the irq_desc and both params are valid */
static inline void __irq_set_handler_locked(unsigned int irq,
irq_flow_handler_t handler)
--- /dev/null
+#ifndef _H_LANGWELL_GPIO_H
+#define _H_LANGWELL_GPIO_H
+
+enum {
+ LNW_GPIO = 0,
+ LNW_ALT_1 = 1,
+ LNW_ALT_2 = 2,
+ LNW_ALT_3 = 3,
+};
+
+void lnw_gpio_set_alt(int gpio, int alt);
+int gpio_get_alt(int gpio);
+
+#endif
#ifndef __LINUX_MFD_INTEL_MSIC_H__
#define __LINUX_MFD_INTEL_MSIC_H__
+#include <asm/intel_mid_gpadc.h>
+
/* ID */
#define INTEL_MSIC_ID0 0x000 /* RO */
#define INTEL_MSIC_ID1 0x001 /* RO */
#define INTEL_MSIC_PBCONFIG 0x03e
#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
+/*
+ * MSIC interrupt tree is readable from SRAM at INTEL_MSIC_IRQ_PHYS_BASE.
+ * Since IRQ block starts from address 0x002 we need to subtract that from
+ * the actual IRQ status register address.
+ */
+#define MSIC_IRQ_STATUS(x) (INTEL_MSIC_IRQ_PHYS_BASE + ((x) - 2))
+#define MSIC_IRQ_STATUS_ACCDET MSIC_IRQ_STATUS(INTEL_MSIC_ACCDET)
+#define MSIC_IRQ_STATUS_OCAUDIO MSIC_IRQ_STATUS(INTEL_MSIC_OCAUDIO)
+
/* GPIO */
#define INTEL_MSIC_GPIO0LV7CTLO 0x040
#define INTEL_MSIC_GPIO0LV6CTLO 0x041
/**
* struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
* @gpio_base: base number for the GPIOs
+ * @ngpio_lv: number of low voltage GPIOs
+ * @ngpio_hv: number of high voltage GPIOs
+ * @gpio0_lv_ctlo: low voltage GPIO0 output control register
+ * @gpio0_lv_ctli: low voltage GPIO0 input control register
+ * @gpio0_hv_ctlo: high voltage GPIO0 output control register
+ * @gpio0_hv_ctli: high voltage GPIO0 input control register
+ * @can_sleep: flag for gpio chip
*/
struct intel_msic_gpio_pdata {
unsigned gpio_base;
+ int ngpio_lv;
+ int ngpio_hv;
+ u16 gpio0_lv_ctlo;
+ u16 gpio0_lv_ctli;
+ u16 gpio0_hv_ctlo;
+ u16 gpio0_hv_ctli;
+ int can_sleep;
+};
+
+#define DISABLE_VCRIT 0x01
+#define DISABLE_VWARNB 0x02
+#define DISABLE_VWARNA 0x04
+/**
+ * struct intel_msic_vdd_pdata - platform data for the MSIC VDD driver
+ * @msi: MSI number used for VDD interrupts
+ *
+ * The MSIC CTP driver converts @msi into an IRQ number and passes it to
+ * the VDD driver as %IORESOURCE_IRQ.
+ */
+struct intel_msic_vdd_pdata {
+ unsigned msi;
+ /* 1 = VCRIT, 2 = WARNB, 4 = WARNA */
+ u8 disable_unused_comparator;
};
/**
int irq[INTEL_MSIC_BLOCK_LAST];
struct intel_msic_gpio_pdata *gpio;
struct intel_msic_ocd_pdata *ocd;
+ struct intel_mid_gpadc_platform_data *gpadc;
};
struct intel_msic;
int rate;
};
+struct wm8958_custom_config {
+ int format;
+ int rate;
+ int channels;
+};
+
struct wm8994_pdata {
int gpio_base;
*/
int micdet_delay;
+ /* Delay between microphone detect completing and reporting on
+ * insert (specified in ms)
+ */
+ int mic_id_delay;
+
+ /* Keep MICBIAS2 high for micb_en_delay, during jack insertion
+ * removal
+ */
+ int micb_en_delay;
+
/* IRQ for microphone detection if brought out directly as a
* signal.
*/
* lines is mastered.
*/
int max_channels_clocked[WM8994_NUM_AIF];
+
+ /* custom config for overriding the hw params */
+ struct wm8958_custom_config *custom_cfg;
};
#endif
/*
* R772 (0x304) - AIF1ADC LRCLK
*/
+#define WM8958_AIF1_LRCLK_INV 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT 12 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */
#define WM8994_AIF1ADC_LRCLK_DIR 0x0800 /* AIF1ADC_LRCLK_DIR */
#define WM8994_AIF1ADC_LRCLK_DIR_MASK 0x0800 /* AIF1ADC_LRCLK_DIR */
#define WM8994_AIF1ADC_LRCLK_DIR_SHIFT 11 /* AIF1ADC_LRCLK_DIR */
/*
* R773 (0x305) - AIF1DAC LRCLK
*/
+#define WM8958_AIF1_LRCLK_INV 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT 12 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */
#define WM8994_AIF1DAC_LRCLK_DIR 0x0800 /* AIF1DAC_LRCLK_DIR */
#define WM8994_AIF1DAC_LRCLK_DIR_MASK 0x0800 /* AIF1DAC_LRCLK_DIR */
#define WM8994_AIF1DAC_LRCLK_DIR_SHIFT 11 /* AIF1DAC_LRCLK_DIR */
#define MMC_HIGH_52_MAX_DTR 52000000
#define MMC_HIGH_DDR_MAX_DTR 52000000
#define MMC_HS200_MAX_DTR 200000000
+#define MMC_HS400_MAX_DTR 200000000
unsigned int sectors;
unsigned int card_type;
unsigned int hc_erase_size; /* In sectors */
unsigned int hpi_cmd; /* cmd used as HPI */
bool bkops; /* background support bit */
bool bkops_en; /* background enable bit */
+ unsigned int rpmb_size; /* Units: half sector */
unsigned int data_sector_size; /* 512 bytes or 4KB */
unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
unsigned int boot_ro_lock; /* ro lock support */
#define MMC_CARD_SDXC (1<<6) /* card is SDXC */
#define MMC_CARD_REMOVED (1<<7) /* card has been removed */
#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */
+#define MMC_STATE_HIGHSPEED_400 (1<<9) /* card is in HS400 mode */
#define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */
unsigned int quirks; /* card quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
/* byte mode */
+#define MMC_QUIRK_NON_STD_CIS (1<<11)
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
+
+ unsigned int rpmb_max_req;
};
/*
#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
#define mmc_card_hs200(c) ((c)->state & MMC_STATE_HIGHSPEED_200)
+#define mmc_card_hs400(c) ((c)->state & MMC_STATE_HIGHSPEED_400)
#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
#define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
#define mmc_card_set_hs200(c) ((c)->state |= MMC_STATE_HIGHSPEED_200)
+#define mmc_card_set_hs400(c) ((c)->state |= MMC_STATE_HIGHSPEED_400)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
#define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
+extern int mmc_rpmb_req_handle(struct device *emmc,
+ struct mmc_ioc_rpmb_req *req);
+
#endif /* LINUX_MMC_CARD_H */
#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mmc/ioctl.h>
struct request;
struct mmc_data;
unsigned int sg_len; /* size of scatter list */
struct scatterlist *sg; /* I/O scatter list */
s32 host_cookie; /* host private data */
+ dma_addr_t dmabuf; /* used in panic mode */
};
struct mmc_host;
struct mmc_host *host;
};
+/*
+ * RPMB frame structure for MMC core stack
+ */
+struct mmc_core_rpmb_req {
+ struct mmc_ioc_rpmb_req *req;
+ __u8 *frame;
+ bool ready;
+};
+
+#define RPMB_PROGRAM_KEY 1 /* Program RPMB Authentication Key */
+#define RPMB_GET_WRITE_COUNTER 2 /* Read RPMB write counter */
+#define RPMB_WRITE_DATA 3 /* Write data to RPMB partition */
+#define RPMB_READ_DATA 4 /* Read data from RPMB partition */
+#define RPMB_RESULT_READ 5 /* Read result request */
+#define RPMB_REQ 1 /* RPMB request mark */
+#define RPMB_RESP (1 << 1)/* RPMB response mark */
+#define RPMB_AVALIABLE_SECTORS 8 /* 8 * 512B sectors = one 4KB page; NOTE(review): identifier misspells "AVAILABLE" */
+
+#define RPMB_TYPE_BEG 510
+#define RPMB_RES_BEG 508
+#define RPMB_BLKS_BEG 506
+#define RPMB_ADDR_BEG 504
+#define RPMB_WCOUNTER_BEG 500
+
+#define RPMB_NONCE_BEG 484
+#define RPMB_DATA_BEG 228
+#define RPMB_MAC_BEG 196
+
struct mmc_card;
struct mmc_async_req;
extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
+extern int mmc_rpmb_partition_ops(struct mmc_core_rpmb_req *,
+ struct mmc_card *);
+extern int mmc_rpmb_pre_frame(struct mmc_core_rpmb_req *, struct mmc_card *);
+extern void mmc_rpmb_post_frame(struct mmc_core_rpmb_req *);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
#define MMC_TIMING_UHS_SDR104 6
#define MMC_TIMING_UHS_DDR50 7
#define MMC_TIMING_MMC_HS200 8
+#define MMC_TIMING_MMC_HS400 9
#define MMC_SDR_MODE 0
#define MMC_1_2V_DDR_MODE 1
#define MMC_SET_DRIVER_TYPE_D 3
};
+struct mmc_panic_host;
+
+struct mmc_host_panic_ops {
+ void (*request)(struct mmc_panic_host *, struct mmc_request *);
+ void (*prepare)(struct mmc_panic_host *);
+ int (*setup)(struct mmc_panic_host *);
+ void (*set_ios)(struct mmc_panic_host *);
+ void (*dumpregs)(struct mmc_panic_host *);
+ int (*power_on)(struct mmc_panic_host *);
+ int (*hold_mutex)(struct mmc_panic_host *);
+ void (*release_mutex)(struct mmc_panic_host *);
+};
+
struct mmc_host_ops {
/*
* 'enable' is called when the host is claimed and 'disable' is called
int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
+ void (*set_dev_power)(struct mmc_host *, bool);
+ /* Prevent host controller from Auto Clock Gating by busy reading */
+ void (*busy_wait)(struct mmc_host *mmc, u32 delay);
};
struct mmc_card;
struct regulator *vqmmc; /* Optional Vccq supply */
};
+struct mmc_panic_host {
+ /*
+ * DMA buffer for the log
+ */
+ dma_addr_t dmabuf;
+ void *logbuf;
+ const struct mmc_host_panic_ops *panic_ops;
+ unsigned int panic_ready;
+ unsigned int totalsecs;
+ unsigned int max_blk_size;
+ unsigned int max_blk_count;
+ unsigned int max_req_size;
+ unsigned int blkaddr;
+ unsigned int caps;
+ u32 ocr; /* the current OCR setting */
+ struct mmc_ios ios; /* current io bus settings */
+ struct mmc_card *card;
+ struct mmc_host *mmc;
+ void *priv;
+};
+
struct mmc_host {
struct device *parent;
struct device class_dev;
#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
MMC_CAP2_PACKED_WR)
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
+#define MMC_CAP2_INIT_CARD_SYNC (1 << 15) /* init card in sync mode */
+#define MMC_CAP2_POLL_R1B_BUSY (1 << 16) /* host poll R1B busy*/
+#define MMC_CAP2_RPMBPART_NOACC (1 << 17) /* RPMB partition no access */
+#define MMC_CAP2_LED_SUPPORT (1 << 18) /* led support */
+#define MMC_CAP2_PWCTRL_POWER (1 << 19) /* power control card power */
+#define MMC_CAP2_FIXED_NCRC (1 << 20) /* fixed NCRC */
+#define MMC_CAP2_HS200_WA (1 << 21) /* WA: 100MHz clock in HS200 */
+#define MMC_CAP2_HS400_1_8V_DDR (1 << 22) /* support HS400 */
+#define MMC_CAP2_HS400_1_2V_DDR (1 << 23) /* support HS400 */
+#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V_DDR | \
+ MMC_CAP2_HS400_1_2V_DDR)
mmc_pm_flag_t pm_caps; /* supported pm features */
unsigned int slotno; /* used for sdio acpi binding */
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ struct {
+ struct sdio_cis *cis;
+ struct sdio_cccr *cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+ } embedded_sdio_data;
+#endif
+ struct mmc_panic_host *phost;
unsigned long private[0] ____cacheline_aligned;
};
+#define SECTOR_SIZE 512
+int mmc_emergency_init(void);
+int mmc_emergency_write(char *, unsigned int);
+void mmc_alloc_panic_host(struct mmc_host *, const struct mmc_host_panic_ops *);
+void mmc_emergency_setup(struct mmc_host *host);
+
struct mmc_host *mmc_alloc_host(int extra, struct device *);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
+#define EXT_CSD_PWR_CL_200_DDR_195 253 /* RO, support HS400 */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
#define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */
#define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */
#define EXT_CSD_CARD_TYPE_MASK 0x3F /* Mask out reserved bits */
+#define EXT_CSD_CARD_TYPE_MASK_FULL 0xFF /* Support HS400 */
#define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */
/* DDR mode @1.8V or 3V I/O */
#define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */
#define EXT_CSD_CARD_TYPE_SDR_1_8V (1<<4) /* Card can run at 200MHz */
#define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */
/* SDR mode @1.2V I/O */
+#define EXT_CSD_CARD_TYPE_HS400_1_8V (1<<6) /* Card can run at 200MHz */
+#define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz */
+ /* DDR mode @1.8/1.2v I/O */
#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
int slotno;
int rst_n_gpio; /* Set to -EINVAL if unused */
int cd_gpio; /* Set to -EINVAL if unused */
+ int quirks;
+ int quirks2;
+ int platform_quirks; /* Platform related quirks */
int (*setup)(struct sdhci_pci_data *data);
void (*cleanup)(struct sdhci_pci_data *data);
+ int (*power_up)(void *data);
+ void (*register_embedded_control)(void *dev_id,
+ void (*virtual_cd)(void *dev_id, int card_present));
+ int (*flis_check)(void *data, unsigned int clk);
};
+/* Some Pre-Silicon platform not support all SDHCI HCs of the SoC */
+#define PLFM_QUIRK_NO_HOST_CTRL_HW (1<<0)
+/* Some Pre-Silicon platform do not support eMMC boot partition access */
+#define PLFM_QUIRK_NO_EMMC_BOOT_PART (1<<1)
+/* Some Pre-Silicon platform do not support eMMC or SD High Speed */
+#define PLFM_QUIRK_NO_HIGH_SPEED (1<<2)
+/* For the platform which don't have the SD card slot */
+#define PLFM_QUIRK_NO_SDCARD_SLOT (1<<3)
+
extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
int slotno);
#define SDHCI_QUIRK2_NO_1_8_V (1<<2)
#define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
+/* Intel private quirk2 starts on 15 */
+
+/* V2.0 host controllers support DDR50 */
+#define SDHCI_QUIRK2_V2_0_SUPPORT_DDR50 (1<<15)
+/* Controller has bug when enabling Auto CMD23 */
+#define SDHCI_QUIRK2_BROKEN_AUTO_CMD23 (1<<16)
+/* HC Reg High Speed must be set later than HC2 Reg 1.8v Signaling Enable */
+#define SDHCI_QUIRK2_HIGH_SPEED_SET_LATE (1<<17)
+/* BRCM voltage support: advertise 2.0v support and force using 1.8v instead */
+#define SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8 (1<<18)
+/* to allow mmc_detect to detach the bus */
+#define SDHCI_QUIRK2_DISABLE_MMC_CAP_NONREMOVABLE (1<<19)
+/* avoid detect/rescan/poweroff operations on suspend/resume. */
+#define SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY (1<<20)
+/* Disable eMMC/SD card High speed feature. */
+#define SDHCI_QUIRK2_DISABLE_HIGH_SPEED (1<<21)
+#define SDHCI_QUIRK2_CAN_VDD_300 (1<<22)
+#define SDHCI_QUIRK2_CAN_VDD_330 (1<<23)
+#define SDHCI_QUIRK2_2MS_DELAY (1<<24)
+#define SDHCI_QUIRK2_WAIT_FOR_IDLE (1<<25)
+/* BAD sd cd in HOST IC. This will cause system hang when removing SD */
+#define SDHCI_QUIRK2_BAD_SD_CD (1<<26)
+#define SDHCI_QUIRK2_POWER_PIN_GPIO_MODE (1<<27)
+#define SDHCI_QUIRK2_ADVERTISE_3V0_FORCE_1V8 (1<<28)
+#define SDHCI_QUIRK2_NON_STD_CIS (1<<29)
+#define SDHCI_QUIRK2_TUNING_POLL (1<<30)
+
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
+ /*
+ * XXX: SCU/X86 mutex variables base address in shared SRAM
+ * NOTE: Max size of this struct is 16 bytes
+ * without shared SRAM re-organization.
+ */
+ void __iomem *sram_addr; /* Shared SRAM address */
+
+ void __iomem *rte_addr; /* IOAPIC RTE register address */
+
+#define DEKKER_EMMC_OWNER_OFFSET 0
+#define DEKKER_IA_REQ_OFFSET 0x04
+#define DEKKER_SCU_REQ_OFFSET 0x08
+/* 0xc offset: state of the emmc chip to SCU. */
+#define DEKKER_EMMC_STATE 0x0c
+#define DEKKER_OWNER_IA 0
+#define DEKKER_OWNER_SCU 1
+#define DEKKER_EMMC_CHIP_ACTIVE 0
+#define DEKKER_EMMC_CHIP_SUSPENDED 1
+
+ unsigned int usage_cnt; /* eMMC mutex usage count */
+
const struct sdhci_ops *ops; /* Low level hw interface */
struct regulator *vmmc; /* Power regulator (vmmc) */
#endif
spinlock_t lock; /* Mutex */
+ spinlock_t dekker_lock; /* eMMC Dekker Mutex lock */
int flags; /* Host attributes */
#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
#define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */
#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
+#define SDHCI_POWER_CTRL_DEV (1<<12) /* ctrl dev power */
unsigned int version; /* SDHCI spec. version */
u8 pwr; /* Current voltage */
bool runtime_suspended; /* Host is runtime suspended */
+ bool suspended; /* Host is suspended */
struct mmc_request *mrq; /* Current request */
struct mmc_command *cmd; /* Current command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
+ unsigned int r1b_busy_end:1; /* R1B busy end */
struct sg_mapping_iter sg_miter; /* SG state for PIO */
unsigned int blocks; /* remaining PIO blocks */
/*
* Vendors and devices. Sort key: vendor first, device next.
*/
+#define IWL_SDIO_DEVICE_ID_WKP1 0x3160
+#define IWL_SDIO_DEVICE_ID_WKP2 0x7260
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403
--- /dev/null
+/*
+ * panic_gbuffer.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ *
+ * Expose a generic buffer header to be passed to the panic handler in
+ * order to dump buffer content in case of kernel panic.
+ *
+ * -----------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _LINUX_PANIC_GBUFFER_H
+#define _LINUX_PANIC_GBUFFER_H
+
+struct g_buffer_header {
+ unsigned char *base;
+ size_t size;
+ size_t woff;
+ size_t head;
+};
+
+void panic_set_gbuffer(struct g_buffer_header *gbuffer);
+
+#endif /* _LINUX_PANIC_GBUFFER_H */
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
#define PCI_DEVICE_ID_INTEL_I960 0x0960
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08F9
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08FA
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08FB
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08E5
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08E6
+#define PCI_DEVICE_ID_INTEL_CLV_OTG 0xE006
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG 0x119E
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC45 0x0f50
+#define PCI_DEVICE_ID_INTEL_BYT_OTG 0x0f37
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_SST_MRFLD 0x119A
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
--- /dev/null
+/*
+ * INTEL MID Remote Processor Header File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ASM_INTEL_MID_REMOTEPROC_H
+#define _ASM_INTEL_MID_REMOTEPROC_H
+
+#define RP_IPC_COMMAND 0xA0
+#define RP_IPC_SIMPLE_COMMAND 0xA1
+#define RP_IPC_RAW_COMMAND 0xA2
+
+#define RP_PMIC_ACCESS 0xFF
+#define RP_DFU_REQUEST 0xFE
+#define RP_SET_WATCHDOG 0xF8
+#define RP_FLIS_ACCESS 0xF5
+#define RP_GET_FW_REVISION 0xF4
+#define RP_COLD_BOOT 0xF3
+#define RP_COLD_RESET 0xF1
+#define RP_COLD_OFF 0x80
+#define RP_MIP_ACCESS 0xEC
+#define RP_GET_HOBADDR 0xE5
+#define RP_OSC_CLK_CTRL 0xE6
+#define RP_S0IX_COUNTER 0xE8
+#define RP_WRITE_OSNIB 0xE4
+#define RP_CLEAR_FABERROR 0xE3
+#define RP_FW_UPDATE 0xFE
+#define RP_VRTC 0xFA
+#define RP_PMDB 0xE0
+#define RP_WRITE_OEMNIB 0xDF /* Command is used to write OEMNIB */
+ /* data. Used with extended OSHOB */
+ /* OSNIB only. */
+/*
+ * Assigning some temp ids for following devices
+ * TODO: Need to change it to some meaningful
+ * values.
+ */
+#define RP_PMIC_GPIO 0X02
+#define RP_PMIC_AUDIO 0x03
+#define RP_MSIC_GPIO 0x05
+#define RP_MSIC_AUDIO 0x06
+#define RP_MSIC_OCD 0x07
+#define RP_MSIC_BATTERY 0XEF
+#define RP_MSIC_THERMAL 0x09
+#define RP_MSIC_POWER_BTN 0x10
+#define RP_IPC 0X11
+#define RP_IPC_UTIL 0X12
+#define RP_FW_ACCESS 0X13
+#define RP_UMIP_ACCESS 0x14
+#define RP_OSIP_ACCESS 0x15
+#define RP_MSIC_ADC 0x16
+#define RP_BQ24192 0x17
+#define RP_MSIC_CLV_AUDIO 0x18
+#define RP_PMIC_CCSM 0x19
+#define RP_PMIC_I2C 0x20
+#define RP_MSIC_MRFLD_AUDIO 0x21
+#define RP_MSIC_PWM 0x22
+#define RP_MSIC_KPD_LED 0x23
+#define RP_BCOVE_ADC 0x24
+#define RP_BCOVE_THERMAL 0x25
+#define RP_MRFL_OCD 0x26
+#define RP_FW_LOGGING 0x27
+#define RP_PMIC_CHARGER 0x28
+
+enum rproc_type {
+ RPROC_SCU = 0,
+ RPROC_PSH,
+ RPROC_NUM,
+};
+
+struct rproc_ops;
+struct platform_device;
+struct rpmsg_ns_msg;
+
+struct rpmsg_ns_info {
+ enum rproc_type type;
+ char name[RPMSG_NAME_SIZE];
+ u32 addr;
+ u32 flags;
+ struct list_head node;
+};
+
+struct rpmsg_ns_list {
+ struct list_head list;
+ struct mutex lock;
+};
+
+extern struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name,
+ int id, u32 addr);
+extern void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+ struct rpmsg_ns_list *nslist);
+
+/*
+ * struct intel_mid_rproc_pdata - intel mid remoteproc's platform data
+ * @name: the remoteproc's name
+ * @firmware: name of firmware file to load
+ * @ops: start/stop rproc handlers
+ * @device_enable: handler for enabling a device
+ * @device_shutdown: handler for shutting down a device
+ */
+struct intel_mid_rproc_pdata {
+ const char *name;
+ const char *firmware;
+ const struct rproc_ops *ops;
+ int (*device_enable) (struct platform_device *pdev);
+ int (*device_shutdown) (struct platform_device *pdev);
+ struct rpmsg_ns_list *nslist;
+};
+
+#endif /* _ASM_INTEL_MID_REMOTEPROC_H */
--- /dev/null
+/*
+ * ADS7955 SPI ADC driver
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dave Hunt <dave.hunt@emutex.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_TI_ADS7955_H__
+#define __LINUX_PLATFORM_DATA_TI_ADS7955_H__
+
+/**
+ * struct ads7955_platform_data - Platform data for the ads7955 ADC driver
+ * @ext_ref: Whether to use an external reference voltage.
+ **/
+struct ads7955_platform_data {
+ bool ext_ref;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_TI_ADS7955_H__ */
--- /dev/null
+#ifndef __BATTERY_ID_H__
+
+#define __BATTERY_ID_H__
+
+enum {
+ POWER_SUPPLY_BATTERY_REMOVED = 0,
+ POWER_SUPPLY_BATTERY_INSERTED,
+};
+
+enum batt_chrg_prof_type {
+ PSE_MOD_CHRG_PROF = 0,
+};
+
+/* charging profile structure definition */
+struct ps_batt_chg_prof {
+ enum batt_chrg_prof_type chrg_prof_type;
+ void *batt_prof;
+};
+
+/* PSE Modified Algo Structure */
+/* Parameters defining the charging range */
+struct ps_temp_chg_table {
+ /* upper temperature limit for each zone */
+ short int temp_up_lim;
+ /* charge current and voltage */
+ short int full_chrg_vol;
+ short int full_chrg_cur;
+ /* maintenance thresholds */
+	/* maintenance lower threshold. Once battery hits full,
+	 * charging will be resumed when battery voltage <= this voltage
+	 */
+ short int maint_chrg_vol_ll;
+ /* Charge current and voltage in maintenance mode */
+ short int maint_chrg_vol_ul;
+ short int maint_chrg_cur;
+} __packed;
+
+
+#define BATTID_STR_LEN 8
+#define BATT_TEMP_NR_RNG 6
+/* Charging Profile */
+struct ps_pse_mod_prof {
+ /* battery id */
+ char batt_id[BATTID_STR_LEN];
+ /* type of battery */
+ u16 battery_type;
+ u16 capacity;
+ u16 voltage_max;
+ /* charge termination current */
+ u16 chrg_term_ma;
+ /* Low battery level voltage */
+ u16 low_batt_mV;
+ /* upper and lower temperature limits on discharging */
+ u8 disch_tmp_ul;
+ u8 disch_tmp_ll;
+ /* number of temperature monitoring ranges */
+ u16 temp_mon_ranges;
+ struct ps_temp_chg_table temp_mon_range[BATT_TEMP_NR_RNG];
+ /* Lowest temperature supported */
+ short int temp_low_lim;
+} __packed;
+
+/*For notification during battery change event*/
+extern struct atomic_notifier_head batt_id_notifier;
+
+extern void battery_prop_changed(int battery_conn_stat,
+ struct ps_batt_chg_prof *batt_prop);
+#ifdef CONFIG_POWER_SUPPLY_BATTID
+extern int get_batt_prop(struct ps_batt_chg_prof *batt_prop);
+#else
+static inline int get_batt_prop(struct ps_batt_chg_prof *batt_prop)
+{
+ return -ENOMEM;
+}
+#endif
+extern int batt_id_reg_notifier(struct notifier_block *nb);
+extern void batt_id_unreg_notifier(struct notifier_block *nb);
+#endif
--- /dev/null
+/*
+ * bq24261_charger.h: platform data structure for bq24261 driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef __BQ24261_CHARGER_H__
+#define __BQ24261_CHARGER_H__
+
+struct bq24261_plat_data {
+ u32 irq_map;
+ u8 irq_mask;
+ char **supplied_to;
+ size_t num_supplicants;
+ struct power_supply_throttle *throttle_states;
+ size_t num_throttle_states;
+ int safety_timer;
+ int boost_mode_ma;
+ bool is_ts_enabled;
+
+ int (*enable_charging) (bool val);
+ int (*enable_charger) (bool val);
+ int (*set_inlmt) (int val);
+ int (*set_cc) (int val);
+ int (*set_cv) (int val);
+ int (*set_iterm) (int val);
+ int (*enable_vbus) (bool val);
+ /* WA for ShadyCove VBUS removal detect issue */
+ int (*handle_low_supply) (void);
+ void (*dump_master_regs) (void);
+};
+
+extern void bq24261_cv_to_reg(int, u8*);
+extern void bq24261_cc_to_reg(int, u8*);
+extern void bq24261_inlmt_to_reg(int, u8*);
+
+#ifdef CONFIG_BQ24261_CHARGER
+extern int bq24261_get_bat_health(void);
+extern int bq24261_get_bat_status(void);
+#else
+static int bq24261_get_bat_health(void)
+{
+ return 0;
+}
+static int bq24261_get_bat_status(void)
+{
+ return 0;
+}
+#endif
+
+#endif
POWER_SUPPLY_SCOPE_DEVICE,
};
+enum {
+ POWER_SUPPLY_CHARGE_CURRENT_LIMIT_ZERO = 0,
+ POWER_SUPPLY_CHARGE_CURRENT_LIMIT_LOW,
+ POWER_SUPPLY_CHARGE_CURRENT_LIMIT_MEDIUM,
+ POWER_SUPPLY_CHARGE_CURRENT_LIMIT_HIGH,
+ POWER_SUPPLY_CHARGE_CURRENT_LIMIT_NONE,
+};
+
enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_CHARGE_CURRENT_LIMIT,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_INLMT,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_MAX_TEMP,
+ POWER_SUPPLY_PROP_MIN_TEMP,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
+ POWER_SUPPLY_PROP_CHARGE_TERM_CUR,
+ POWER_SUPPLY_PROP_ENABLE_CHARGING,
+ POWER_SUPPLY_PROP_ENABLE_CHARGER,
+ POWER_SUPPLY_PROP_CABLE_TYPE,
+ POWER_SUPPLY_PROP_PRIORITY,
POWER_SUPPLY_PROP_SCOPE,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_TYPE_UPS,
POWER_SUPPLY_TYPE_MAINS,
POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */
+ POWER_SUPPLY_TYPE_USB_INVAL, /* Invalid Standard Downstream Port */
POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
+ POWER_SUPPLY_TYPE_USB_HOST, /* To support OTG devices */
+};
+
+enum power_supply_charger_event {
+ POWER_SUPPLY_CHARGER_EVENT_CONNECT = 0,
+ POWER_SUPPLY_CHARGER_EVENT_UPDATE,
+ POWER_SUPPLY_CHARGER_EVENT_RESUME,
+ POWER_SUPPLY_CHARGER_EVENT_SUSPEND,
+ POWER_SUPPLY_CHARGER_EVENT_DISCONNECT,
+};
+
+struct power_supply_charger_cap {
+ enum power_supply_charger_event chrg_evt;
+ enum power_supply_type chrg_type;
+ unsigned int mA; /* input current limit */
+};
+
+enum power_supply_charger_cable_type {
+ POWER_SUPPLY_CHARGER_TYPE_NONE = 0,
+ POWER_SUPPLY_CHARGER_TYPE_USB_SDP = 1 << 0,
+ POWER_SUPPLY_CHARGER_TYPE_USB_DCP = 1 << 1,
+ POWER_SUPPLY_CHARGER_TYPE_USB_CDP = 1 << 2,
+ POWER_SUPPLY_CHARGER_TYPE_USB_ACA = 1 << 3,
+ POWER_SUPPLY_CHARGER_TYPE_AC = 1 << 4,
+ POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK = 1 << 5,
+ POWER_SUPPLY_CHARGER_TYPE_ACA_A = 1 << 6,
+ POWER_SUPPLY_CHARGER_TYPE_ACA_B = 1 << 7,
+ POWER_SUPPLY_CHARGER_TYPE_ACA_C = 1 << 8,
+ POWER_SUPPLY_CHARGER_TYPE_SE1 = 1 << 9,
+ POWER_SUPPLY_CHARGER_TYPE_MHL = 1 << 10,
+ POWER_SUPPLY_CHARGER_TYPE_B_DEVICE = 1 << 11,
};
+struct power_supply_cable_props {
+ enum power_supply_charger_event chrg_evt;
+ enum power_supply_charger_cable_type chrg_type;
+ unsigned int ma; /* input current limit */
+};
+
+#define POWER_SUPPLY_CHARGER_TYPE_USB \
+ (POWER_SUPPLY_CHARGER_TYPE_USB_SDP | \
+ POWER_SUPPLY_CHARGER_TYPE_USB_DCP | \
+ POWER_SUPPLY_CHARGER_TYPE_USB_CDP | \
+ POWER_SUPPLY_CHARGER_TYPE_USB_ACA | \
+ POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK| \
+ POWER_SUPPLY_CHARGER_TYPE_SE1)
+
union power_supply_propval {
int intval;
const char *strval;
};
+enum psy_throttle_action {
+
+ PSY_THROTTLE_DISABLE_CHARGER = 0,
+ PSY_THROTTLE_DISABLE_CHARGING,
+ PSY_THROTTLE_CC_LIMIT,
+ PSY_THROTTLE_INPUT_LIMIT,
+};
+
+struct power_supply_throttle {
+ enum psy_throttle_action throttle_action;
+ unsigned throttle_val;
+};
+
struct power_supply {
const char *name;
enum power_supply_type type;
size_t num_properties;
char **supplied_to;
+ unsigned long supported_cables;
size_t num_supplicants;
-
+ struct power_supply_throttle *throttle_states;
+ size_t num_throttle_states;
char **supplied_from;
size_t num_supplies;
#ifdef CONFIG_OF
enum power_supply_property psp);
void (*external_power_changed)(struct power_supply *psy);
void (*set_charged)(struct power_supply *psy);
+ void (*charging_port_changed)(struct power_supply *psy,
+ struct power_supply_charger_cap *cap);
/* For APM emulation, think legacy userspace. */
int use_for_apm;
extern struct power_supply *power_supply_get_by_name(const char *name);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
+extern int power_supply_is_battery_connected(void);
extern int power_supply_set_battery_charged(struct power_supply *psy);
+extern void power_supply_charger_event(struct power_supply_charger_cap cap);
+extern void power_supply_query_charger_caps(struct power_supply_charger_cap
+ *cap);
-#ifdef CONFIG_POWER_SUPPLY
+#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
extern int power_supply_is_system_supplied(void);
#else
static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
enum {
PWMF_REQUESTED = 1 << 0,
PWMF_ENABLED = 1 << 1,
+ PWMF_EXPORTED = 1 << 2,
};
struct pwm_device {
struct pwm_chip *chip;
void *chip_data;
- unsigned int period; /* in nanoseconds */
+ unsigned int period; /* in nanoseconds */
+ unsigned int duty_cycle; /* in nanoseconds */
+ enum pwm_polarity polarity;
};
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
return pwm ? pwm->period : 0;
}
+static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
+{
+ if (pwm)
+ pwm->duty_cycle = duty;
+}
+
+static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+{
+ return pwm ? pwm->duty_cycle : 0;
+}
+
/*
* pwm_set_polarity - configure the polarity of a PWM signal
*/
}
#endif
+#ifdef CONFIG_PWM_SYSFS
+void pwmchip_sysfs_export(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+#else
+static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+{
+}
+
+static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+{
+}
+#endif /* CONFIG_PWM_SYSFS */
+
+
#endif /* __LINUX_PWM_H */
--- /dev/null
+/*
+ * intel_basin_cove_pmic.h - Support for Basin Cove pmic VR
+ * Copyright (c) 2012, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __INTEL_BASIN_COVE_PMIC_H_
+#define __INTEL_BASIN_COVE_PMIC_H_
+
+struct regulator_init_data;
+
+enum intel_regulator_id {
+ VPROG1,
+ VPROG2,
+ VPROG3,
+};
+
+/* Voltage tables for Regulators */
+static const u16 VPROG1_VSEL_table[] = {
+ 1500, 1800, 2500, 2800,
+};
+
+static const u16 VPROG2_VSEL_table[] = {
+ 1500, 1800, 2500, 2850,
+};
+
+static const u16 VPROG3_VSEL_table[] = {
+ 1050, 1800, 2500, 2800,
+};
+
+/* Slave Address for all regulators */
+#define VPROG1CNT_ADDR 0x0ac
+#define VPROG2CNT_ADDR 0x0ad
+#define VPROG3CNT_ADDR 0x0ae
+/**
+ * intel_pmic_info - platform data for intel pmic
+ * @pmic_reg: pmic register that is to be used for this VR
+ */
+struct intel_pmic_info {
+ struct regulator_init_data *init_data;
+ struct regulator_dev *intel_pmic_rdev;
+ const u16 *table;
+ u16 pmic_reg;
+ u8 table_len;
+};
+
+#endif /* __INTEL_BASIN_COVE_PMIC_H_ */
--- /dev/null
+/*
+*Support for intel pmic
+*Copyright (c) 2012, Intel Corporation.
+*This program is free software; you can redistribute it and/or modify
+*it under the terms of the GNU General Public License version 2 as
+*published by the Free Software Foundation.
+*
+*/
+
+struct regulator_init_data;
+
+enum intel_regulator_id {
+ VPROG1,
+ VPROG2,
+ VEMMC1,
+ VEMMC2,
+};
+
+/* Voltage tables for Regulators */
+static const u16 VPROG1_VSEL_table[] = {
+ 1200, 1800, 2500, 2800,
+};
+
+static const u16 VPROG2_VSEL_table[] = {
+ 1200, 1800, 2500, 2800,
+};
+
+static const u16 VEMMC1_VSEL_table[] = {
+ 2850,
+};
+static const u16 VEMMC2_VSEL_table[] = {
+ 2850,
+};
+
+static const u16 V180AON_VSEL_table[] = {
+ 1800, 1817, 1836, 1854,
+};
+
+/* Slave Address for all regulators */
+#define VPROG1CNT_ADDR 0x0D6
+#define VPROG2CNT_ADDR 0x0D7
+#define VEMMC1CNT_ADDR 0x0D9
+#define VEMMC2CNT_ADDR 0x0DA
+/**
+ * intel_pmic_info - platform data for intel pmic
+ * @pmic_reg: pmic register that is to be used for this VR
+ */
+struct intel_pmic_info {
+ struct regulator_init_data *init_data;
+ struct regulator_dev *intel_pmic_rdev;
+ const u16 *table;
+ u16 pmic_reg;
+ u8 table_len;
+};
--- /dev/null
+/*
+ * Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The SDM (System Debug Monitor) directs trace data routed from
+ * various parts in the system out through the Intel Tangier PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7
+ * (compact JTAG) standard and the USB Debug-Class
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out it's contents for debugging a mobile system.
+ */
+
+#ifndef SDM_H_
+#define SDM_H_
+
+#ifdef CONFIG_INTEL_PTI_STM
+/* the following functions are defined in drivers/misc/stm.c */
+int stm_kernel_get_out(void);
+int stm_kernel_set_out(int bus_type);
+int stm_is_enabled(void);
+#else
+static inline int stm_kernel_get_out(void) { return -EOPNOTSUPP; };
+static inline int stm_kernel_set_out(int bus_type) { return -EOPNOTSUPP; };
+static inline int stm_is_enabled(void) { return 0; };
+#endif
+
+/* Temporary: to be replaced later with dynamic configuration */
+#define STM_NB_IN_PINS 0
+
+/* STM output configurations */
+#define STM_PTI_4BIT_LEGACY 0
+#define STM_PTI_4BIT_NIDNT 1
+#define STM_PTI_16BIT 2
+#define STM_PTI_12BIT 3
+#define STM_PTI_8BIT 4
+#define STM_USB 15
+
+/* Buffer configurations */
+#define DFX_BULK_BUFFER_SIZE 64 /* for Tangier A0 */
+#define DFX_BULK_OUT_BUFFER_ADDR 0xF90B0000
+#define DFX_BULK_IN_BUFFER_ADDR 0xF90B0000
+#define DFX_BULK_IN_BUFFER_ADDR_2 0xF90B0400
+
+#define TRACE_BULK_BUFFER_SIZE 65536 /* revision */
+#define TRACE_BULK_IN_BUFFER_ADDR 0xF90A0000 /* revision */
+
+#endif /*SDM_H_*/
+
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
int (*set_wake)(struct uart_port *, unsigned int state);
+ void (*wake_peer)(struct uart_port *);
/*
* Return a string describing the type of the port
--- /dev/null
+#ifndef _LINUX_SERIAL_MAX3110_H
+#define _LINUX_SERIAL_MAX3110_H
+
+/**
+ * struct plat_max3110 - MAX3110 SPI UART platform data
+ * @irq_edge_triggered: if the IRQ is edge-triggered
+ *
+ * You should use this structure in your machine description to specify
+ * how the MAX3110 is connected.
+ *
+ */
+struct plat_max3110 {
+ int irq_edge_triggered;
+};
+
+#endif
/* HW register offset definition */
#define UART_FOR 0x08
+#define UART_ABR 0x09
#define UART_PS 0x0C
#define UART_MUL 0x0D
#define UART_DIV 0x0E
#define HSU_GBL_INT_BIT_DMA 0x5
#define HSU_GBL_ISR 0x8
-#define HSU_GBL_DMASR 0x400
-#define HSU_GBL_DMAISR 0x404
+#define HSU_GBL_DMASR 0x0
+#define HSU_GBL_DMAISR 0x4
#define HSU_PORT_REG_OFFSET 0x80
#define HSU_PORT0_REG_OFFSET 0x80
#define HSU_PORT2_REG_OFFSET 0x180
#define HSU_PORT_REG_LENGTH 0x80
-#define HSU_DMA_CHANS_REG_OFFSET 0x500
+#define HSU_DMA_CHANS_REG_OFFSET 0x100
#define HSU_DMA_CHANS_REG_LENGTH 0x40
#define HSU_CH_SR 0x0 /* channel status reg */
#define SFI_SIG_WAKE "WAKE"
#define SFI_SIG_DEVS "DEVS"
#define SFI_SIG_GPIO "GPIO"
+#define SFI_SIG_OEMB "OEMB"
#define SFI_SIGNATURE_SIZE 4
#define SFI_OEM_ID_SIZE 6
#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
((ptable->header.len - sizeof(struct sfi_table_header)) / \
(sizeof(entry_type)))
+
+#define SPID_FRU_SIZE 10
+
/*
* Table structures must be byte-packed to match the SFI specification,
* as they are provided by the BIOS.
#define SFI_DEV_TYPE_UART 2
#define SFI_DEV_TYPE_HSI 3
#define SFI_DEV_TYPE_IPC 4
+#define SFI_DEV_TYPE_SD 5
u8 host_num; /* attached to host 0, 1...*/
u16 addr;
--- /dev/null
+/*
+ * Copyright (C) Intel 2009
+ * Ken Mills <ken.k.mills@intel.com>
+ * Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_H_
+#define INTEL_MID_SSP_SPI_H_
+
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+
+#define PCI_MRST_DMAC1_ID 0x0814
+#define PCI_MDFL_DMAC1_ID 0x0827
+#define PCI_BYT_DMAC1_ID 0x0f06
+#define PCI_MRFL_DMAC_ID 0x11A2
+
+#define SSP_NOT_SYNC 0x400000
+#define MAX_SPI_TRANSFER_SIZE 8192
+#define MAX_BITBANGING_LOOP 10000
+#define SPI_FIFO_SIZE 16
+
+/* PM QoS define */
+#define MIN_EXIT_LATENCY 20
+
+/* SSP assignment configuration from PCI config */
+
+#define SSP_CFG_SPI_MODE_ID 1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET 6
+
+/* Driver's quirk flags */
+/* This workaround buffers data in the audio fabric SDRAM from */
+/* where the DMA transfers will operate. Should be enabled only for */
+/* SPI slave mode. */
+#define QUIRKS_SRAM_ADDITIONAL_CPY 1
+/* If set the trailing bytes won't be handled by the DMA. */
+/* Trailing byte feature not fully available. */
+#define QUIRKS_DMA_USE_NO_TRAIL 2
+/* If set, the driver will use PM_QOS to reduce the latency */
+/* introduced by the deeper C-states which may produce over/under */
+/* run issues. Must be used in slave mode. In master mode, the */
+/* latency is not critical, but setting this workaround may */
+/* improve the SPI throughput. */
+#define QUIRKS_USE_PM_QOS 4
+/* This quirks is set on Moorestown */
+#define QUIRKS_PLATFORM_MRST 8
+/* This quirks is set on Medfield */
+#define QUIRKS_PLATFORM_MDFL 16
+/* If set, the driver will apply the bitbanging workaround needed */
+/* to enable defective Langwell stepping A SSP. The defective SSP */
+/* can be enabled only once, and should never be disabled. */
+#define QUIRKS_BIT_BANGING 32
+/* If set, SPI is in slave clock mode */
+#define QUIRKS_SPI_SLAVE_CLOCK_MODE 64
+/* Add more platform here. */
+/* This quirks is set on Baytrail. */
+#define QUIRKS_PLATFORM_BYT 128
+#define QUIRKS_PLATFORM_MRFL 256
+
+/* Uncomment to get RX and TX short dumps after each transfer */
+/* #define DUMP_RX 1 */
+#define MAX_TRAILING_BYTE_RETRY 16
+#define MAX_TRAILING_BYTE_LOOP 100
+#define DELAY_TO_GET_A_WORD 3
+#define DFLT_TIMEOUT_VAL 500
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+
+#define RX_DIRECTION 0
+#define TX_DIRECTION 1
+
+#define I2C_ACCESS_USDELAY 10
+
+#define DFLT_BITS_PER_WORD 16
+#define MIN_BITS_PER_WORD 4
+#define MAX_BITS_PER_WORD 32
+#define DFLT_FIFO_BURST_SIZE IMSS_FIFO_BURST_8
+
+#define TRUNCATE(x, a) ((x) & ~((a)-1))
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+DEFINE_SSP_REG(SSCR2, 0x40)
+DEFINE_SSP_REG(SSFS, 0x44)
+DEFINE_SSP_REG(SFIFOL, 0x68)
+
+DEFINE_SSP_REG(I2CCTRL, 0x00);
+DEFINE_SSP_REG(I2CDATA, 0x04);
+
+DEFINE_SSP_REG(GPLR1, 0x04);
+DEFINE_SSP_REG(GPDR1, 0x0c);
+DEFINE_SSP_REG(GPSR1, 0x14);
+DEFINE_SSP_REG(GPCR1, 0x1C);
+DEFINE_SSP_REG(GAFR1_U, 0x44);
+
+#define SYSCFG 0x20bc0
+
+#define SRAM_BASE_ADDR 0xfffdc000
+#define SRAM_RX_ADDR SRAM_BASE_ADDR
+#define SRAM_TX_ADDR (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
+
+#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
+#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
+#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
+#define SSCR0_Motorola (0x0 << 4) /* Motorola's SPI mode */
+#define SSCR0_ECS (1 << 6) /* External clock select */
+#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
+
+#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS (1 << 20) /* Extended data size select */
+#define SSCR0_NCS (1 << 21) /* Network clock select */
+#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun int mask */
+#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
+#define SSCR0_ADC (1 << 30) /* Audio clock select */
+#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
+
+#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF (1 << 2) /* Tx FIFO Not Full */
+#define SSSR_RNE (1 << 3) /* Rx FIFO Not Empty */
+#define SSSR_BSY (1 << 4) /* SSP Busy */
+#define SSSR_TFS (1 << 5) /* Tx FIFO Service Request */
+#define SSSR_RFS (1 << 6) /* Rx FIFO Service Request */
+#define SSSR_ROR (1 << 7) /* Rx FIFO Overrun */
+#define SSSR_TFL_MASK (0x0F << 8) /* Tx FIFO level field mask */
+#define SSSR_RFL_SHIFT 12 /* Rx FIFO MASK shift */
+#define SSSR_RFL_MASK (0x0F << SSSR_RFL_SHIFT)/* RxFIFOlevel mask */
+
+#define SSCR0_TIM (1 << 23) /* Transmit FIFO Under Run Int Mask */
+#define SSCR0_RIM (1 << 22) /* Receive FIFO Over Run int Mask */
+#define SSCR0_NCS (1 << 21) /* Network Clock Select */
+#define SSCR0_EDSS (1 << 20) /* Extended Data Size Select */
+
+#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
+#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
+#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
+#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
+#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
+#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
+#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
+#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
+#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
+#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
+#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE (1 << 18) /* Trailing Byte Interupt Enable */
+#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
+#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
+#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */
+
+#define SSSR_BCE (1 << 23) /* Bit Count Error */
+#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
+#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
+#define SSSR_EOC (1 << 20) /* End Of Chain */
+#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
+#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
+#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
+#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
+
+#define SSCR2_CLK_DEL_EN (1 << 3) /* Delay logic for capturing input data */
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+ | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+ | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+ | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+ | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+struct callback_param {
+ void *drv_context;
+ u32 direction;
+};
+
+struct ssp_drv_context {
+ /* Driver model hookup */
+ struct pci_dev *pdev;
+
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* SSP register addresses */
+ unsigned long paddr;
+ void *ioaddr;
+ int irq;
+
+ /* I2C registers */
+ dma_addr_t I2C_paddr;
+ void *I2C_ioaddr;
+
+ /* SSP masks*/
+ u32 cr1_sig;
+ u32 cr1;
+ u32 clear_sr;
+ u32 mask_sr;
+
+ /* PM_QOS request */
+ struct pm_qos_request pm_qos_req;
+
+ struct tasklet_struct poll_transfer;
+
+ spinlock_t lock;
+ struct workqueue_struct *workqueue;
+ struct workqueue_struct *wq_poll_write;
+ struct work_struct pump_messages;
+ struct work_struct poll_write;
+ struct list_head queue;
+ struct completion msg_done;
+
+ int suspended;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ size_t len;
+ size_t len_dma_rx;
+ size_t len_dma_tx;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ bool dma_initialized;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ u8 n_bytes;
+ int (*write)(struct ssp_drv_context *sspc);
+ int (*read)(struct ssp_drv_context *sspc);
+
+ struct intel_mid_dma_slave dmas_tx;
+ struct intel_mid_dma_slave dmas_rx;
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+ struct workqueue_struct *dma_wq;
+ struct work_struct complete_work;
+
+ u8 __iomem *virt_addr_sram_tx;
+ u8 __iomem *virt_addr_sram_rx;
+
+ int txdma_done;
+ int rxdma_done;
+ struct callback_param tx_param;
+ struct callback_param rx_param;
+ struct pci_dev *dmac1;
+
+ unsigned long quirks;
+ u32 rx_fifo_threshold;
+
+ /* if CS_ACTIVE_HIGH, cs_assert == 1 else cs_assert == 0 */
+ int cs_assert;
+ int cs_change;
+ void (*cs_control)(u32 command);
+};
+
+struct chip_data {
+ u32 cr0;
+ u32 cr1;
+ u32 timeout;
+ u8 chip_select;
+ u8 n_bytes;
+ u8 dma_enabled;
+ u8 bits_per_word;
+ u32 speed_hz;
+ int (*write)(struct ssp_drv_context *sspc);
+ int (*read)(struct ssp_drv_context *sspc);
+ void (*cs_control)(u32 command);
+};
+
+
+enum intel_mid_ssp_spi_fifo_burst {
+ IMSS_FIFO_BURST_1,
+ IMSS_FIFO_BURST_4,
+ IMSS_FIFO_BURST_8
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+ enum intel_mid_ssp_spi_fifo_burst burst_size;
+ u32 timeout;
+ u8 enable_loopback;
+ u8 dma_enabled;
+ void (*cs_control)(u32 command);
+ void (*platform_pinmux)(void);
+};
+
+#define SPI_DIB_NAME_LEN 16
+#define SPI_DIB_SPEC_INFO_LEN 10
+
+struct spi_dib_header {
+ u32 signature;
+ u32 length;
+ u8 rev;
+ u8 checksum;
+ u8 dib[0];
+} __packed;
+
+#endif /*INTEL_MID_SSP_SPI_H_*/
struct thermal_cooling_device *);
int (*unbind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
- int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*get_temp) (struct thermal_zone_device *, long *);
int (*get_mode) (struct thermal_zone_device *,
enum thermal_device_mode *);
int (*set_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
int (*get_trip_type) (struct thermal_zone_device *, int,
enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_temp) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*get_trip_temp) (struct thermal_zone_device *, int, long *);
+ int (*set_trip_temp) (struct thermal_zone_device *, int, long);
+ int (*get_trip_hyst) (struct thermal_zone_device *, int, long *);
+ int (*set_trip_hyst) (struct thermal_zone_device *, int, long);
+ int (*get_slope) (struct thermal_zone_device *, long *);
+ int (*set_slope) (struct thermal_zone_device *, long);
+ int (*get_intercept) (struct thermal_zone_device *, long *);
+ int (*set_intercept) (struct thermal_zone_device *, long);
+ int (*get_crit_temp) (struct thermal_zone_device *, long *);
int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
int (*get_trend) (struct thermal_zone_device *, int,
enum thermal_trend *);
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+ int (*get_force_state_override) (struct thermal_cooling_device *,
+ char *);
+ int (*set_force_state_override) (struct thermal_cooling_device *,
+ char *);
+ int (*get_available_states) (struct thermal_cooling_device *,
+ char *);
};
struct thermal_cooling_device {
* This function is called by the low-level tty driver to signal
* that line discpline should try to send more characters to the
* low-level driver for transmission. If the line discpline does
- * not have any more data to send, it can just return.
+ * not have any more data to send, it can just return. If the line
+ * discipline does have some data to send, schedule a tasklet or
+ * workqueue to do the real data transfer. Do not send data in
+ * this hook, as it may lead to a deadlock.
*
* int (*hangup)(struct tty_struct *)
*
--- /dev/null
+/*
+ * <linux/usb/debug.h> -- USB Debug Class definitions.
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_USB_DEBUG_H
+#define __LINUX_USB_DEBUG_H
+
+#include <linux/types.h>
+
+/* Debug Interface Subclass Codes */
+#define USB_SUBCLASS_DVC_GP 0x05
+#define USB_SUBCLASS_DVC_DFX 0x06
+#define USB_SUBCLASS_DVC_TRACE 0x07
+#define USB_SUBCLASS_DEBUG_CONTROL 0x08
+
+/* Debug Interface Function Protocol */
+#define DC_PROTOCOL_VENDOR 0x00
+#define DC_PROTOCOL_LAUTERBACH 0x01
+#define DC_PROTOCOL_ITP 0x02
+
+/* Debug Class-Specific Interface Descriptor Subtypes */
+#define DC_UNDEFINED 0x00
+#define DC_INPUT_CONNECTION 0x01
+#define DC_OUTPUT_CONNECTION 0x02
+#define DC_DEBUG_UNIT 0x03
+#define DC_DEBUG_ATTRIBUTES 0x04 /* revision: per SAS */
+
+/* Debug-Class Input/Output Connection Type */
+#define DC_CONNECTION_USB 0x00
+#define DC_CONNECTION_JTAG 0x01
+#define DC_CONNECTION_DEBUG_DATA_CONTROL 0x02
+#define DC_CONNECTION_DEBUG_DATA 0x03
+#define DC_CONNECTION_DEBUG_CONTROL 0x04
+
+/*
+ * Debug-class (rev 0.88r2) section 4.4.3
+ * Attribute Descriptor, bmControl
+ */
+#define DC_CTL_SET_CFG_DATA_SG (1 << 0)
+#define DC_CTL_SET_CFG_DATA (1 << 1)
+#define DC_CTL_GET_CFG_DATA (1 << 2)
+#define DC_CTL_SET_CFG_ADDR (1 << 3)
+#define DC_CTL_GET_CFG_ADDR (1 << 4)
+#define DC_CTL_SET_ALT_STACK (1 << 5)
+#define DC_CTL_GET_ALT_STACK (1 << 6)
+#define DC_CTL_SET_OP_MODE (1 << 7)
+#define DC_CTL_GET_OP_MODE (1 << 8)
+#define DC_CTL_SET_TRACE_CFG (1 << 9)
+#define DC_CTL_GET_TRACE_CFG (1 << 10)
+#define DC_CTL_SET_BUFF_INFO (1 << 11)
+#define DC_CTL_GET_BUFF_INFO (1 << 12)
+#define DC_CTL_SET_RESET (1 << 13)
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit/Input/Output connection Descriptors,
+ * dTraceFormat
+ */
+#define DC_TRACE_NOT_FORMATED_PASSTHROUGH 0x00000000
+#define DC_TRACE_NOT_FORMATED_HEADER 0x00000001
+#define DC_TRACE_NOT_FORMATED_FOOTER 0x00000002
+#define DC_TRACE_NOT_FORMATED_GUID 0x00000005
+#define DC_TRACE_NOT_FORMATED_UTF8 0x00000006
+#define DC_TRACE_INTEL_FORMATED_VENDOR 0x01000000
+#define DC_TRACE_MIPI_FORMATED_STPV1 0x80000000
+#define DC_TRACE_MIPI_FORMATED_STPV2 0x80000001
+#define DC_TRACE_MIPI_FORMATED_TWP 0x80000100
+#define DC_TRACE_MIPI_FORMATED_OST 0x80001000
+#define DC_TRACE_NEXUS_FORMATED 0x81000000
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit connection Descriptors, dDebugUnitType
+ */
+#define DC_UNIT_TYPE_DFX 0x00
+#define DC_UNIT_TYPE_SELECT 0x01
+#define DC_UNIT_TYPE_TRACE_ROUTE 0x02
+#define DC_UNIT_TYPE_TRACE_PROC 0x03
+#define DC_UNIT_TYPE_TRACE_GEN 0x04
+#define DC_UNIT_TYPE_TRACE_SINK 0x05
+#define DC_UNIT_TYPE_CONTROL 0x06
+#define DC_UNIT_TYPE_VENDOR 0x40
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit connection Descriptors, dDebugUnitSubType
+ */
+#define DC_UNIT_SUBTYPE_NO 0x00
+#define DC_UNIT_SUBTYPE_CPU 0x01
+#define DC_UNIT_SUBTYPE_GFX 0x02
+#define DC_UNIT_SUBTYPE_VIDEO 0x03
+#define DC_UNIT_SUBTYPE_IMAGING 0x04
+#define DC_UNIT_SUBTYPE_AUDIO 0x05
+#define DC_UNIT_SUBTYPE_MODEM 0x06
+#define DC_UNIT_SUBTYPE_BLUETOOTH 0x07
+#define DC_UNIT_SUBTYPE_PWR_MGT 0x08
+#define DC_UNIT_SUBTYPE_SECURITY 0x09
+#define DC_UNIT_SUBTYPE_SENSOR 0x0A
+#define DC_UNIT_SUBTYPE_BUSWATCH 0x0B
+#define DC_UNIT_SUBTYPE_GPS 0x0C
+#define DC_UNIT_SUBTYPE_TRACEZIP 0x0D
+#define DC_UNIT_SUBTYPE_TAPCTL 0x0E
+#define DC_UNIT_SUBTYPE_MEMACC 0x0F
+#define DC_UNIT_SUBTYPE_SWLOGGER 0x40
+#define DC_UNIT_SUBTYPE_SWROUTER 0x41
+#define DC_UNIT_SUBTYPE_SWDRIVER 0x42
+#define DC_UNIT_SUBTYPE_VENDOR 0x80
+
+/* USB DBG requests values */
+#define DC_REQUEST_SET_CONFIG_DATA 0x01
+#define DC_REQUEST_SET_CONFIG_DATA_SINGLE 0x02
+#define DC_REQUEST_SET_CONFIG_ADDRESS 0x03
+#define DC_REQUEST_SET_ALT_STACK 0x04
+#define DC_REQUEST_SET_OPERATING 0x05
+#define DC_REQUEST_SET_TRACE 0x08
+#define DC_REQUEST_SET_BUFFER_INFO 0x09
+#define DC_REQUEST_SET_RESET 0x0A
+#define DC_REQUEST_GET_CONFIG_DATA 0x81
+#define DC_REQUEST_GET_CONFIG_DATA_SINGLE 0x82
+#define DC_REQUEST_GET_CONFIG_ADDRESS 0x83
+#define DC_REQUEST_GET_ALT_STACK 0x84
+#define DC_REQUEST_GET_OPERATING 0x85
+#define DC_REQUEST_GET_TRACE 0x86
+#define DC_REQUEST_GET_INFO 0x87
+#define DC_REQUEST_GET_ERROR 0x88
+#define DC_REQUEST_GET_BUFFER_INFO 0x89
+
+/* Debug-Class Debug-Attributes Descriptor */
+struct dc_debug_attributes_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __le16 bcdDC;
+ __le16 wTotalLength;
+ __u8 bmSupportedFeatures;
+ __u8 bControlSize; /* n */
+ __u8 bmControl[0]; /* [n] */
+ __le16 wAuxDataSize; /* m */
+ __le32 dInputBufferSize;
+ __le32 dOutputBufferSize;
+ __le64 qBaseAddress;
+ __le64 hGlobalID[2];
+ __u8 Supplementary[0]; /* [m-32] */
+} __attribute__((__packed__));
+
+#define DC_DEBUG_ATTR_DESCR(name) \
+ dc_debug_attributes_descriptor_##name
+
+#define DECLARE_DC_DEBUG_ATTR_DESCR(name, n, m) \
+struct DC_DEBUG_ATTR_DESCR(name) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __le16 bcdDC; \
+ __le16 wTotalLength; \
+ __u8 bmSupportedFeatures; \
+ __u8 bControlSize; \
+ __u8 bmControl[n]; \
+ __le16 wAuxDataSize; \
+ __le32 dInputBufferSize; \
+ __le32 dOutputBufferSize; \
+ __le64 qBaseAddress; \
+ __le64 hGlobalID[2]; \
+ __u8 Supplementary[m-32]; \
+} __attribute__((__packed__));
+
+#define DC_DBG_ATTRI_SIZE(n, m) (9 + (n) + 2 + (m))
+
+/* Debug-Class Input Connection Descriptor */
+struct dc_input_connection_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bConnectionID;
+ __u8 bConnectionType;
+ __u8 bAssocConnection;
+ __u8 iConnection;
+ __le32 dTraceFormat;
+ __le32 dStreamID;
+} __attribute__((__packed__));
+
+#define DC_INPUT_CONNECTION_SIZE 15
+
+/* Debug-Class Output Connection Descriptor */
+struct dc_output_connection_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bConnectionID;
+ __u8 bConnectionType;
+ __u8 bAssocConnection;
+ __le16 wSourceID;
+ __u8 iConnection;
+} __attribute__((__packed__));
+
+#define DC_OUTPUT_CONNECTION_SIZE 9
+
+/* Debug-Class Debug-Unit Descriptor */
+struct dc_debug_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bDebugUnitType;
+ __u8 bDebugSubUnitType;
+ __u8 bAliasUnitID;
+ __u8 bNrInPins; /* p */
+ __le16 wSourceID[0]; /* [p] */
+ __u8 bNrOutPins; /* q */
+ __le32 dTraceFormat[0]; /* [q] */
+ __le32 dStreamID;
+ __u8 bControlSize; /* n */
+ __u8 bmControl[0]; /* [n] */
+ __le16 wAuxDataSize; /* m */
+ __le64 qBaseAddress;
+ __le64 hIPID[2];
+ __u8 Supplementary[0]; /* [m-24] */
+ __u8 iDebugUnitType;
+} __attribute__((__packed__));
+
+/*
+ * Build the struct tag used by DECLARE_DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m).
+ * Each argument must be pasted on its own: a paste such as "##p_" glues the
+ * literal token "p_" rather than expanding the macro argument p, so p and n
+ * would never appear in the tag and descriptors differing only in p or n
+ * would collide on the same struct name.
+ */
+#define DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m) \
+	dc_debug_unit_descriptor_##p##_##q##_##n##_##m
+
+#define DECLARE_DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m) \
+struct DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bUnitID; \
+ __u8 bDebugUnitType; \
+ __u8 bDebugSubUnitType; \
+ __u8 bAliasUnitID; \
+ __u8 bNrInPins; \
+ __le16 wSourceID[p]; \
+ __u8 bNrOutPins; \
+ __le32 dTraceFormat[q]; \
+ __le32 dStreamID; \
+ __u8 bControlSize; \
+ __u8 bmControl[n]; \
+ __le16 wAuxDataSize; \
+ __le64 qBaseAddress; \
+ __le64 hIPID[2]; \
+ __u8 Supplementary[m-24]; \
+ __u8 iDebugUnitType; \
+} __attribute__((__packed__));
+
+#define DC_DBG_UNIT_SIZE(p, q, n, m) \
+(8 + (p * 2) + 1 + (q * 4) + 5 + (n) + 2 + (m) + 1)
+
+#endif /* __LINUX_USB_DEBUG_H */
--- /dev/null
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_INTEL_H
+#define __DWC3_INTEL_H
+
+#include "otg.h"
+
+enum intel_mid_pmic_type {
+ NO_PMIC,
+ SHADY_COVE,
+ BASIN_COVE
+};
+
+struct intel_dwc_otg_pdata {
+ int is_hvp;
+ enum intel_mid_pmic_type pmic_type;
+ int charger_detect_enable;
+ int gpio_cs;
+ int gpio_reset;
+ int gpio_id;
+ int id;
+ int charging_compliance;
+ struct delayed_work suspend_discon_work;
+ u8 ti_phy_vs1;
+ int sdp_charging;
+};
+
+#define TUSB1211_VENDOR_ID_LO 0x00
+#define TUSB1211_VENDOR_ID_HI 0x01
+#define TUSB1211_PRODUCT_ID_LO 0x02
+#define TUSB1211_PRODUCT_ID_HI 0x03
+#define TUSB1211_FUNC_CTRL 0x04
+#define TUSB1211_FUNC_CTRL_SET 0x05
+#define TUSB1211_FUNC_CTRL_CLR 0x06
+#define TUSB1211_IFC_CTRL 0x07
+#define TUSB1211_IFC_CTRL_SET 0x08
+#define TUSB1211_IFC_CTRL_CLR 0x09
+#define TUSB1211_OTG_CTRL 0x0A
+#define TUSB1211_OTG_CTRL_SET 0x0B
+#define TUSB1211_OTG_CTRL_CLR 0x0C
+#define TUSB1211_USB_INT_EN_RISE 0x0D
+#define TUSB1211_USB_INT_EN_RISE_SET 0x0E
+#define TUSB1211_USB_INT_EN_RISE_CLR 0x0F
+#define TUSB1211_USB_INT_EN_FALL 0x10
+#define TUSB1211_USB_INT_EN_FALL_SET 0x11
+#define TUSB1211_USB_INT_EN_FALL_CLR 0x12
+#define TUSB1211_USB_INT_STS 0x13
+#define TUSB1211_USB_INT_LATCH 0x14
+#define TUSB1211_DEBUG 0x15
+#define TUSB1211_SCRATCH_REG 0x16
+#define TUSB1211_SCRATCH_REG_SET 0x17
+#define TUSB1211_SCRATCH_REG_CLR 0x18
+#define TUSB1211_ACCESS_EXT_REG_SET 0x2F
+
+#define TUSB1211_VENDOR_SPECIFIC1 0x80
+#define TUSB1211_VENDOR_SPECIFIC1_SET 0x81
+#define TUSB1211_VENDOR_SPECIFIC1_CLR 0x82
+#define TUSB1211_POWER_CONTROL 0x3D
+#define TUSB1211_POWER_CONTROL_SET 0x3E
+#define TUSB1211_POWER_CONTROL_CLR 0x3F
+
+#define TUSB1211_VENDOR_SPECIFIC2 0x80
+#define TUSB1211_VENDOR_SPECIFIC2_SET 0x81
+#define TUSB1211_VENDOR_SPECIFIC2_CLR 0x82
+#define TUSB1211_VENDOR_SPECIFIC2_STS 0x83
+#define TUSB1211_VENDOR_SPECIFIC2_LATCH 0x84
+#define TUSB1211_VENDOR_SPECIFIC3 0x85
+#define TUSB1211_VENDOR_SPECIFIC3_SET 0x86
+#define TUSB1211_VENDOR_SPECIFIC3_CLR 0x87
+#define TUSB1211_VENDOR_SPECIFIC4 0x88
+#define TUSB1211_VENDOR_SPECIFIC4_SET 0x89
+#define TUSB1211_VENDOR_SPECIFIC4_CLR 0x8A
+#define TUSB1211_VENDOR_SPECIFIC5 0x8B
+#define TUSB1211_VENDOR_SPECIFIC5_SET 0x8C
+#define TUSB1211_VENDOR_SPECIFIC5_CLR 0x8D
+#define TUSB1211_VENDOR_SPECIFIC6 0x8E
+#define TUSB1211_VENDOR_SPECIFIC6_SET 0x8F
+#define TUSB1211_VENDOR_SPECIFIC6_CLR 0x90
+
+#define VS1_DATAPOLARITY (1 << 6)
+#define VS1_ZHSDRV(v) ((v & 0x3) << 5)
+#define VS1_IHSTX(v) ((v & 0x7))
+
+#define VS2STS_VBUS_MNTR_STS (1 << 7)
+#define VS2STS_REG3V3IN_MNTR_STS (1 << 6)
+#define VS2STS_SVLDCONWKB_WDOG_STS (1 << 5)
+#define VS2STS_ID_FLOAT_STS (1 << 4)
+#define VS2STS_ID_RARBRC_STS(v) ((v & 0x3) << 2)
+#define VS2STS_BVALID_STS (1 << 0)
+
+#define VS3_CHGD_IDP_SRC_EN (1 << 6)
+#define VS3_IDPULLUP_WK_EN (1 << 5)
+#define VS3_SW_USB_DET (1 << 4)
+#define VS3_DATA_CONTACT_DET_EN (1 << 3)
+#define VS3_REG3V3_VSEL(v) (v & 0x7)
+
+#define VS4_ACA_DET_EN (1 << 6)
+#define VS4_RABUSIN_EN (1 << 5)
+#define VS4_R1KSERIES (1 << 4)
+#define VS4_PSW_OSOD (1 << 3)
+#define VS4_PSW_CMOS (1 << 2)
+#define VS4_CHGD_SERX_DP (1 << 1)
+#define VS4_CHGD_SERX_DM (1 << 0)
+
+#define VS5_AUTORESUME_WDOG_EN (1 << 6)
+#define VS5_ID_FLOAT_EN (1 << 5)
+#define VS5_ID_RES_EN (1 << 4)
+#define VS5_SVLDCONWKB_WDOG_EN (1 << 3)
+#define VS5_VBUS_MNTR_RISE_EN (1 << 2)
+#define VS5_VBUS_MNTR_FALL_EN (1 << 1)
+#define VS5_REG3V3IN_MNTR_EN (1 << 0)
+
+#define DEBUG_LINESTATE (0x3 << 0)
+
+#define OTGCTRL_USEEXTVBUS_INDICATOR (1 << 7)
+#define OTGCTRL_DRVVBUSEXTERNAL (1 << 6)
+#define OTGCTRL_DRVVBUS (1 << 5)
+#define OTGCTRL_CHRGVBUS (1 << 4)
+#define OTGCTRL_DISCHRGVBUS (1 << 3)
+#define OTGCTRL_DMPULLDOWN (1 << 2)
+#define OTGCTRL_DPPULLDOWN (1 << 1)
+#define OTGCTRL_IDPULLUP (1 << 0)
+
+#define FUNCCTRL_SUSPENDM (1 << 6)
+#define FUNCCTRL_RESET (1 << 5)
+#define FUNCCTRL_OPMODE(v) ((v & 0x3) << 3)
+#define FUNCCTRL_TERMSELECT (1 << 2)
+#define FUNCCTRL_XCVRSELECT(v) (v & 0x3)
+
+#define PWCTRL_HWDETECT (1 << 7)
+#define PWCTRL_DP_VSRC_EN (1 << 6)
+#define PWCTRL_VDAT_DET (1 << 5)
+#define PWCTRL_DP_WKPU_EN (1 << 4)
+#define PWCTRL_BVALID_FALL (1 << 3)
+#define PWCTRL_BVALID_RISE (1 << 2)
+#define PWCTRL_DET_COMP (1 << 1)
+#define PWCTRL_SW_CONTROL (1 << 0)
+
+
+#define PMIC_VLDOCNT 0xAF
+#define PMIC_VLDOCNT_VUSBPHYEN (1 << 2)
+
+#define PMIC_TLP1ESBS0I1VNNBASE 0X6B
+#define PMIC_I2COVRDADDR 0x59
+#define PMIC_I2COVROFFSET 0x5A
+#define PMIC_USBPHYCTRL 0x30
+#define PMIC_I2COVRWRDATA 0x5B
+#define PMIC_I2COVRCTRL 0x58
+#define PMIC_I2COVRCTL_I2CWR 0x01
+
+#define USBPHYCTRL_D0 (1 << 0)
+#define PMIC_USBIDCTRL 0x19
+#define USBIDCTRL_ACA_DETEN_D1 (1 << 1)
+#define USBIDCTRL_USB_IDEN_D0 (1 << 0)
+#define PMIC_USBIDSTS 0x1A
+#define USBIDSTS_ID_GND (1 << 0)
+#define USBIDSTS_ID_RARBRC_STS(v) ((v & 0x3) << 1)
+#define USBIDSTS_ID_FLOAT_STS (1 << 3)
+#define PMIC_USBPHYCTRL_D0 (1 << 0)
+#define APBFC_EXIOTG3_MISC0_REG 0xF90FF85C
+
+#define DATACON_TIMEOUT 750
+#define DATACON_INTERVAL 10
+#define VBUS_TIMEOUT 300
+#define PCI_DEVICE_ID_DWC 0x119E
+
+#define VENDOR_ID_MASK (0x03 << 6)
+#define BASIN_COVE_PMIC_ID (0x03 << 6)
+
+#define PMIC_MAJOR_REV (0x07 << 3)
+#define PMIC_A0_MAJOR_REV 0x00
+
+#endif /* __DWC3_INTEL_H */
--- /dev/null
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_INTEL_H
+#define __DWC3_INTEL_H
+
+#include "otg.h"
+
+struct intel_dwc_otg_pdata {
+ int is_hvp;
+ int charging_compliance;
+};
+
+#define TUSB1211_VENDOR_ID_LO 0x00
+#define TUSB1211_VENDOR_ID_HI 0x01
+#define TUSB1211_PRODUCT_ID_LO 0x02
+#define TUSB1211_PRODUCT_ID_HI 0x03
+#define TUSB1211_FUNC_CTRL 0x04
+#define TUSB1211_FUNC_CTRL_SET 0x05
+#define TUSB1211_FUNC_CTRL_CLR 0x06
+#define TUSB1211_IFC_CTRL 0x07
+#define TUSB1211_IFC_CTRL_SET 0x08
+#define TUSB1211_IFC_CTRL_CLR 0x09
+#define TUSB1211_OTG_CTRL 0x0A
+#define TUSB1211_OTG_CTRL_SET 0x0B
+#define TUSB1211_OTG_CTRL_CLR 0x0C
+#define TUSB1211_USB_INT_EN_RISE 0x0D
+#define TUSB1211_USB_INT_EN_RISE_SET 0x0E
+#define TUSB1211_USB_INT_EN_RISE_CLR 0x0F
+#define TUSB1211_USB_INT_EN_FALL 0x10
+#define TUSB1211_USB_INT_EN_FALL_SET 0x11
+#define TUSB1211_USB_INT_EN_FALL_CLR 0x12
+#define TUSB1211_USB_INT_STS 0x13
+#define TUSB1211_USB_INT_LATCH 0x14
+#define TUSB1211_DEBUG 0x15
+#define TUSB1211_SCRATCH_REG 0x16
+#define TUSB1211_SCRATCH_REG_SET 0x17
+#define TUSB1211_SCRATCH_REG_CLR 0x18
+#define TUSB1211_ACCESS_EXT_REG_SET 0x2F
+
+#define TUSB1211_VENDOR_SPECIFIC1 0x80
+#define TUSB1211_VENDOR_SPECIFIC1_SET 0x81
+#define TUSB1211_VENDOR_SPECIFIC1_CLR 0x82
+#define TUSB1211_POWER_CONTROL 0x3D
+#define TUSB1211_POWER_CONTROL_SET 0x3E
+#define TUSB1211_POWER_CONTROL_CLR 0x3F
+
+#define TUSB1211_VENDOR_SPECIFIC2 0x80
+#define TUSB1211_VENDOR_SPECIFIC2_SET 0x81
+#define TUSB1211_VENDOR_SPECIFIC2_CLR 0x82
+#define TUSB1211_VENDOR_SPECIFIC2_STS 0x83
+#define TUSB1211_VENDOR_SPECIFIC2_LATCH 0x84
+#define TUSB1211_VENDOR_SPECIFIC3 0x85
+#define TUSB1211_VENDOR_SPECIFIC3_SET 0x86
+#define TUSB1211_VENDOR_SPECIFIC3_CLR 0x87
+#define TUSB1211_VENDOR_SPECIFIC4 0x88
+#define TUSB1211_VENDOR_SPECIFIC4_SET 0x89
+#define TUSB1211_VENDOR_SPECIFIC4_CLR 0x8A
+#define TUSB1211_VENDOR_SPECIFIC5 0x8B
+#define TUSB1211_VENDOR_SPECIFIC5_SET 0x8C
+#define TUSB1211_VENDOR_SPECIFIC5_CLR 0x8D
+#define TUSB1211_VENDOR_SPECIFIC6 0x8E
+#define TUSB1211_VENDOR_SPECIFIC6_SET 0x8F
+#define TUSB1211_VENDOR_SPECIFIC6_CLR 0x90
+
+#define VS1_DATAPOLARITY (1 << 6)
+#define VS1_ZHSDRV(v) ((v & 0x3) << 5)
+#define VS1_IHSTX(v) ((v & 0x7))
+
+#define VS2STS_VBUS_MNTR_STS (1 << 7)
+#define VS2STS_REG3V3IN_MNTR_STS (1 << 6)
+#define VS2STS_SVLDCONWKB_WDOG_STS (1 << 5)
+#define VS2STS_ID_FLOAT_STS (1 << 4)
+#define VS2STS_ID_RARBRC_STS(v) ((v & 0x3) << 2)
+#define VS2STS_BVALID_STS (1 << 0)
+
+#define VS3_CHGD_IDP_SRC_EN (1 << 6)
+#define VS3_IDPULLUP_WK_EN (1 << 5)
+#define VS3_SW_USB_DET (1 << 4)
+#define VS3_DATA_CONTACT_DET_EN (1 << 3)
+#define VS3_REG3V3_VSEL(v) (v & 0x7)
+
+#define VS4_ACA_DET_EN (1 << 6)
+#define VS4_RABUSIN_EN (1 << 5)
+#define VS4_R1KSERIES (1 << 4)
+#define VS4_PSW_OSOD (1 << 3)
+#define VS4_PSW_CMOS (1 << 2)
+#define VS4_CHGD_SERX_DP (1 << 1)
+#define VS4_CHGD_SERX_DM (1 << 0)
+
+#define VS5_AUTORESUME_WDOG_EN (1 << 6)
+#define VS5_ID_FLOAT_EN (1 << 5)
+#define VS5_ID_RES_EN (1 << 4)
+#define VS5_SVLDCONWKB_WDOG_EN (1 << 3)
+#define VS5_VBUS_MNTR_RISE_EN (1 << 2)
+#define VS5_VBUS_MNTR_FALL_EN (1 << 1)
+#define VS5_REG3V3IN_MNTR_EN (1 << 0)
+
+#define DEBUG_LINESTATE (0x3 << 0)
+
+#define OTGCTRL_USEEXTVBUS_INDICATOR (1 << 7)
+#define OTGCTRL_DRVVBUSEXTERNAL (1 << 6)
+#define OTGCTRL_DRVVBUS (1 << 5)
+#define OTGCTRL_CHRGVBUS (1 << 4)
+#define OTGCTRL_DISCHRGVBUS (1 << 3)
+#define OTGCTRL_DMPULLDOWN (1 << 2)
+#define OTGCTRL_DPPULLDOWN (1 << 1)
+#define OTGCTRL_IDPULLUP (1 << 0)
+
+#define FUNCCTRL_SUSPENDM (1 << 6)
+#define FUNCCTRL_RESET (1 << 5)
+#define FUNCCTRL_OPMODE(v) ((v & 0x3) << 3)
+#define FUNCCTRL_TERMSELECT (1 << 2)
+#define FUNCCTRL_XCVRSELECT(v) (v & 0x3)
+
+#define PWCTRL_HWDETECT (1 << 7)
+#define PWCTRL_DP_VSRC_EN (1 << 6)
+#define PWCTRL_VDAT_DET (1 << 5)
+#define PWCTRL_DP_WKPU_EN (1 << 4)
+#define PWCTRL_BVALID_FALL (1 << 3)
+#define PWCTRL_BVALID_RISE (1 << 2)
+#define PWCTRL_DET_COMP (1 << 1)
+#define PWCTRL_SW_CONTROL (1 << 0)
+
+
+#define PMIC_VLDOCNT 0xAF
+#define PMIC_VLDOCNT_VUSBPHYEN (1 << 2)
+
+#define PMIC_TLP1ESBS0I1VNNBASE 0X6B
+#define PMIC_I2COVRDADDR 0x59
+#define PMIC_I2COVROFFSET 0x5A
+#define PMIC_USBPHYCTRL 0x30
+#define PMIC_I2COVRWRDATA 0x5B
+#define PMIC_I2COVRCTRL 0x58
+#define PMIC_I2COVRCTL_I2CWR 0x01
+
+#define USBPHYCTRL_D0 (1 << 0)
+#define PMIC_USBIDCTRL 0x19
+#define USBIDCTRL_ACA_DETEN_D1 (1 << 1)
+#define USBIDCTRL_USB_IDEN_D0 (1 << 0)
+#define PMIC_USBIDSTS 0x1A
+#define USBIDSTS_ID_GND (1 << 0)
+#define USBIDSTS_ID_RARBRC_STS(v) ((v & 0x3) << 0)
+#define USBIDSTS_ID_FLOAT_STS (1 << 3)
+#define PMIC_USBPHYCTRL_D0 (1 << 0)
+#define APBFC_EXIOTG3_MISC0_REG 0xF90FF85C
+
+#define DATACON_TIMEOUT 750
+#define DATACON_INTERVAL 10
+#define VBUS_TIMEOUT 300
+#define PCI_DEVICE_ID_DWC 0x119E
+#endif /* __DWC3_INTEL_H */
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
struct usb_ep;
/**
* struct usb_gadget - represents a usb slave device
+ * @work: (internal use) Workqueue to be used for sysfs_notify()
* @ops: Function pointers used to access hardware-specific operations.
* @ep0: Endpoint zero, used when reading or writing responses to
* driver setup() requests
* device is acting as a B-Peripheral (so is_a_peripheral is false).
*/
struct usb_gadget {
+ struct work_struct work;
/* readonly to gadget driver */
const struct usb_gadget_ops *ops;
struct usb_ep *ep0;
unsigned out_epnum;
unsigned in_epnum;
};
+#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
{ dev_set_drvdata(&gadget->dev, data); }
#define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
+#define HCD_FLAG_IRQ_DISABLED 7 /* Interrupt was disabled */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_IRQ_DISABLED(hcd) ((hcd)->flags & (1U << HCD_FLAG_IRQ_DISABLED))
/* Flags that get set only during HCD registration or removal. */
unsigned rh_registered:1;/* is root hub registered? */
unsigned wireless:1; /* Wireless USB HCD */
unsigned authorized_default:1;
unsigned has_tt:1; /* Integrated TT in root hub */
+ unsigned has_wakeup_irq:1; /* Can IRQ when suspended */
unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */
--- /dev/null
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __PENWELL_OTG_H__
+#define __PENWELL_OTG_H__
+
+#include <linux/usb/intel_mid_otg.h>
+#include <linux/power_supply.h>
+#include <linux/wakelock.h>
+
+#define PMU_OTG_WAKE_SOURCE 6
+#define CI_USBCMD 0x30
+# define USBCMD_RST BIT(1)
+# define USBCMD_RS BIT(0)
+#define CI_USBSTS 0x34
+# define USBSTS_SLI BIT(8)
+# define USBSTS_URI BIT(6)
+# define USBSTS_PCI BIT(2)
+#define CI_USBINTR 0x38
+# define USBINTR_PCE BIT(2)
+#define CI_ULPIVP 0x60
+# define ULPI_WU BIT(31)
+# define ULPI_RUN BIT(30)
+# define ULPI_RW BIT(29)
+# define ULPI_SS BIT(27)
+# define ULPI_PORT (BIT(26) | BIT(25) | BIT(24))
+# define ULPI_ADDR (0xff << 16)
+# define ULPI_DATRD (0xff << 8)
+# define ULPI_DATWR (0xff << 0)
+#define CI_PORTSC1 0x74
+# define PORTSC_PP BIT(12)
+# define PORTSC_LS (BIT(11) | BIT(10))
+# define PORTSC_SUSP BIT(7)
+# define PORTSC_CCS BIT(0)
+#define CI_HOSTPC1 0xb4
+# define HOSTPC1_PHCD BIT(22)
+#define CI_OTGSC 0xf4
+# define OTGSC_DPIE BIT(30)
+# define OTGSC_1MSE BIT(29)
+# define OTGSC_BSEIE BIT(28)
+# define OTGSC_BSVIE BIT(27)
+# define OTGSC_ASVIE BIT(26)
+# define OTGSC_AVVIE BIT(25)
+# define OTGSC_IDIE BIT(24)
+# define OTGSC_DPIS BIT(22)
+# define OTGSC_1MSS BIT(21)
+# define OTGSC_BSEIS BIT(20)
+# define OTGSC_BSVIS BIT(19)
+# define OTGSC_ASVIS BIT(18)
+# define OTGSC_AVVIS BIT(17)
+# define OTGSC_IDIS BIT(16)
+# define OTGSC_DPS BIT(14)
+# define OTGSC_1MST BIT(13)
+# define OTGSC_BSE BIT(12)
+# define OTGSC_BSV BIT(11)
+# define OTGSC_ASV BIT(10)
+# define OTGSC_AVV BIT(9)
+# define OTGSC_ID BIT(8)
+# define OTGSC_HABA BIT(7)
+# define OTGSC_HADP BIT(6)
+# define OTGSC_IDPU BIT(5)
+# define OTGSC_DP BIT(4)
+# define OTGSC_OT BIT(3)
+# define OTGSC_HAAR BIT(2)
+# define OTGSC_VC BIT(1)
+# define OTGSC_VD BIT(0)
+#define CI_USBMODE 0xf8
+# define USBMODE_CM (BIT(1) | BIT(0))
+# define USBMODE_IDLE 0
+# define USBMODE_DEVICE 0x2
+# define USBMODE_HOST 0x3
+#define USBCFG_ADDR 0xff10801c
+#define USBCFG_LEN 4
+# define USBCFG_VBUSVAL BIT(14)
+# define USBCFG_AVALID BIT(13)
+# define USBCFG_BVALID BIT(12)
+# define USBCFG_SESEND BIT(11)
+
+#define OTGSC_INTEN_MASK \
+ (OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE \
+ | OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE)
+
+#define OTGSC_INTSTS_MASK \
+ (OTGSC_DPIS | OTGSC_BSEIS | OTGSC_BSVIS \
+ | OTGSC_ASVIS | OTGSC_AVVIS | OTGSC_IDIS)
+
+#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
+
+#define HOST_REQUEST_FLAG BIT(0)
+
+/* MSIC register for vbus power control */
+#define MSIC_ID 0x00
+# define ID0_VENDID0 (BIT(7) | BIT(6))
+#define MSIC_ID1 0x01
+# define ID1_VENDID1 (BIT(7) | BIT(6))
+#define MSIC_VUSB330CNT 0xd4
+#define MSIC_VOTGCNT 0xdf
+# define VOTGEN BIT(7)
+# define VOTGRAMP BIT(4)
+#define MSIC_SPWRSRINT1 0x193
+# define SUSBCHPDET BIT(6)
+# define SUSBDCDET BIT(2)
+# define MSIC_SPWRSRINT1_MASK (BIT(6) | BIT(2))
+# define SPWRSRINT1_CDP BIT(6)
+# define SPWRSRINT1_SDP 0
+# define SPWRSRINT1_DCP BIT(2)
+#define MSIC_USB_MISC 0x2c8 /* Intel Specific */
+# define MISC_CHGDSERXDPINV BIT(5)
+#define MSIC_OTGCTRL 0x39c
+#define MSIC_OTGCTRLSET 0x340
+#define MSIC_OTGCTRLCLR 0x341
+#define ULPI_OTGCTRL 0x0a
+#define ULPI_OTGCTRLSET 0x0b
+#define ULPI_OTGCTRLCLR 0x0c
+# define DRVVBUS_EXTERNAL BIT(6)
+# define DRVVBUS BIT(5)
+# define DMPULLDOWN BIT(2)
+# define DPPULLDOWN BIT(1)
+#define MSIC_USBINTEN_RISE 0x39d
+#define MSIC_USBINTEN_RISESET 0x39e
+#define MSIC_USBINTEN_RISECLR 0x39f
+#define MSIC_USBINTEN_FALL 0x3a0
+#define MSIC_USBINTEN_FALLSET 0x3a1
+#define MSIC_USBINTEN_FALLCLR 0x3a2
+
+/*
+ * For Clovertrail, due to change of USB PHY from MSIC to external standalone
+ * chip, USB Interrupt Enable Rising/Falling registers can be accessed only
+ * from ULPI interface.
+ */
+#define ULPI_USBINTEN_RISING 0xd
+#define ULPI_USBINTEN_RISINGSET 0xe
+#define ULPI_USBINTEN_RISINGCLR 0xf
+#define ULPI_USBINTEN_FALLING 0x10
+#define ULPI_USBINTEN_FALLINGSET 0x11
+#define ULPI_USBINTEN_FALLINGCLR 0x12
+
+# define IDGND BIT(4)
+# define SESSEND BIT(3)
+# define SESSVLD BIT(2)
+# define VBUSVLD BIT(1)
+# define HOSTDISCON BIT(0)
+#define MSIC_PWRCTRL 0x3b5
+#define MSIC_PWRCTRLSET 0x342
+#define MSIC_PWRCTRLCLR 0x343
+#define ULPI_PWRCTRL 0x3d
+#define ULPI_PWRCTRLSET 0x3e
+#define ULPI_PWRCTRLCLR 0x3f
+# define HWDET BIT(7)
+# define DPVSRCEN BIT(6)
+# define VDATDET BIT(5)
+# define DPWKPUEN BIT(4)
+# define SWCNTRL BIT(0)
+#define MSIC_FUNCTRL 0x398
+#define MSIC_FUNCTRLSET 0x344
+#define MSIC_FUNCTRLCLR 0x345
+#define ULPI_FUNCTRL 0x04
+#define ULPI_FUNCTRLSET 0x05
+#define ULPI_FUNCTRLCLR 0x06
+# define PHYRESET BIT(5)
+# define OPMODE1 BIT(4)
+# define OPMODE0 BIT(3)
+# define TERMSELECT BIT(2)
+# define XCVRSELECT1 BIT(1)
+# define XCVRSELECT0 BIT(0)
+#define MSIC_DEBUG 0x3a5
+#define ULPI_DEBUG 0x15
+# define LINESTATE_MSK (BIT(0) | BIT(1))
+# define LINESTATE_SE1 (BIT(0) | BIT(1))
+# define LINESTATE_SE0 (0)
+# define LINESTATE_FSJ BIT(0)
+# define LINESTATE_FSK BIT(1)
+#define MSIC_VS1 0x3b6
+#define MSIC_VS1SET 0x3a9
+#define MSIC_VS1CLR 0x3aa
+#define ULPI_VS1 0x80
+#define ULPI_VS1SET 0x81
+#define ULPI_VS1CLR 0x82
+# define DATAPOLARITY BIT(6)
+#define ULPI_VS2STS 0x83
+#define ULPI_VS2LATCH 0x84
+# define VBUS_MNTR_STS BIT(7)
+# define REG3V3_MNTR_STS BIT(6)
+# define SVLDCONWKB_WDOG_STS BIT(5)
+# define IDFLOAT_STS BIT(4)
+# define IDRARBRC_STS(d) (((d)>>2)&3)
+# define IDRARBRC_STS1 BIT(3)
+# define IDRARBRC_STS2 BIT(2)
+# define IDRARBRC_MSK (BIT(2) | BIT(3))
+# define IDRARBRC_A 1
+# define IDRARBRC_B 2
+# define IDRARBRC_C 3
+# define BVALID_STS BIT(0)
+#define MSIC_VS3 0x3b9
+#define MSIC_VS3SET 0x346 /* Vendor Specific */
+#define MSIC_VS3CLR 0x347
+# define SWUSBDET BIT(4)
+# define DATACONEN BIT(3)
+#define ULPI_VS3 0x85
+#define ULPI_VS3SET 0x86
+#define ULPI_VS3CLR 0x87
+# define CHGD_IDP_SRC BIT(6)
+# define IDPULLUP_WK BIT(5)
+# define SWUSBDET BIT(4)
+# define DATACONEN BIT(3)
+#define MSIC_VS4 0x3ba
+#define MSIC_VS4SET 0x3ab
+#define MSIC_VS4CLR 0x3ac
+#define ULPI_VS4 0x88
+#define ULPI_VS4SET 0x89
+#define ULPI_VS4CLR 0x8a
+# define ACADET BIT(6)
+# define RABUSIN BIT(5)
+# define R1KERIES BIT(4)
+# define CHRG_SERX_DP BIT(1)
+# define CHRG_SERX_DM BIT(0)
+#define ULPI_VS5 0x8b
+#define ULPI_VS5SET 0x8c
+#define ULPI_VS5CLR 0x8d
+# define AUTORESUME_WDOG BIT(6)
+# define IDFLOAT_EN BIT(5)
+# define IDRES_EN BIT(4)
+# define SVLDCONWKB_WDOG BIT(3)
+# define VBUS_MNTR_RISEEN BIT(2)
+# define VBUS_MNTR_FALLEN BIT(1)
+# define REG3V3IN_MNTR_EN BIT(0)
+#define ULPI_VS6 0x8e
+#define ULPI_VS6SET 0x8f
+#define ULPI_VS6CLR 0x90
+# define ACA_RID_B_CFG BIT(7)
+# define ACA_RID_A_CFG BIT(6)
+# define SOF_EN BIT(5)
+#define MSIC_ULPIACCESSMODE 0x348
+# define SPIMODE BIT(0)
+#define MSIC_INT_EN_RISE 0x39D
+#define MSIC_INT_EN_RISE_SET 0x39E
+#define MSIC_INT_EN_RISE_CLR 0x39F
+#define MSIC_INT_EN_FALL 0x3A0
+#define MSIC_INT_EN_FALL_SET 0x3A1
+#define MSIC_INT_EN_FALL_CLR 0x3A2
+
+/* MSIC TI implementation for ADP/ACA */
+#define SPI_TI_VS2 0x3B7
+#define SPI_TI_VS2_LATCH 0x3B8
+#define SPI_TI_VS4 0x3BA
+#define SPI_TI_VS5 0x3BB
+#define ULPI_TI_USB_INT_STS 0x13
+#define ULPI_TI_USB_INT_LAT 0x14
+# define USB_INT_IDGND BIT(4)
+# define USB_INT_SESSEND BIT(3)
+# define USB_INT_SESSVLD BIT(2)
+# define USB_INT_VBUSVLD BIT(1)
+#define ULPI_TI_VS2 0x83
+# define TI_ID_FLOAT_STS BIT(4)
+# define TI_ID_RARBRC_STS(d) (((d)>>2)&3)
+# define TI_ID_RARBRC_STS_MASK (BIT(3) | BIT(2))
+# define TI_ID_RARBRC_NONE 0
+# define TI_ID_RARBRC_A 1
+# define TI_ID_RARBRC_B 2
+# define TI_ID_RARBRC_C 3
+# define TI_ADP_INT_STS BIT(1)
+#define ULPI_TI_VS4 0x88
+# define TI_ACA_DET_EN BIT(6)
+#define ULPI_TI_VS5 0x8b
+# define TI_ADP_INT_EN BIT(7)
+# define TI_ID_FLOAT_EN BIT(5)
+# define TI_ID_RES_EN BIT(4)
+#define ULPI_TI_VS6 0x8e
+# define TI_HS_TXPREN BIT(4)
+# define TI_ADP_MODE(d) (((d)>>2)&3)
+# define TI_ADP_MODE_MASK (BIT(3) | BIT(2))
+# define TI_ADP_MODE_DISABLE 0
+# define TI_ADP_MODE_SENSE 1
+# define TI_ADP_MODE_PRB_A 2
+# define TI_ADP_MODE_PRB_B 3
+# define TI_VBUS_IADP_SRC BIT(1)
+# define TI_VBUS_IADP_SINK BIT(0)
+#define ULPI_TI_VS7 0x91
+# define TI_T_ADP_HIGH (0xff)
+#define ULPI_TI_VS8 0x94
+# define TI_T_ADP_LOW (0xff)
+#define ULPI_TI_VS9 0x97
+# define TI_T_ADP_RISE (0xff)
+
+#define TI_PRB_DELTA 0x08
+
+/* MSIC FreeScale Implementation for ADP */
+#define ULPI_FS_ADPCL 0x28
+# define ADPCL_PRBDSCHG (BIT(5) | BIT(6))
+# define ADPCL_PRBDSCHG_4 0
+# define ADPCL_PRBDSCHG_8 1
+# define ADPCL_PRBDSCHG_16 2
+# define ADPCL_PRBDSCHG_32 3
+# define ADPCL_PRBPRD (BIT(3) | BIT(4))
+# define ADPCL_PRBPRD_A_HALF 0
+# define ADPCL_PRBPRD_B_HALF 1
+# define ADPCL_PRBPRD_A 2
+# define ADPCL_PRBPRD_B 3
+# define ADPCL_SNSEN BIT(2)
+# define ADPCL_PRBEN BIT(1)
+# define ADPCL_ADPEN BIT(0)
+#define ULPI_FS_ADPCH 0x29
+# define ADPCH_PRBDELTA (0x1f << 0)
+#define ULPI_FS_ADPIE 0x2a
+# define ADPIE_ADPRAMPIE BIT(2)
+# define ADPIE_SNSMISSIE BIT(1)
+# define ADPIE_PRBTRGIE BIT(0)
+#define ULPI_FS_ADPIS 0x2b
+# define ADPIS_ADPRAMPS BIT(5)
+# define ADPIS_SNSMISSS BIT(4)
+# define ADPIS_PRBTRGS BIT(3)
+# define ADPIS_ADPRAMPI BIT(2)
+# define ADPIS_SNSMISSI BIT(1)
+# define ADPIS_PRBTRGI BIT(0)
+#define ULPI_FS_ADPRL 0x2c
+# define ADPRL_ADPRAMP (0xff << 0)
+#define ULPI_FS_ADPRH 0x2d
+# define ADPRH_ADPRAMP (0x7 << 0)
+
+#define FS_ADPI_MASK (ADPIS_ADPRAMPI | ADPIS_SNSMISSI | ADPIS_PRBTRGI)
+
+/* define Data connect checking timeout and polling interval */
+#define DATACON_TIMEOUT 750
+#define DATACON_INTERVAL 20
+
+enum penwell_otg_timer_type {
+ TA_WAIT_VRISE_TMR,
+ TA_WAIT_BCON_TMR,
+ TA_AIDL_BDIS_TMR,
+ TA_BIDL_ADIS_TMR,
+ TA_WAIT_VFALL_TMR,
+ TB_ASE0_BRST_TMR,
+ TB_SE0_SRP_TMR,
+ TB_SRP_FAIL_TMR, /* wait for response of SRP */
+ TB_BUS_SUSPEND_TMR,
+ TTST_MAINT_TMR,
+ TTST_NOADP_TMR,
+};
+
+#define TA_WAIT_VRISE 100
+#define TA_WAIT_BCON 50000
+#define TA_AIDL_BDIS 1500
+#define TA_BIDL_ADIS 300
+#define TA_WAIT_VFALL 950
+#define TB_ASE0_BRST 300
+#define TB_SE0_SRP 1200
+#define TB_SSEND_SRP 1800
+# define SRP_MON_INVAL 300 /* TODO: interval needs more tuning */
+#define TB_SRP_FAIL 5500
+#define TB_BUS_SUSPEND 500
+#define THOS_REQ_POL 1500
+/* Test mode */
+#define TTST_MAINT 9900
+#define TTST_NOADP 5000
+
+/* MSIC vendor information */
+enum msic_vendor {
+ MSIC_VD_FS,
+ MSIC_VD_TI,
+ MSIC_VD_UNKNOWN
+};
+
+/* charger defined in BC 1.2 */
+enum usb_charger_type {
+ CHRG_UNKNOWN,
+ CHRG_SDP, /* Standard Downstream Port */
+ CHRG_CDP, /* Charging Downstream Port */
+	CHRG_SDP_INVAL,	/* Invalid Standard Downstream Port */
+ CHRG_DCP, /* Dedicated Charging Port */
+ CHRG_ACA, /* Accessory Charger Adapter */
+ CHRG_ACA_DOCK, /* Accessory Charger Adapter - Dock */
+ CHRG_ACA_A, /* Accessory Charger Adapter - RID_A */
+ CHRG_ACA_B, /* Accessory Charger Adapter - RID_B */
+ CHRG_ACA_C, /* Accessory Charger Adapter - RID_C */
+ CHRG_SE1, /* SE1 (Apple)*/
+	CHRG_MHL	/* Mobile High-Definition Link */
+};
+
+struct adp_status {
+ struct completion adp_comp;
+ u8 t_adp_rise;
+};
+
+/* Invalid SDP checking timeout */
+#define INVALID_SDP_TIMEOUT (HZ * 15)
+
+/* OTG Battery Charging capability is used in charger capability detection */
+struct otg_bc_cap {
+ enum usb_charger_type chrg_type;
+ unsigned int ma;
+#define CHRG_CURR_UNKNOWN 0
+#define CHRG_CURR_DISCONN 0
+#define CHRG_CURR_SDP_SUSP 2
+#define CHRG_CURR_SDP_UNCONFIG 100
+#define CHRG_CURR_SDP_LOW 100
+#define CHRG_CURR_SDP_HIGH 500
+#define CHRG_CURR_SDP_INVAL 500
+#define CHRG_CURR_CDP 1500
+#define CHRG_CURR_DCP 1500
+#define CHRG_CURR_SE1 1500
+#define CHRG_CURR_ACA 1500
+ unsigned int current_event;
+};
+
+struct otg_bc_event {
+ struct list_head node;
+ struct power_supply_cable_props cap;
+};
+
+/* Bus monitor action for b_ssend_srp/b_se0_srp */
+#define BUS_MON_STOP 0
+#define BUS_MON_START 1
+#define BUS_MON_CONTINUE 2
+
+/* define event ids to notify battery driver */
+#define USBCHRG_EVENT_CONNECT 1
+#define USBCHRG_EVENT_DISCONN 2
+#define USBCHRG_EVENT_SUSPEND 3
+#define USBCHRG_EVENT_RESUME 4
+#define USBCHRG_EVENT_UPDATE 5
+
+struct intel_mid_otg_pdata {
+ int gpio_vbus;
+ int gpio_cs;
+ int gpio_reset;
+ int charging_compliance;
+ int hnp_poll_support;
+ unsigned power_budget;
+};
+
+struct penwell_otg {
+ struct intel_mid_otg_xceiv iotg;
+ struct device *dev;
+
+ unsigned region;
+ unsigned cfg_region;
+
+ struct work_struct work;
+ struct work_struct hnp_poll_work;
+ struct work_struct psc_notify;
+ struct work_struct uevent_work;
+ struct delayed_work ulpi_poll_work;
+ struct delayed_work ulpi_check_work;
+ struct delayed_work sdp_check_work;
+ struct workqueue_struct *qwork;
+ struct workqueue_struct *chrg_qwork;
+
+
+ struct timer_list hsm_timer;
+ struct timer_list hnp_poll_timer;
+ struct timer_list bus_mon_timer;
+
+ unsigned long b_se0_srp_time;
+ unsigned long b_ssend_srp_time;
+
+ struct mutex msic_mutex;
+ enum msic_vendor msic;
+
+ struct notifier_block iotg_notifier;
+ int queue_stop;
+
+ struct adp_status adp;
+
+ spinlock_t charger_lock;
+ struct list_head chrg_evt_queue;
+ struct otg_bc_cap charging_cap;
+ spinlock_t cap_lock;
+ struct power_supply_cable_props psc_cap;
+ int (*bc_callback)(void *arg, int event, struct otg_bc_cap *cap);
+ void *bc_arg;
+
+ unsigned rt_resuming;
+
+ unsigned rt_quiesce;
+ struct intel_mid_otg_pdata *otg_pdata;
+
+ struct wake_lock wake_lock;
+ spinlock_t lock;
+
+ int phy_power_state;
+};
+
+static inline
+struct penwell_otg *iotg_to_penwell(struct intel_mid_otg_xceiv *iotg)
+{
+ return container_of(iotg, struct penwell_otg, iotg);
+}
+
+extern int penwell_otg_query_charging_cap(struct otg_bc_cap *cap);
+extern int penwell_otg_query_power_supply_cap(
+ struct power_supply_cable_props *cap);
+extern void *penwell_otg_register_bc_callback(
+ int (*cb)(void *, int, struct otg_bc_cap *), void *arg);
+extern int penwell_otg_unregister_bc_callback(void *handler);
+
+extern int pnw_otg_ulpi_write(u8 reg, u8 val);
+extern int is_clovertrail(struct pci_dev *pdev);
+
+#endif /* __PENWELL_OTG_H__ */
USB_EVENT_ID, /* id was grounded */
USB_EVENT_CHARGER, /* usb dedicated charger */
USB_EVENT_ENUMERATED, /* gadget driver enumerated */
+ USB_EVENT_DRIVE_VBUS, /* drive vbus request */
};
/* associate a type with PHY */
enum usb_device_speed speed);
int (*notify_disconnect)(struct usb_phy *x,
enum usb_device_speed speed);
+
+ /* check charger status */
+ int (*get_chrg_status)(struct usb_phy *x, void *data);
};
/**
return "UNKNOWN PHY TYPE";
}
}
+
+static inline int
+otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+ if (x && x->get_chrg_status)
+ return x->get_chrg_status(x, data);
+
+ return -ENOTSUPP;
+}
+
#endif /* __LINUX_USB_PHY_H */
struct wl12xx_platform_data {
void (*set_power)(bool enable);
+ int (*hw_init)(struct wl12xx_platform_data *pdata);
+ void (*hw_deinit)(struct wl12xx_platform_data *pdata);
/* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
int irq;
+ /* gpio must be set to -EINVAL by platform code if gpio based irq is
+ not used */
+ int gpio;
bool use_eeprom;
int board_ref_clock;
int board_tcxo_clock;
--- /dev/null
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+struct wifi_platform_data {
+ int (*set_power)(int val);
+ int (*set_reset)(int val);
+ int (*set_carddetect)(int val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+ void *(*get_country_code)(char *ccode);
+ char *nvram_id;
+ bool use_fast_irq;
+};
+
+#endif
bdaddr_t dst;
__u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
__u16 handle;
__u16 state;
__u8 mode;
__u8 state;
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
+
__le16 psm;
+ __le16 sport;
__u16 dcid;
__u16 scid;
__u8 chan_type;
__u8 chan_policy;
- __le16 sport;
-
__u8 sec_level;
__u8 ident;
#include <sound/pcm.h>
struct snd_compr_ops;
+struct snd_pcm_substream;
/**
* struct snd_compr_runtime: runtime stream description
* the ring buffer
* @total_bytes_transferred: cumulative bytes transferred by offload DSP
* @sleep: poll sleep
+ * @wait: drain wait queue
+ * @drain_wake: condition for drain wake
*/
struct snd_compr_runtime {
snd_pcm_state_t state;
u64 total_bytes_available;
u64 total_bytes_transferred;
wait_queue_head_t sleep;
+ wait_queue_head_t wait;
+ unsigned int drain_wake;
+ struct snd_pcm_substream *fe_substream;
void *private_data;
};
int snd_compress_deregister(struct snd_compr *device);
int snd_compress_new(struct snd_card *card, int device,
int type, struct snd_compr *compr);
+int snd_compr_stop(struct snd_compr_stream *stream);
/* dsp driver callback apis
* For playback: driver should call snd_compress_fragment_elapsed() to let the
wake_up(&stream->runtime->sleep);
}
+static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+{
+ snd_BUG_ON(!stream);
+
+ stream->runtime->drain_wake = 1;
+ wake_up(&stream->runtime->wait);
+}
+
#endif
struct snd_mixer_oss *mixer_oss;
int mixer_oss_change_count;
#endif
+
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+ struct snd_effect_ops *effect_ops;
+ struct mutex effect_lock; /* effect lock */
+#endif
};
#ifdef CONFIG_PM
--- /dev/null
+/*
+ * effect_driver.h - effect offload driver APIs
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __EFFECT_DRIVER_H
+#define __EFFECT_DRIVER_H
+
+#include <sound/effect_offload.h>
+
+struct snd_effect_ops {
+ int (*create)(struct snd_card *card, struct snd_effect *effect);
+ int (*destroy)(struct snd_card *card, struct snd_effect *effect);
+ int (*set_params)(struct snd_card *card,
+ struct snd_effect_params *params);
+ int (*get_params)(struct snd_card *card,
+ struct snd_effect_params *params);
+ int (*query_num_effects)(struct snd_card *card);
+ int (*query_effect_caps)(struct snd_card *card,
+ struct snd_effect_caps *caps);
+};
+
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+int snd_effect_register(struct snd_card *card, struct snd_effect_ops *ops);
+int snd_effect_deregister(struct snd_card *card);
+#else
+static inline int snd_effect_register(struct snd_card *card,
+ struct snd_effect_ops *ops)
+{
+ return -ENODEV;
+}
+static inline int snd_effect_deregister(struct snd_card *card)
+{
+ return -ENODEV;
+}
+#endif
+
+/* IOCTL fns */
+int snd_ctl_effect_create(struct snd_card *card, void *arg);
+int snd_ctl_effect_destroy(struct snd_card *card, void *arg);
+int snd_ctl_effect_set_params(struct snd_card *card, void *arg);
+int snd_ctl_effect_get_params(struct snd_card *card, void *arg);
+int snd_ctl_effect_query_num_effects(struct snd_card *card, void *arg);
+int snd_ctl_effect_query_effect_caps(struct snd_card *card, void *arg);
+#endif
--- /dev/null
+#ifndef __INTEL_SST_IOCTL_H__
+#define __INTEL_SST_IOCTL_H__
+/*
+ * intel_sst_ioctl.h - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corporation
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file defines all sst ioctls
+ */
+
+/* codec and post/pre processing related info */
+
+#include <linux/types.h>
+
+/* Pre and post processing params structure */
+struct snd_ppp_params {
+ __u8 algo_id;/* Post/Pre processing algorithm ID */
+ __u8 str_id; /*Only 5 bits used 0 - 31 are valid*/
+ __u8 enable; /* 0= disable, 1= enable*/
+ __u8 operation; /* 0 = set_algo, 1 = get_algo */
+ __u32 size; /*Size of parameters for all blocks*/
+ void *params;
+} __packed;
+
+struct snd_sst_driver_info {
+ __u32 max_streams;
+};
+
+struct snd_sst_tuning_params {
+ __u8 type;
+ __u8 str_id;
+ __u8 size;
+ __u8 rsvd;
+ __u64 addr;
+} __packed;
+
+/*IOCTL defined here */
+/*SST common ioctls */
+#define SNDRV_SST_DRIVER_INFO _IOR('L', 0x10, struct snd_sst_driver_info)
+#define SNDRV_SST_SET_ALGO _IOW('L', 0x30, struct snd_ppp_params)
+#define SNDRV_SST_GET_ALGO _IOWR('L', 0x31, struct snd_ppp_params)
+#define SNDRV_SST_TUNING_PARAMS _IOW('L', 0x32, struct snd_sst_tuning_params)
+#endif /* __INTEL_SST_IOCTL_H__ */
unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */
unsigned long hw_ptr_buffer_jiffies; /* buffer time in jiffies */
snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */
+ snd_pcm_sframes_t soc_delay; /* extra delay; typically delay incurred in soc */
u64 hw_ptr_wrap; /* offset for hw_ptr due to boundary wrap-around */
/* -- HW params -- */
struct snd_soc_dai_driver *driver;
/* DAI runtime info */
- unsigned int capture_active:1; /* stream is in use */
- unsigned int playback_active:1; /* stream is in use */
+	unsigned int capture_active;	/* capture streams in use */
+	unsigned int playback_active;	/* playback streams in use */
unsigned int symmetric_rates:1;
struct snd_pcm_runtime *runtime;
unsigned int active;
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list);
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
+
/* dapm widget types */
enum snd_soc_dapm_type {
snd_soc_dapm_input = 0, /* input pin */
#ifndef __LINUX_SND_SOC_DPCM_H
#define __LINUX_SND_SOC_DPCM_H
+#include <linux/slab.h>
#include <linux/list.h>
#include <sound/pcm.h>
int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *);
+int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
+ int stream, struct snd_soc_dapm_widget_list **list_);
+int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
+ int stream, struct snd_soc_dapm_widget_list **list, int new);
+int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
+int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+ int event);
+
+static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
+{
+ kfree(*list);
+}
+
+
#endif
#include <sound/compress_driver.h>
#include <sound/control.h>
#include <sound/ac97_codec.h>
+#include <sound/effect_driver.h>
/*
* Convenience kcontrol builders
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
.platform_max = xmax, .invert = xinvert} }
+#define SND_SOC_BYTES_EXT(xname, xcount, xhandler_get, xhandler_put) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_bytes_ext, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct soc_bytes_ext) \
+ {.max = xcount} }
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
const char *dai_link, int stream);
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
const char *dai_link);
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+int snd_soc_register_effect(struct snd_soc_card *card,
+ struct snd_effect_ops *ops);
+int snd_soc_unregister_effect(struct snd_soc_card *card);
+#else
+static inline int snd_soc_register_effect(struct snd_soc_card *card,
+ struct snd_effect_ops *ops)
+{
+ return -ENODEV;
+}
+static inline int snd_soc_unregister_effect(struct snd_soc_card *card)
+{
+ return -ENODEV;
+}
+#endif
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
struct snd_ctl_elem_value *ucontrol);
int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+int snd_soc_info_bytes_ext(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *ucontrol);
/**
* struct snd_soc_reg_access - Describes whether a given register is
* @name: gpio name
* @report: value to report when jack detected
* @invert: report presence in low state
+ * @irq_flags: Interrupt flags for GPIO IRQ line
* @debouce_time: debouce time in ms
* @wake: enable as wake source
* @jack_status_check: callback function which overrides the detection
int invert;
int debounce_time;
bool wake;
+ unsigned long irq_flags;
struct snd_soc_jack *jack;
struct delayed_work work;
/* machine stream operations */
const struct snd_soc_ops *ops;
const struct snd_soc_compr_ops *compr_ops;
+
+	/* number of substreams */
+ unsigned int playback_count;
+ unsigned int capture_count;
};
struct snd_soc_codec_conf {
/* Dynamic PCM BE runtime data */
struct snd_soc_dpcm_runtime dpcm[2];
+ int fe_compr;
long pmdown_time;
unsigned char pop_wait:1;
unsigned int regbase, regcount, nbits, invert;
};
+struct soc_bytes_ext {
+ int max;
+};
+
/* enumerated kcontrol */
struct soc_enum {
unsigned short reg;
#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
+struct mmc_ioc_rpmb_req {
+ __u16 type; /* RPMB request type */
+ __u16 *result; /* response or request result */
+ __u16 blk_cnt; /* Number of blocks(half sector 256B) */
+ __u16 addr; /* data address */
+ __u32 *wc; /* write counter */
+	__u8 *nonce;	/* Random number */
+ __u8 *data; /* Buffer of the user data */
+ __u8 *mac; /* Message Authentication Code */
+};
+
+#define MMC_IOC_RPMB_REQ _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_rpmb_req)
+
/*
* Since this ioctl is only meant to enhance (and not replace) normal access
* to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
#define USB_CLASS_CSCID 0x0b /* chip+ smart card */
#define USB_CLASS_CONTENT_SEC 0x0d /* content security */
#define USB_CLASS_VIDEO 0x0e
+#define USB_CLASS_DEBUG 0xdc
#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
#include <sound/compress_params.h>
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
/**
* struct snd_compressed_buffer: compressed buffer
* @fragment_size: size of buffer fragment in bytes
struct snd_compr_tstamp {
__u32 byte_offset;
__u32 copied_total;
- snd_pcm_uframes_t pcm_frames;
- snd_pcm_uframes_t pcm_io_frames;
+ __u32 pcm_frames;
+ __u32 pcm_io_frames;
__u32 sampling_rate;
};
struct snd_compr_avail {
__u64 avail;
struct snd_compr_tstamp tstamp;
-};
+} __attribute__((packed));
enum snd_compr_direction {
SND_COMPRESS_PLAYBACK = 0,
--- /dev/null
+/*
+ * effect_offload.h - effect offload header definitions
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Authors:	Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __EFFECT_OFFLOAD_H
+#define __EFFECT_OFFLOAD_H
+
+#include <linux/types.h>
+
+/* NOTE(review): SNDRV_PROTOCOL_VERSION comes from <sound/asound.h>; users of
+ * this header must include it first — confirm intended include order. */
+#define SNDRV_EFFECT_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 0)
+
+/* Identifies an effect instance and where it is inserted. */
+struct snd_effect {
+	char uuid[16];	/* effect UUID */
+	int device;	/* streaming interface for effect insertion */
+	int pos;	/* position of effect to be placed in effect chain */
+	int mode;	/* Backend for Global device (Headset/Speaker) */
+};
+
+/* Parameter blob for one effect instance.
+ * This struct crosses the user/kernel boundary via ioctl, so it must use
+ * the exported __u32 type, not the kernel-internal u32 (which does not
+ * exist for userspace builds).
+ * NOTE(review): the embedded 'char *buffer' makes the layout differ for
+ * 32-bit userspace on a 64-bit kernel — confirm the compat strategy. */
+struct snd_effect_params {
+	char uuid[16];
+	int device;
+	__u32 size;	/* size of parameter blob */
+	char *buffer;
+};
+
+/* Capability query buffer supplied by userspace. */
+struct snd_effect_caps {
+	__u32 size;	/* size of buffer to read effect descriptors */
+	char *buffer;
+};
+
+/* NOTE(review): passing the pointer type (struct snd_effect *) to _IOW
+ * encodes sizeof(void *) in the ioctl number instead of the struct size;
+ * the usual convention is _IOW(..., struct snd_effect). Confirm before the
+ * ABI is frozen — changing it later changes the ioctl numbers. */
+#define SNDRV_CTL_IOCTL_EFFECT_VERSION	_IOR('E', 0x00, int)
+#define SNDRV_CTL_IOCTL_EFFECT_CREATE	_IOW('E', 0x01,\
+						struct snd_effect *)
+#define SNDRV_CTL_IOCTL_EFFECT_DESTROY	_IOW('E', 0x02,\
+						struct snd_effect *)
+#define SNDRV_CTL_IOCTL_EFFECT_SET_PARAMS _IOW('E', 0x03,\
+						struct snd_effect_params *)
+#define SNDRV_CTL_IOCTL_EFFECT_GET_PARAMS _IOWR('E', 0x04,\
+						struct snd_effect_params *)
+#define SNDRV_CTL_IOCTL_EFFECT_QUERY_NUM _IOR('E', 0x05, int)
+#define SNDRV_CTL_IOCTL_EFFECT_QUERY_CAPS _IOWR('E', 0x06,\
+						struct snd_effect_caps *)
+#endif
int ret;
pr_debug("calling %pF @ %i\n", fn, task_pid_nr(current));
+
calltime = ktime_get();
ret = fn();
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
pr_debug("initcall %pF returned %d after %lld usecs\n",
fn, ret, duration);
/* @tsk either already exited or can't exit until the end */
if (tsk->flags & PF_EXITING)
- continue;
+ goto next;
/* as per above, nr_threads may decrease, but not increase. */
BUG_ON(i >= group_size);
ent.cgrp = task_cgroup_from_root(tsk, root);
/* nothing to do if this task is already in the cgroup */
if (ent.cgrp == cgrp)
- continue;
+ goto next;
/*
* saying GFP_ATOMIC has no effect here because we did prealloc
* earlier, but it's good form to communicate our expectations.
retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
BUG_ON(retval != 0);
i++;
-
+ next:
if (!threadgroup)
break;
} while_each_thread(leader, tsk);
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
+ irq_settings_set_chained(desc);
irq_startup(desc, true);
}
out:
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+ _IRQ_CHAINED = IRQ_CHAINED,
};
#define IRQ_PER_CPU GOT_YOU_MORON
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
+#define IRQ_CHAINED GOT_YOU_MORON
static inline void
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
{
return desc->status_use_accessors & _IRQ_NESTED_THREAD;
}
+
+/*
+ * Mark @desc as a chained interrupt in its status accessors.
+ * NOTE(review): the sibling irq_settings_set_* helpers are void; returning
+ * the result of the |= here is harmless but inconsistent — the one visible
+ * caller ignores the return value.
+ */
+static inline bool irq_settings_set_chained(struct irq_desc *desc)
+{
+	return desc->status_use_accessors |= _IRQ_CHAINED;
+}
u64 elapsed_csecs64;
unsigned int elapsed_csecs;
bool wakeup = false;
+ int sleep_usecs = USEC_PER_MSEC;
+ char *busy_wq_name = NULL;
do_gettimeofday(&start);
log_next_seq++;
}
+/*
+ * Clears the ring-buffer: logically empties the log by advancing the
+ * "cleared up to" sequence/index markers to the current write position.
+ * No data is erased; readers that honour clear_seq/clear_idx simply skip
+ * everything logged so far.
+ * NOTE(review): clear_seq/clear_idx are normally updated under logbuf_lock —
+ * confirm callers hold it (no locking is taken here).
+ */
+void log_buf_clear(void)
+{
+	clear_seq = log_next_seq;
+	clear_idx = log_next_idx;
+}
+
#ifdef CONFIG_SECURITY_DMESG_RESTRICT
int dmesg_restrict = 1;
#else
{
int retval = 0, wake = 0;
- if (console_trylock()) {
+ if (!in_nmi() && console_trylock()) {
retval = 1;
/*
}
lockdep_off();
- raw_spin_lock(&logbuf_lock);
+ if (unlikely(in_nmi())) {
+ if (!raw_spin_trylock(&logbuf_lock))
+ goto out_restore_lockdep_irqs;
+ } else {
+ raw_spin_lock(&logbuf_lock);
+ }
+
logbuf_cpu = this_cpu;
if (recursion_bug) {
if (console_trylock_for_printk(this_cpu))
console_unlock();
+out_restore_lockdep_irqs:
lockdep_on();
out_restore_irqs:
local_irq_restore(flags);
if (isspace(ch)) {
parser->buffer[parser->idx] = 0;
parser->cont = false;
- } else {
+ } else if (parser->idx < parser->size - 1) {
parser->cont = true;
parser->buffer[parser->idx++] = ch;
+ } else {
+ ret = -EINVAL;
+ goto out;
}
*ppos += read;
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event);
+/*
+ * Map an HCI-level address type to the socket-level BDADDR_* constant for
+ * the given connection: LE links distinguish public vs. random addresses,
+ * everything else is classic BR/EDR.
+ */
+static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+{
+	if (hcon->type == LE_LINK) {
+		if (type == ADDR_LE_DEV_PUBLIC)
+			return BDADDR_LE_PUBLIC;
+		else
+			return BDADDR_LE_RANDOM;
+	}
+
+	return BDADDR_BREDR;
+}
+
/* ---- L2CAP channels ---- */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
+ chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+ chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
l2cap_chan_add(conn, chan);
/* Set destination address and psm */
lock_sock(sk);
bacpy(&bt_sk(sk)->dst, dst);
+ chan->dst_type = dst_type;
release_sock(sk);
chan->psm = psm;
auth_type = l2cap_get_auth_type(chan);
- if (chan->dcid == L2CAP_CID_LE_DATA)
+ if (bdaddr_type_is_le(dst_type))
hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
chan->sec_level, auth_type);
else
/* Update source addr of the socket */
bacpy(src, conn->src);
+ chan->src_type = bdaddr_type(hcon, hcon->src_type);
l2cap_chan_unlock(chan);
l2cap_chan_add(conn, chan);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
+ chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+ chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
chan->psm = psm;
chan->dcid = scid;
chan->local_amp_id = amp_id;
__le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
chan->sec_level = BT_SECURITY_SDP;
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+ bacpy(&chan->src, &la.l2_bdaddr);
+ chan->src_type = la.l2_bdaddr_type;
chan->state = BT_BOUND;
sk->sk_state = BT_BOUND;
la->l2_psm = chan->psm;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
la->l2_cid = cpu_to_le16(chan->dcid);
+ la->l2_bdaddr_type = chan->dst_type;
} else {
la->l2_psm = chan->sport;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
la->l2_cid = cpu_to_le16(chan->scid);
+ la->l2_bdaddr_type = chan->src_type;
}
return 0;
atomic_inc(&qp->refcnt);
hlist_add_head(&qp->list, &hb->chain);
+ inet_frag_lru_add(nf, qp);
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
- inet_frag_lru_add(nf, qp);
+
return qp;
}
const char *msg;
u_int8_t state;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
type = dh->dccph_type;
unsigned int cscov;
const char *msg;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
config SND_COMPRESS_OFFLOAD
tristate
+config SND_EFFECTS_OFFLOAD
+ tristate
+
# To be effective this also requires INPUT - users should say:
# select SND_JACK if INPUT=y || INPUT=SND
# to avoid having to force INPUT on.
snd-hwdep-objs := hwdep.o
snd-compress-objs := compress_offload.o
+snd-effects-objs := effects_offload.o
obj-$(CONFIG_SND) += snd.o
obj-$(CONFIG_SND_HWDEP) += snd-hwdep.o
obj-$(CONFIG_SND_SEQUENCER) += seq/
obj-$(CONFIG_SND_COMPRESS_OFFLOAD) += snd-compress.o
+obj-$(CONFIG_SND_EFFECTS_OFFLOAD) += snd-effects.o
}
runtime->state = SNDRV_PCM_STATE_OPEN;
init_waitqueue_head(&runtime->sleep);
+ init_waitqueue_head(&runtime->wait);
data->stream.runtime = runtime;
f->private_data = (void *)data;
mutex_lock(&compr->lock);
static int snd_compr_free(struct inode *inode, struct file *f)
{
struct snd_compr_file *data = f->private_data;
+ struct snd_compr_runtime *runtime = data->stream.runtime;
+
+ switch (runtime->state) {
+ case SNDRV_PCM_STATE_RUNNING:
+ case SNDRV_PCM_STATE_DRAINING:
+ case SNDRV_PCM_STATE_PAUSED:
+ data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+ break;
+ default:
+ break;
+ }
+
data->stream.ops->free(&data->stream);
kfree(data->stream.runtime->buffer);
kfree(data->stream.runtime);
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
- int retval;
+ int retval = 0;
if (snd_BUG_ON(!data))
return -EFAULT;
stream = &data->stream;
mutex_lock(&stream->device->lock);
- /* write is allowed when stream is running or has been steup */
+ /*
+ * if the stream is in paused state, return the
+ * number of bytes consumed as 0
+ */
+ if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED) {
+ mutex_unlock(&stream->device->lock);
+ return retval;
+ }
+ /* write is allowed when stream is running or prepared or in setup */
if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
- stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
+ stream->runtime->state != SNDRV_PCM_STATE_RUNNING &&
+ stream->runtime->state != SNDRV_PCM_STATE_PREPARED) {
mutex_unlock(&stream->device->lock);
return -EBADFD;
}
return -EFAULT;
mutex_lock(&stream->device->lock);
- if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED ||
- stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
+ if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
retval = -EBADFD;
goto out;
}
retval = snd_compr_get_poll(stream);
break;
default:
+ pr_err("poll returns err!...\n");
if (stream->direction == SND_COMPRESS_PLAYBACK)
retval = POLLOUT | POLLWRNORM | POLLERR;
else
{
int retval;
- if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+ if ((stream->runtime->state != SNDRV_PCM_STATE_RUNNING) &&
+ (stream->runtime->state != SNDRV_PCM_STATE_DRAINING))
return -EPERM;
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
if (!retval)
if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED)
return -EPERM;
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
- if (!retval)
+ if (!retval) {
stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+ wake_up(&stream->runtime->sleep);
+ }
return retval;
}
return retval;
}
-static int snd_compr_stop(struct snd_compr_stream *stream)
+/*
+ * Stop a compressed stream: trigger STOP in the driver (unless the stream is
+ * merely PREPARED, where no trigger was ever started), drop back to SETUP,
+ * wake any sleepers/drain waiters and reset the byte counters.
+ * Exported (no longer static) so drivers can force a stop, e.g. on release.
+ */
+int snd_compr_stop(struct snd_compr_stream *stream)
 {
-	int retval;
+	int retval = 0;
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-	    stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
-	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+	/* nothing to tell the driver if we never left PREPARED */
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 		wake_up(&stream->runtime->sleep);
+		/* release anyone blocked in snd_compress_wait_for_drain() */
+		snd_compr_drain_notify(stream);
 		stream->runtime->total_bytes_available = 0;
 		stream->runtime->total_bytes_transferred = 0;
 	}
 	return retval;
 }
+EXPORT_SYMBOL(snd_compr_stop);
+
+/*
+ * Block until the driver signals drain completion (runtime->drain_wake).
+ *
+ * Called with the device lock held; the lock is dropped for the duration of
+ * the wait so the driver's notification path (and STOP) can make progress,
+ * and re-taken before returning to the caller.
+ */
+static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
+{
+	/*
+	 * We are called with lock held. So drop the lock while we wait for
+	 * drain complete notfication from the driver
+	 *
+	 * It is expected that driver will notify the drain completion and then
+	 * stream will be moved to SETUP state, even if draining resulted in an
+	 * error. We can trigger next track after this.
+	 */
+	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+	mutex_unlock(&stream->device->lock);
+
+	wait_event(stream->runtime->wait, stream->runtime->drain_wake);
+
+	wake_up(&stream->runtime->sleep);
+	mutex_lock(&stream->device->lock);
+
+	return 0;
+}
+/*
+ * Drain the stream: trigger DRAIN in the driver and block until the driver
+ * notifies completion, then fall back to SETUP. A PREPARED stream (nothing
+ * ever started) drains trivially and returns 0 without calling the driver.
+ */
 static int snd_compr_drain(struct snd_compr_stream *stream)
 {
-	int retval;
+	int retval = 0;
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-	    stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
-	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
-	if (!retval) {
-		stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+
+	/* arm the completion flag before triggering, so the driver's notify
+	 * cannot be lost in the window before we start waiting */
+	stream->runtime->drain_wake = 0;
+
+	/* this is hackish for our tree but for now lets carry it while we fix
+	 * usermode behaviour
+	 */
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+	else
+		return 0;
+
+	if (retval) {
+		pr_err("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 		wake_up(&stream->runtime->sleep);
+		return retval;
 	}
+
+	retval = snd_compress_wait_for_drain(stream);
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 	return retval;
 }
+/*
+ * Partial drain for gapless playback: only valid after the next track has
+ * been signalled. Triggers PARTIAL_DRAIN, waits for the driver's completion
+ * notification and returns the stream to SETUP.
+ */
 static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 {
-	int retval;
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-	    stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	int retval = 0;
+
+	/* again, hackish PREPARED handling — see snd_compr_drain() */
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
 	/* stream can be drained only when next track has been signalled */
 	if (stream->next_track == false)
 		return -EPERM;
-	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+	/* arm the completion flag before triggering the driver */
+	stream->runtime->drain_wake = 0;
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+	else
+		return 0;
+
+	if (retval) {
+		pr_err("Partial drain returned failure\n");
+		wake_up(&stream->runtime->sleep);
+		return retval;
+	}
 	stream->next_track = false;
+	retval = snd_compress_wait_for_drain(stream);
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 	return retval;
 }
.write = snd_compr_write,
.read = snd_compr_read,
.unlocked_ioctl = snd_compr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = snd_compr_ioctl,
+#endif
.mmap = snd_compr_mmap,
.poll = snd_compr_poll,
};
return -EBADFD;
compr = device->device_data;
- sprintf(str, "comprC%iD%i", compr->card->number, compr->device);
+ snprintf(str, sizeof(str), "comprC%iD%i", compr->card->number, compr->device);
pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
compr->direction);
/* register compressed device */
--- /dev/null
+/*
+ * effect_offload.c - effects offload core
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Authors: Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ * Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
+
+#include <linux/module.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/effect_offload.h>
+#include <sound/effect_driver.h>
+
+static DEFINE_MUTEX(effect_mutex);
+
+/*
+ * snd_ctl_effect_create - instantiate an effect from a userspace request
+ * @card: card whose effect_ops handle the request
+ * @arg: user pointer to a struct snd_effect
+ *
+ * Copies the descriptor from userspace and hands it to the registered
+ * ->create() op under the card's effect lock.
+ * Returns 0 or a negative error code (-ENOMEM, -EFAULT, or the op's result).
+ *
+ * NOTE(review): card->effect_ops (and effect_lock, which is only initialised
+ * in snd_effect_register()) are dereferenced unconditionally — confirm the
+ * ioctl path cannot reach here before registration.
+ */
+int snd_ctl_effect_create(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect *effect;
+
+	effect = kmalloc(sizeof(*effect), GFP_KERNEL);
+	if (!effect)
+		return -ENOMEM;
+	if (copy_from_user(effect, (void __user *)arg, sizeof(*effect))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	pr_debug("effect_offload: device %d, pos %d, mode%d\n",
+			effect->device, effect->pos, effect->mode);
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->create(card, effect);
+	mutex_unlock(&card->effect_lock);
+out:
+	kfree(effect);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_create);
+
+/*
+ * snd_ctl_effect_destroy - tear down an effect on a userspace request
+ * @card: card whose effect_ops handle the request
+ * @arg: user pointer to a struct snd_effect identifying the instance
+ *
+ * Mirror of snd_ctl_effect_create(): copies the descriptor in and invokes
+ * the registered ->destroy() op under the card's effect lock.
+ * Returns 0 or a negative error code.
+ */
+int snd_ctl_effect_destroy(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect *effect;
+
+	effect = kmalloc(sizeof(*effect), GFP_KERNEL);
+	if (!effect)
+		return -ENOMEM;
+	if (copy_from_user(effect, (void __user *)arg, sizeof(*effect))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->destroy(card, effect);
+	mutex_unlock(&card->effect_lock);
+out:
+	kfree(effect);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_destroy);
+
+/*
+ * snd_ctl_effect_set_params - push a parameter blob to an effect
+ * @card: card whose effect_ops handle the request
+ * @arg: user pointer to a struct snd_effect_params
+ *
+ * Copies the descriptor, then replaces the userspace buffer pointer with a
+ * freshly allocated kernel copy of the blob before calling ->set_params()
+ * under the effect lock. Returns 0 or a negative error code.
+ *
+ * NOTE(review): params->size is taken from userspace unvalidated and fed
+ * straight to kmalloc() — an upper bound should probably be enforced.
+ * The __user pointer is stashed in a plain 'char *' field (sparse will
+ * complain); harmless here but worth confirming.
+ */
+int snd_ctl_effect_set_params(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_params *params;
+	char __user *argp = (char __user *)arg;
+	char __user *bufp;
+
+	params = kmalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	if (copy_from_user(params, argp, sizeof(*params))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	bufp = params->buffer;
+	params->buffer = kmalloc(params->size, GFP_KERNEL);
+	if (!params->buffer) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user((void *)params->buffer, bufp, params->size)) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->set_params(card, params);
+	mutex_unlock(&card->effect_lock);
+free_buf:
+	kfree(params->buffer);
+out:
+	kfree(params);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_set_params);
+
+/*
+ * snd_ctl_effect_get_params - read back an effect's parameter blob
+ * @card: card whose effect_ops handle the request
+ * @arg: user pointer to a struct snd_effect_params; on success only the
+ *       'size' field and the pointed-to buffer are updated in userspace.
+ *
+ * Returns 0 or a negative error code (-EFAULT, -ENOMEM, -ENXIO, -EMSGSIZE,
+ * or the op's result).
+ */
+int snd_ctl_effect_get_params(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_params inparams;
+	struct snd_effect_params *outparams;
+	unsigned int offset;
+	char __user *argp = (char __user *)arg;
+
+	/* Fail fast on a bad descriptor: the previous code only recorded
+	 * -EFAULT here and then carried on using the uninitialised
+	 * 'inparams' (and clobbered retval a few lines later). */
+	if (copy_from_user((void *)&inparams, argp, sizeof(inparams)))
+		return -EFAULT;
+
+	/* ->get_params is optional at registration time (only create/destroy/
+	 * set_params are mandated), so guard against a NULL op. */
+	if (!card->effect_ops->get_params)
+		return -ENXIO;
+
+	outparams = kmalloc(sizeof(*outparams), GFP_KERNEL);
+	if (!outparams)
+		return -ENOMEM;
+
+	memcpy(outparams, &inparams, sizeof(inparams));
+	/* NOTE(review): inparams.size is user-controlled and unbounded here,
+	 * same as in set_params — consider enforcing a cap. */
+	outparams->buffer = kmalloc(inparams.size, GFP_KERNEL);
+	if (!outparams->buffer) {
+		retval = -ENOMEM;
+		goto free_out;
+	}
+
+	if (copy_from_user((void *)outparams->buffer, inparams.buffer,
+				inparams.size)) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->get_params(card, outparams);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval)
+		goto free_buf;
+
+	/* nothing to copy back if the op produced no data */
+	if (!outparams->size)
+		goto free_buf;
+
+	if (outparams->size > inparams.size) {
+		pr_err("mem insufficient to copy\n");
+		retval = -EMSGSIZE;
+		goto free_buf;
+	} else {
+		/* write back only the 'size' field, then the payload */
+		offset = offsetof(struct snd_effect_params, size);
+		if (copy_to_user((argp + offset), (void *)&outparams->size,
+					sizeof(u32)))
+			retval = -EFAULT;
+
+		if (copy_to_user(inparams.buffer, outparams->buffer,
+					outparams->size))
+			retval = -EFAULT;
+	}
+free_buf:
+	kfree(outparams->buffer);
+free_out:
+	kfree(outparams);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_get_params);
+
+/*
+ * snd_ctl_effect_query_num_effects - report the number of available effects
+ * @card: card whose effect_ops handle the request
+ * @arg: user pointer to an int receiving the count
+ *
+ * Returns 0 on success or a negative error code.
+ * NOTE(review): ->query_num_effects is not among the ops validated at
+ * register time — confirm it cannot be NULL when this ioctl is reachable.
+ */
+int snd_ctl_effect_query_num_effects(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	int __user *ip = arg;
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->query_num_effects(card);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval < 0)
+		goto out;
+	retval = put_user(retval, ip) ? -EFAULT : 0;
+out:
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_query_num_effects);
+
+/*
+ * snd_ctl_effect_query_effect_caps - copy effect descriptors to userspace
+ * @card: card whose effect_ops handle the request
+ * @arg: user pointer to a struct snd_effect_caps; 'size' is the capacity of
+ *       the user buffer on input and the bytes produced on output.
+ *
+ * Allocates a kernel bounce buffer of the user-supplied size, lets the
+ * driver fill it, then writes back the produced size and the data.
+ * Returns 0 or a negative error code (-EMSGSIZE if the driver produced more
+ * than the user buffer can hold).
+ *
+ * NOTE(review): caps->size is user-controlled and unbounded before the
+ * kmalloc(); ->query_effect_caps is also not validated at register time.
+ */
+int snd_ctl_effect_query_effect_caps(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_caps *caps;
+	unsigned int offset, insize;
+	char __user *argp = (char __user *)arg;
+	char __user *bufp;
+
+	caps = kmalloc(sizeof(*caps), GFP_KERNEL);
+	if (!caps)
+		return -ENOMEM;
+
+	if (copy_from_user(caps, argp, sizeof(*caps))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	/* remember the userspace buffer pointer and capacity before the
+	 * 'buffer' field is repointed at the kernel bounce buffer */
+	bufp = caps->buffer;
+	insize = caps->size;
+	caps->buffer = kmalloc(caps->size, GFP_KERNEL);
+	if (!caps->buffer) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->query_effect_caps(card, caps);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval)
+		goto free_buf;
+
+	if (insize < caps->size) {
+		pr_err("mem insufficient to copy\n");
+		retval = -EMSGSIZE;
+		goto free_buf;
+	}
+
+	/* write back only the 'size' field of the descriptor */
+	offset = offsetof(struct snd_effect_caps, size);
+	if (copy_to_user((argp + offset), (void *)&caps->size, sizeof(u32))) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	if (copy_to_user(bufp, caps->buffer, caps->size))
+		retval = -EFAULT;
+
+free_buf:
+	kfree(caps->buffer);
+out:
+	kfree(caps);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_query_effect_caps);
+
+/**
+ * snd_effect_register - register effect ops with a sound card
+ *
+ * @card : snd card to which the effects are registered
+ * @ops : effect_ops to register; create, destroy and set_params are
+ *        mandatory, the query/get ops are optional
+ *
+ * Returns 0 on success, -EINVAL on a NULL card/ops or a missing mandatory op.
+ * NOTE(review): effect_lock is (re)initialised on every call — confirm a
+ * card can only register once, otherwise a held lock could be re-inited.
+ */
+int snd_effect_register(struct snd_card *card, struct snd_effect_ops *ops)
+{
+
+	if (card == NULL || ops == NULL)
+		return -EINVAL;
+
+	if (snd_BUG_ON(!ops->create))
+		return -EINVAL;
+	if (snd_BUG_ON(!ops->destroy))
+		return -EINVAL;
+	if (snd_BUG_ON(!ops->set_params))
+		return -EINVAL;
+
+	mutex_init(&card->effect_lock);
+
+	pr_debug("Registering Effects to card %s\n", card->shortname);
+	/* register the effect ops with the card */
+	mutex_lock(&effect_mutex);
+	card->effect_ops = ops;
+	mutex_unlock(&effect_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_effect_register);
+
+/*
+ * snd_effect_deregister - detach the effect ops from a card
+ * @card: card to strip of its effect ops
+ *
+ * Always returns 0.
+ * NOTE(review): in-flight ioctls that already passed the effect_ops check
+ * could race with this NULL-ing — confirm callers quiesce users first.
+ */
+int snd_effect_deregister(struct snd_card *card)
+{
+	pr_debug("Removing effects for card %s\n", card->shortname);
+	mutex_lock(&effect_mutex);
+	card->effect_ops = NULL;
+	mutex_unlock(&effect_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_effect_deregister);
+
+/* Module plumbing only: the framework has no state of its own to set up —
+ * everything is registered per-card via snd_effect_register(). */
+static int __init snd_effect_init(void)
+{
+	return 0;
+}
+
+static void __exit snd_effect_exit(void)
+{
+}
+
+module_init(snd_effect_init);
+module_exit(snd_effect_exit);
+
+MODULE_DESCRIPTION("ALSA Effect offload framework");
+MODULE_AUTHOR("Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_LICENSE("GPL v2");
source "sound/soc/nuc900/Kconfig"
source "sound/soc/omap/Kconfig"
source "sound/soc/kirkwood/Kconfig"
-source "sound/soc/mid-x86/Kconfig"
+source "sound/soc/intel/Kconfig"
source "sound/soc/mxs/Kconfig"
source "sound/soc/pxa/Kconfig"
source "sound/soc/samsung/Kconfig"
obj-$(CONFIG_SND_SOC) += dwc/
obj-$(CONFIG_SND_SOC) += fsl/
obj-$(CONFIG_SND_SOC) += jz4740/
-obj-$(CONFIG_SND_SOC) += mid-x86/
+obj-$(CONFIG_SND_SOC) += intel/
obj-$(CONFIG_SND_SOC) += mxs/
obj-$(CONFIG_SND_SOC) += nuc900/
obj-$(CONFIG_SND_SOC) += omap/
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/gcd.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
static const struct wm8958_micd_rate micdet_rates[] = {
{ 32768, true, 1, 4 },
- { 32768, false, 1, 1 },
+ { 32768, false, 1, 0 },
{ 44100 * 256, true, 7, 10 },
- { 44100 * 256, false, 7, 10 },
+ { 44100 * 256, false, 7, 9 },
};
static const struct wm8958_micd_rate jackdet_rates[] = {
static const struct snd_kcontrol_new wm8994_snd_controls[] = {
SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
WM8994_AIF1_ADC1_RIGHT_VOLUME,
- 1, 119, 0, digital_tlv),
+ 1, 120, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
WM8994_AIF1_ADC2_RIGHT_VOLUME,
- 1, 119, 0, digital_tlv),
+ 1, 120, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
WM8994_AIF2_ADC_RIGHT_VOLUME,
- 1, 119, 0, digital_tlv),
+ 1, 120, 0, digital_tlv),
SOC_ENUM("AIF1ADCL Source", aif1adcl_src),
SOC_ENUM("AIF1ADCR Source", aif1adcr_src),
SOC_ENUM("DAC OSR", dac_osr),
SOC_DOUBLE_R_TLV("DAC1 Volume", WM8994_DAC1_LEFT_VOLUME,
- WM8994_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+ WM8994_DAC1_RIGHT_VOLUME, 1, 112, 0, digital_tlv),
SOC_DOUBLE_R("DAC1 Switch", WM8994_DAC1_LEFT_VOLUME,
WM8994_DAC1_RIGHT_VOLUME, 9, 1, 1),
SOC_DOUBLE_R_TLV("DAC2 Volume", WM8994_DAC2_LEFT_VOLUME,
- WM8994_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+ WM8994_DAC2_RIGHT_VOLUME, 1, 112, 0, digital_tlv),
SOC_DOUBLE_R("DAC2 Switch", WM8994_DAC2_LEFT_VOLUME,
WM8994_DAC2_RIGHT_VOLUME, 9, 1, 1),
10, 15, 0, wm8994_3d_tlv),
SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
8, 1, 0),
+
+SOC_SINGLE_TLV("MIXINL MIXOUTL Volume", WM8994_INPUT_MIXER_3, 0, 7, 0,
+ mixin_boost_tlv),
+SOC_SINGLE_TLV("MIXINR MIXOUTR Volume", WM8994_INPUT_MIXER_4, 0, 7, 0,
+ mixin_boost_tlv),
};
static const struct snd_kcontrol_new wm8994_eq_controls[] = {
WM8994_BIAS_SRC |
WM8994_STARTUP_BIAS_ENA |
WM8994_VMID_BUF_ENA |
- (0x2 << WM8994_VMID_RAMP_SHIFT));
+ (0x3 << WM8994_VMID_RAMP_SHIFT));
/* Main bias enable, VMID=2x40k */
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
WM8994_VMID_SEL_MASK,
WM8994_BIAS_ENA | 0x2);
- msleep(300);
+ /* The delay of 300ms was recommended to support pop
+ * free startup of the line output driver, as we don't use
+ * that feature reducing the delay to 50ms as recommended in
+ * the spec, Also changing VMID_RAMP to soft fast start
+ * accordingly Also applies for VMID_FORCE and
+ * vmid_dereference.
+ */
+ msleep(50);
snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
WM8994_VMID_RAMP_MASK |
WM8994_BIAS_SRC |
WM8994_STARTUP_BIAS_ENA |
WM8994_VMID_BUF_ENA |
- (0x2 << WM8994_VMID_RAMP_SHIFT));
+ (0x3 << WM8994_VMID_RAMP_SHIFT));
/* Main bias enable, VMID=2x40k */
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
WM8994_BIAS_ENA |
WM8994_VMID_SEL_MASK,
WM8994_BIAS_ENA | 0x2);
-
- msleep(400);
+ msleep(50);
snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
WM8994_VMID_RAMP_MASK |
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
WM8994_VMID_SEL_MASK, 0);
- msleep(400);
+ msleep(50);
/* Active discharge */
snd_soc_update_bits(codec, WM8994_ANTIPOP_1,
WM8994_VMID_RAMP_MASK, 0);
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
- WM8994_VMID_SEL_MASK, 0);
+ WM8994_BIAS_ENA | WM8994_VMID_SEL_MASK, 0);
}
pm_runtime_put(codec->dev);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Don't enable timeslot 2 if not in use */
- if (wm8994->channels[0] <= 2)
+ if ((wm8994->channels[0] <= 2) && (wm8994->slots <= 2))
mask &= ~(WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
struct snd_soc_codec *codec = w->codec;
unsigned int mask = 1 << w->shift;
+ /* Don't propagate FIFO errors unless the DAC is running */
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* Clear FIFO error status */
+ snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2,
+ WM8994_FIFOS_ERR_EINT_MASK,
+ 1 << WM8994_FIFOS_ERR_EINT_SHIFT);
+ /* Unmask FIFO error interrupts */
+ snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+ WM8994_IM_FIFOS_ERR_EINT_MASK,
+ 0 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Mask FIFO error interrupts */
+ snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+ WM8994_IM_FIFOS_ERR_EINT_MASK,
+ 1 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+ break;
+ }
+
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
mask, mask);
return 0;
"AIF1DACDAT", "AIF3DACDAT",
};
+static const char *loopback_text[] = {
+ "None", "ADCDAT",
+};
+
+static const struct soc_enum aif1_loopback_enum =
+ SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, WM8994_AIF1_LOOPBACK_SHIFT, 2,
+ loopback_text);
+
+static const struct snd_kcontrol_new aif1_loopback =
+ SOC_DAPM_ENUM("AIF1 Loopback", aif1_loopback_enum);
+
+static const struct soc_enum aif2_loopback_enum =
+ SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, WM8994_AIF2_LOOPBACK_SHIFT, 2,
+ loopback_text);
+
+static const struct snd_kcontrol_new aif2_loopback =
+ SOC_DAPM_ENUM("AIF2 Loopback", aif2_loopback_enum);
+
static const struct soc_enum aif1dac_enum =
SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 0, 2, aif1dac_text);
static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
- dac_ev, SND_SOC_DAPM_PRE_PMU),
+ dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
- dac_ev, SND_SOC_DAPM_PRE_PMU),
+ dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
- dac_ev, SND_SOC_DAPM_PRE_PMU),
+ dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
- dac_ev, SND_SOC_DAPM_PRE_PMU),
+ dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD),
};
static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
};
static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", SND_SOC_NOPM, 1, 0, &adcl_mux,
adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", SND_SOC_NOPM, 0, 0, &adcr_mux,
adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
};
static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0, &adcr_mux),
};
static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8994_POWER_MANAGEMENT_4, 3, 0),
SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
-/* Power is done with the muxes since the ADC power also controls the
- * downsampling chain, the chip will automatically manage the analogue
- * specific portions.
- */
-SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
-SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_ADC("ADCL", NULL, WM8994_POWER_MANAGEMENT_4, 1, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, WM8994_POWER_MANAGEMENT_4, 0, 0),
+
+SND_SOC_DAPM_MUX("AIF1 Loopback", SND_SOC_NOPM, 0, 0, &aif1_loopback),
+SND_SOC_DAPM_MUX("AIF2 Loopback", SND_SOC_NOPM, 0, 0, &aif2_loopback),
SND_SOC_DAPM_POST("Debug log", post_ev),
};
{ "AIF1DAC2L", NULL, "AIF1DAC Mux" },
{ "AIF1DAC2R", NULL, "AIF1DAC Mux" },
- { "AIF1DAC Mux", "AIF1DACDAT", "AIF1DACDAT" },
+ { "AIF1DAC Mux", "AIF1DACDAT", "AIF1 Loopback" },
{ "AIF1DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
- { "AIF2DAC Mux", "AIF2DACDAT", "AIF2DACDAT" },
+ { "AIF2DAC Mux", "AIF2DACDAT", "AIF2 Loopback" },
{ "AIF2DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
{ "AIF2ADC Mux", "AIF2ADCDAT", "AIF2ADCL" },
{ "AIF2ADC Mux", "AIF2ADCDAT", "AIF2ADCR" },
{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACL" },
{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACR" },
+ /* Loopback */
+ { "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" },
+ { "AIF1 Loopback", "None", "AIF1DACDAT" },
+ { "AIF2 Loopback", "ADCDAT", "AIF2ADCDAT" },
+ { "AIF2 Loopback", "None", "AIF2DACDAT" },
+
/* Sidetone */
{ "Left Sidetone", "ADC/DMIC1", "ADCL Mux" },
{ "Left Sidetone", "DMIC2", "DMIC2L" },
u16 outdiv;
u16 n;
u16 k;
+ u16 lambda;
u16 clk_ref_div;
u16 fll_fratio;
};
-static int wm8994_get_fll_config(struct fll_div *fll,
+static int wm8994_get_fll_config(struct wm8994 *control, struct fll_div *fll,
int freq_in, int freq_out)
{
u64 Kpart;
- unsigned int K, Ndiv, Nmod;
+ unsigned int K, Ndiv, Nmod, gcd_fll;
pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out);
Nmod = freq_out % freq_in;
pr_debug("Nmod=%d\n", Nmod);
- /* Calculate fractional part - scale up so we can round. */
- Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+ switch (control->type) {
+ case WM8994:
+ /* Calculate fractional part - scale up so we can round. */
+ Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+
+ do_div(Kpart, freq_in);
+
+ K = Kpart & 0xFFFFFFFF;
- do_div(Kpart, freq_in);
+ if ((K % 10) >= 5)
+ K += 5;
- K = Kpart & 0xFFFFFFFF;
+ /* Move down to proper range now rounding is done */
+ fll->k = K / 10;
+ fll->lambda = 0;
- if ((K % 10) >= 5)
- K += 5;
+ pr_debug("N=%x K=%x\n", fll->n, fll->k);
+ break;
- /* Move down to proper range now rounding is done */
- fll->k = K / 10;
+ default:
+ gcd_fll = gcd(freq_out, freq_in);
- pr_debug("N=%x K=%x\n", fll->n, fll->k);
+ fll->k = (freq_out - (freq_in * fll->n)) / gcd_fll;
+ fll->lambda = freq_in / gcd_fll;
+
+ }
return 0;
}
* analysis bugs spewing warnings.
*/
if (freq_out)
- ret = wm8994_get_fll_config(&fll, freq_in, freq_out);
+ ret = wm8994_get_fll_config(control, &fll, freq_in, freq_out);
else
- ret = wm8994_get_fll_config(&fll, wm8994->fll[id].in,
+ ret = wm8994_get_fll_config(control, &fll, wm8994->fll[id].in,
wm8994->fll[id].out);
if (ret < 0)
return ret;
WM8994_FLL1_N_MASK,
fll.n << WM8994_FLL1_N_SHIFT);
+ if (fll.lambda) {
+ snd_soc_update_bits(codec, WM8958_FLL1_EFS_1 + reg_offset,
+ WM8958_FLL1_LAMBDA_MASK,
+ fll.lambda);
+ snd_soc_update_bits(codec, WM8958_FLL1_EFS_2 + reg_offset,
+ WM8958_FLL1_EFS_ENA, WM8958_FLL1_EFS_ENA);
+ } else {
+ snd_soc_update_bits(codec, WM8958_FLL1_EFS_2 + reg_offset,
+ WM8958_FLL1_EFS_ENA, 0);
+ }
+
snd_soc_update_bits(codec, WM8994_FLL1_CONTROL_5 + reg_offset,
WM8994_FLL1_FRC_NCO | WM8958_FLL1_BYP |
WM8994_FLL1_REFCLK_DIV_MASK |
* If SYSCLK will be less than 50kHz adjust AIFnCLK dividers
* for detection.
*/
- if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000) {
+ if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000 &&
+ !wm8994->aifdiv[0]) {
dev_dbg(codec->dev, "Configuring AIFs for 128fs\n");
wm8994->aifdiv[0] = snd_soc_read(codec, WM8994_AIF1_RATE)
struct wm8994 *control = wm8994->wm8994;
int ms_reg;
int aif1_reg;
+ int dac_reg;
+ int adc_reg;
int ms = 0;
int aif1 = 0;
+ int lrclk = 0;
switch (dai->id) {
case 1:
ms_reg = WM8994_AIF1_MASTER_SLAVE;
aif1_reg = WM8994_AIF1_CONTROL_1;
+ dac_reg = WM8994_AIF1DAC_LRCLK;
+ adc_reg = WM8994_AIF1ADC_LRCLK;
break;
case 2:
ms_reg = WM8994_AIF2_MASTER_SLAVE;
aif1_reg = WM8994_AIF2_CONTROL_1;
+ dac_reg = WM8994_AIF1DAC_LRCLK;
+ adc_reg = WM8994_AIF1ADC_LRCLK;
break;
default:
return -EINVAL;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
aif1 |= WM8994_AIF1_LRCLK_INV;
+ lrclk |= WM8958_AIF1_LRCLK_INV;
case SND_SOC_DAIFMT_DSP_A:
aif1 |= 0x18;
break;
break;
case SND_SOC_DAIFMT_IB_IF:
aif1 |= WM8994_AIF1_BCLK_INV | WM8994_AIF1_LRCLK_INV;
+ lrclk |= WM8958_AIF1_LRCLK_INV;
break;
case SND_SOC_DAIFMT_IB_NF:
aif1 |= WM8994_AIF1_BCLK_INV;
break;
case SND_SOC_DAIFMT_NB_IF:
aif1 |= WM8994_AIF1_LRCLK_INV;
+ lrclk |= WM8958_AIF1_LRCLK_INV;
break;
default:
return -EINVAL;
aif1);
snd_soc_update_bits(codec, ms_reg, WM8994_AIF1_MSTR,
ms);
+ snd_soc_update_bits(codec, dac_reg,
+ WM8958_AIF1_LRCLK_INV, lrclk);
+ snd_soc_update_bits(codec, adc_reg,
+ WM8958_AIF1_LRCLK_INV, lrclk);
return 0;
}
int lrclk = 0;
int rate_val = 0;
int id = dai->id - 1;
+ struct snd_pcm_hw_params hw_params;
int i, cur_val, best_val, bclk_rate, best;
+ if (params)
+ memcpy(&hw_params, params, sizeof(*params));
+ else
+ return -EINVAL;
+
+ /* If custom params are there, override to custom params */
+ if (pdata->custom_cfg) {
+
+ dev_dbg(codec->dev, "%s: Overriding to custom params....\n",
+ __func__);
+
+ snd_mask_none(hw_param_mask(&hw_params,
+ SNDRV_PCM_HW_PARAM_FORMAT));
+ snd_mask_set(hw_param_mask(&hw_params,
+ SNDRV_PCM_HW_PARAM_FORMAT),
+ pdata->custom_cfg->format);
+
+ hw_param_interval(&hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
+ pdata->custom_cfg->rate;
+ hw_param_interval(&hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
+ pdata->custom_cfg->rate;
+
+ hw_param_interval(&hw_params,
+ SNDRV_PCM_HW_PARAM_CHANNELS)->min =
+ pdata->custom_cfg->channels;
+ hw_param_interval(&hw_params,
+ SNDRV_PCM_HW_PARAM_CHANNELS)->max =
+ pdata->custom_cfg->channels;
+ }
+
switch (dai->id) {
case 1:
aif1_reg = WM8994_AIF1_CONTROL_1;
return -EINVAL;
}
- bclk_rate = params_rate(params);
- switch (params_format(params)) {
+ bclk_rate = params_rate(&hw_params);
+
+ switch (params_format(&hw_params)) {
case SNDRV_PCM_FORMAT_S16_LE:
bclk_rate *= 16;
break;
return -EINVAL;
}
- wm8994->channels[id] = params_channels(params);
+ wm8994->channels[id] = params_channels(&hw_params);
if (pdata->max_channels_clocked[id] &&
wm8994->channels[id] > pdata->max_channels_clocked[id]) {
dev_dbg(dai->dev, "Constraining channels to %d from %d\n",
/* Try to find an appropriate sample rate; look for an exact match. */
for (i = 0; i < ARRAY_SIZE(srs); i++)
- if (srs[i].rate == params_rate(params))
+ if (srs[i].rate == params_rate(&hw_params))
break;
if (i == ARRAY_SIZE(srs))
return -EINVAL;
/* AIFCLK/fs ratio; look for a close match in either direction */
best = 0;
- best_val = abs((fs_ratios[0] * params_rate(params))
+ best_val = abs((fs_ratios[0] * params_rate(&hw_params))
- wm8994->aifclk[id]);
for (i = 1; i < ARRAY_SIZE(fs_ratios); i++) {
- cur_val = abs((fs_ratios[i] * params_rate(params))
+ cur_val = abs((fs_ratios[i] * params_rate(&hw_params))
- wm8994->aifclk[id]);
if (cur_val >= best_val)
continue;
bclk_divs[best], bclk_rate);
bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
- lrclk = bclk_rate / params_rate(params);
+ lrclk = bclk_rate / params_rate(&hw_params);
if (!lrclk) {
dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
bclk_rate);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
switch (dai->id) {
case 1:
- wm8994->dac_rates[0] = params_rate(params);
+ wm8994->dac_rates[0] = params_rate(&hw_params);
wm8994_set_retune_mobile(codec, 0);
wm8994_set_retune_mobile(codec, 1);
break;
case 2:
- wm8994->dac_rates[1] = params_rate(params);
+ wm8994->dac_rates[1] = params_rate(&hw_params);
wm8994_set_retune_mobile(codec, 2);
break;
}
return snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
}
+#if IS_ENABLED(CONFIG_SND_MRFLD_MACHINE) || \
+ IS_ENABLED(CONFIG_SND_MOOR_MACHINE)
+static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+ return 0;
+}
+#else
static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
{
struct snd_soc_codec *codec = codec_dai->codec;
return 0;
}
+#endif
static int wm8994_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
{
return snd_soc_update_bits(codec, reg, mask, val);
}
+/*
+ * set_tdm_slot() DAI op: record the TDM slot count for later use when
+ * configuring the interface.  Only the WM8958 variant supports TDM here;
+ * any other device type gets -EINVAL.  tx_mask, rx_mask and slot_width
+ * are accepted for API compatibility but not used.
+ */
+static int wm8994_set_tdm_slots(struct snd_soc_dai *dai,
+	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = wm8994->wm8994;
+
+	switch (control->type) {
+	case WM8958:
+		wm8994->slots = slots;
+		break;
+	default:
+		dev_err(codec->dev, "TDM is only supported on the WM8958\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
static int wm8994_aif2_probe(struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
.digital_mute = wm8994_aif_mute,
.set_pll = wm8994_set_fll,
.set_tristate = wm8994_set_tristate,
+ .set_tdm_slot = wm8994_set_tdm_slots,
};
static const struct snd_soc_dai_ops wm8994_aif2_dai_ops = {
static int wm8994_codec_resume(struct snd_soc_codec *codec)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994 *control = wm8994->wm8994;
int i, ret;
- unsigned int val, mask;
-
- if (control->revision < 4) {
- /* force a HW read */
- ret = regmap_read(control->regmap,
- WM8994_POWER_MANAGEMENT_5, &val);
-
- /* modify the cache only */
- codec->cache_only = 1;
- mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
- WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
- val &= mask;
- snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
- mask, val);
- codec->cache_only = 0;
- }
for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
if (!wm8994->fll_suspend[i].out)
wm8994->btn_mask);
}
+/*
+ * Deferred handler for an open-circuit (accessory removed) MICDET report.
+ * Scheduled with a delay from the mic-id path so a later IRQ can cancel a
+ * transient open-circuit reading; stops mic detection, resets the
+ * detection state flags under the accdet lock and reports an empty jack.
+ */
+static void wm8958_open_circuit_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+					   struct wm8994_priv,
+					   open_circuit_work.work);
+	struct device *dev = wm8994->wm8994->dev;
+
+	/* Stop detection before taking the lock, matching the IRQ path */
+	wm1811_micd_stop(wm8994->hubs.codec);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	dev_dbg(dev, "Reporting open circuit\n");
+
+	/* Back to "nothing attached, re-identify on next insertion" state */
+	wm8994->jack_mic = false;
+	wm8994->mic_detecting = true;
+	wm8994->headphone_detected = false;
+
+	wm8958_micd_set_rate(wm8994->hubs.codec);
+
+	/* Clear both button and headset status towards the jack layer */
+	snd_soc_jack_report(wm8994->micdet[0].jack, 0,
+			    wm8994->btn_mask |
+			    SND_JACK_HEADSET);
+
+	mutex_unlock(&wm8994->accdet_lock);
+}
+
static void wm8958_mic_id(void *data, u16 status)
{
struct snd_soc_codec *codec = data;
if (!(status & WM8958_MICD_STS)) {
/* If nothing present then clear our statuses */
dev_dbg(codec->dev, "Detected open circuit\n");
- wm8994->jack_mic = false;
- wm8994->mic_detecting = true;
-
- wm1811_micd_stop(codec);
- wm8958_micd_set_rate(codec);
-
- snd_soc_jack_report(wm8994->micdet[0].jack, 0,
- wm8994->btn_mask |
- SND_JACK_HEADSET);
+ schedule_delayed_work(&wm8994->open_circuit_work,
+ msecs_to_jiffies(2500));
return;
}
dev_dbg(codec->dev, "Starting mic detection\n");
- /* Use a user-supplied callback if we have one */
- if (wm8994->micd_cb) {
- wm8994->micd_cb(wm8994->micd_cb_data);
- } else {
+ /* If there's a callback it'll be called out of the lock */
+ if (!wm8994->micd_cb) {
/*
* Start off measument of microphone impedence to find out
* what's actually there.
mutex_unlock(&wm8994->accdet_lock);
+ /* Custom callbacks may reasonably wish to take the same locks */
+ if (wm8994->micd_cb)
+ wm8994->micd_cb(wm8994->micd_cb_data);
+
pm_runtime_put(codec->dev);
}
int reg, delay;
bool present;
+ cancel_delayed_work_sync(&wm8994->mic_work);
+
pm_runtime_get_sync(codec->dev);
+ cancel_delayed_work_sync(&wm8994->mic_complete_work);
+
mutex_lock(&wm8994->accdet_lock);
reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
} else {
dev_dbg(codec->dev, "Jack not detected\n");
- cancel_delayed_work_sync(&wm8994->mic_work);
-
snd_soc_update_bits(codec, WM8958_MICBIAS2,
WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
} else {
wm8994->mic_detecting = true;
wm8994->jack_mic = false;
+ wm8994->headphone_detected = false;
}
if (id_cb) {
}
EXPORT_SYMBOL_GPL(wm8958_mic_detect);
+/**
+ * wm8958_micd_set_custom_rate - register a custom MICD rate callback
+ * @codec: codec instance
+ * @micd_custom_rate_cb: callback run from micd_set_custom_rate_work
+ * @micd_custom_rate_cb_data: opaque pointer handed back to the callback
+ *
+ * A NULL callback is silently ignored (any previous registration is
+ * kept).  Always returns 0.
+ */
+int wm8958_micd_set_custom_rate(struct snd_soc_codec *codec,
+	wm8958_micd_set_custom_rate_cb micd_custom_rate_cb,
+	void *micd_custom_rate_cb_data)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	if (micd_custom_rate_cb) {
+		wm8994->micd_custom_rate_cb = micd_custom_rate_cb;
+		wm8994->micd_custom_rate_cb_data = micd_custom_rate_cb_data;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8958_micd_set_custom_rate);
+
+/*
+ * Deferred completion of microphone identification: invoke the registered
+ * mic_id callback with the status latched by the MICDET IRQ handler,
+ * under the accessory-detect lock and with the device held awake via
+ * pm_runtime.
+ */
+static void wm8958_mic_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+					   struct wm8994_priv,
+					   mic_complete_work.work);
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+
+	/* Routine trace output: debug level, not critical */
+	dev_dbg(codec->dev, "MIC WORK %x\n", wm8994->mic_status);
+
+	pm_runtime_get_sync(codec->dev);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	wm8994->mic_id_cb(wm8994->mic_id_cb_data, wm8994->mic_status);
+
+	mutex_unlock(&wm8994->accdet_lock);
+
+	pm_runtime_put(codec->dev);
+
+	dev_dbg(codec->dev, "MIC WORK %x DONE\n", wm8994->mic_status);
+}
+
+/*
+ * Worker applying a platform-supplied custom MICD rate: runs the callback
+ * registered via wm8958_micd_set_custom_rate() under the accessory-detect
+ * lock with the device held awake via pm_runtime.  Only scheduled for
+ * WM8958 (see the INIT_DELAYED_WORK in the probe-time switch).
+ */
+static void wm8958_micd_set_custom_rate_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+					struct wm8994_priv,
+					micd_set_custom_rate_work.work);
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+
+	dev_dbg(codec->dev, "%s: Set custom rates\n", __func__);
+
+	pm_runtime_get_sync(codec->dev);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	wm8994->micd_custom_rate_cb(wm8994->micd_custom_rate_cb_data);
+
+	mutex_unlock(&wm8994->accdet_lock);
+
+	pm_runtime_put(codec->dev);
+
+}
+
static irqreturn_t wm8958_mic_irq(int irq, void *data)
{
struct wm8994_priv *wm8994 = data;
struct snd_soc_codec *codec = wm8994->hubs.codec;
- int reg, count, ret;
+ struct wm8994 *control = wm8994->wm8994;
+ int reg, count, ret, id_delay;
/*
* Jack detection may have detected a removal simulataneously
if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
return IRQ_HANDLED;
+ cancel_delayed_work_sync(&wm8994->mic_complete_work);
+ cancel_delayed_work_sync(&wm8994->open_circuit_work);
+
pm_runtime_get_sync(codec->dev);
/* We may occasionally read a detection without an impedence
snd_soc_jack_report(wm8994->micdet[0].jack, 0,
SND_JACK_MECHANICAL | SND_JACK_HEADSET |
wm8994->btn_mask);
+ wm8994->jack_mic = false;
+ wm8994->headphone_detected = false;
wm8994->mic_detecting = true;
goto out;
}
- if (wm8994->mic_detecting)
- wm8994->mic_id_cb(wm8994->mic_id_cb_data, reg);
- else
+ wm8994->mic_status = reg;
+ id_delay = wm8994->wm8994->pdata.mic_id_delay;
+
+ if (wm8994->mic_detecting) {
+ if (control->type == WM8958) {
+ /* Set mic-bias high during detection phase (micb_en_delay) */
+ /* 0 == Continuous */
+ dev_dbg(codec->dev, "Set MICBIAS High, for micb_en_delay time\n");
+ snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+ WM8958_MICD_BIAS_STARTTIME_MASK |
+ WM8958_MICD_RATE_MASK, 0);
+ }
+
+ schedule_delayed_work(&wm8994->mic_complete_work,
+ msecs_to_jiffies(id_delay));
+ } else {
wm8958_button_det(codec, reg);
+ }
out:
pm_runtime_put(codec->dev);
struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct snd_soc_dapm_context *dapm = &codec->dapm;
+ unsigned int dcs_done_irq;
unsigned int reg;
int ret, i;
mutex_init(&wm8994->accdet_lock);
INIT_DELAYED_WORK(&wm8994->jackdet_bootstrap,
wm1811_jackdet_bootstrap);
+ INIT_DELAYED_WORK(&wm8994->open_circuit_work,
+ wm8958_open_circuit_work);
switch (control->type) {
case WM8994:
case WM1811:
INIT_DELAYED_WORK(&wm8994->mic_work, wm1811_mic_work);
break;
+ case WM8958:
+ INIT_DELAYED_WORK(&wm8994->micd_set_custom_rate_work,
+ wm8958_micd_set_custom_rate_work);
+ break;
default:
break;
}
+ INIT_DELAYED_WORK(&wm8994->mic_complete_work, wm8958_mic_work);
+
for (i = 0; i < ARRAY_SIZE(wm8994->fll_locked); i++)
init_completion(&wm8994->fll_locked[i]);
wm8994_request_irq(wm8994->wm8994, WM8994_IRQ_TEMP_SHUT,
wm8994_temp_shut, "Thermal shutdown", codec);
+ dcs_done_irq = regmap_irq_get_virq(wm8994->wm8994->irq_data,
+ WM8994_IRQ_DCS_DONE);
+ irq_set_status_flags(dcs_done_irq, IRQ_NOAUTOEN);
ret = wm8994_request_irq(wm8994->wm8994, WM8994_IRQ_DCS_DONE,
wm_hubs_dcs_done, "DC servo done",
&wm8994->hubs);
}
if ((reg & WM8994_GPN_FN_MASK) != WM8994_GP_FN_PIN_SPECIFIC) {
wm8994->lrclk_shared[0] = 1;
- wm8994_dai[0].symmetric_rates = 1;
} else {
wm8994->lrclk_shared[0] = 0;
}
}
wm_hubs_add_analogue_routes(codec, 0, 0);
+ enable_irq(dcs_done_irq);
snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
switch (control->type) {
break;
}
+ /* Make sure FIFO errors are masked */
+ snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+ WM8994_IM_FIFOS_ERR_EINT_MASK,
+ 1 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+
return 0;
err_irq:
static int wm8994_suspend(struct device *dev)
{
struct wm8994_priv *wm8994 = dev_get_drvdata(dev);
+ struct wm8994 *control = wm8994->wm8994;
+ struct snd_soc_codec *codec = wm8994->hubs.codec;
+ unsigned int reg;
+ int ret;
+
/* Drop down to power saving mode when system is suspended */
if (wm8994->jackdet && !wm8994->active_refcount)
WM1811_JACKDET_MODE_MASK,
wm8994->jackdet_mode);
+ /* Disable the MIC Detection when suspended */
+ if ((control->type == WM8958) && wm8994->mic_id_cb) {
+
+ reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
+
+ dev_dbg(codec->dev, "%s: WM8958_MIC_DETECT_3 0x%x\n", __func__, reg);
+ dev_dbg(codec->dev, "mic_detect %d jack_mic %d headphone %d\n",
+ wm8994->mic_detecting, wm8994->jack_mic,
+ wm8994->headphone_detected);
+
+ if (!(wm8994->jack_mic) && !(wm8994->headphone_detected)) {
+
+ dev_dbg(codec->dev, "Jack not connected..Mask interrupt\n");
+ snd_soc_write(codec, WM8994_INTERRUPT_CONTROL, 0x01);
+
+ ret = regcache_sync_region(wm8994->wm8994->regmap,
+ WM8994_INTERRUPT_CONTROL,
+ WM8994_INTERRUPT_CONTROL);
+ if (ret != 0)
+ dev_err(dev, "Failed to sync register: %d\n", ret);
+ synchronize_irq(control->irq);
+
+ dev_dbg(codec->dev, "Disable MIC Detection!!!\n");
+ snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+ WM8958_MICD_ENA, 0);
+
+ snd_soc_dapm_disable_pin(&codec->dapm, "CLK_SYS");
+ snd_soc_dapm_sync(&codec->dapm);
+ }
+ }
+
return 0;
}
static int wm8994_resume(struct device *dev)
{
struct wm8994_priv *wm8994 = dev_get_drvdata(dev);
+ struct wm8994 *control = wm8994->wm8994;
+ struct snd_soc_codec *codec = wm8994->hubs.codec;
if (wm8994->jackdet && wm8994->jackdet_mode)
regmap_update_bits(wm8994->wm8994->regmap, WM8994_ANTIPOP_2,
WM1811_JACKDET_MODE_MASK,
WM1811_JACKDET_MODE_AUDIO);
+ /* Enable the MIC Detection when resumed */
+ if ((control->type == WM8958) && wm8994->mic_id_cb) {
+ dev_dbg(codec->dev, "Enable MIC Detection!!!\n");
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "CLK_SYS");
+ snd_soc_dapm_sync(&codec->dapm);
+
+ snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+ WM8958_MICD_ENA, WM8958_MICD_ENA);
+ snd_soc_write(codec, WM8994_INTERRUPT_CONTROL, 0x00);
+ }
+
return 0;
}
#endif
typedef void (*wm1811_micdet_cb)(void *data);
typedef void (*wm1811_mic_id_cb)(void *data, u16 status);
+typedef void (*wm8958_micd_set_custom_rate_cb)(struct snd_soc_codec *codec);
int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
int micbias);
void wm8958_dsp2_init(struct snd_soc_codec *codec);
+int wm8958_micd_set_custom_rate(struct snd_soc_codec *codec,
+ wm8958_micd_set_custom_rate_cb micd_custom_rate_cb,
+ void *micd_custom_rate_cb_data);
+
struct wm8994_micdet {
struct snd_soc_jack *jack;
bool detecting;
bool fll_locked_irq;
bool fll_byp;
bool clk_has_run;
+ int slots;
int vmid_refcount;
int active_refcount;
struct mutex accdet_lock;
struct wm8994_micdet micdet[2];
struct delayed_work mic_work;
+ struct delayed_work open_circuit_work;
+ struct delayed_work mic_complete_work;
+ struct delayed_work micd_set_custom_rate_work;
+
+ u16 mic_status;
bool mic_detecting;
bool jack_mic;
+ bool headphone_detected;
int btn_mask;
bool jackdet;
int jackdet_mode;
void *micd_cb_data;
wm1811_mic_id_cb mic_id_cb;
void *mic_id_cb_data;
+ wm8958_micd_set_custom_rate_cb micd_custom_rate_cb;
+ void *micd_custom_rate_cb_data;
unsigned int aif1clk_enable:1;
unsigned int aif2clk_enable:1;
hubs->hp_startup_mode);
break;
}
+ break;
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
WM8993_LINEOUT2_MODE,
WM8993_LINEOUT2_MODE);
- if (!lineout1_diff && !lineout2_diff)
- snd_soc_update_bits(codec, WM8993_ANTIPOP1,
- WM8993_LINEOUT_VMID_BUF_ENA,
- WM8993_LINEOUT_VMID_BUF_ENA);
-
if (lineout1fb)
snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
WM8993_LINEOUT1_FB, WM8993_LINEOUT1_FB);
struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
int val = 0;
+ if ((hubs->lineout1_se && hubs->lineout2_se) &&
+ (hubs->lineout1n_ena || hubs->lineout1p_ena ||
+ hubs->lineout2n_ena || hubs->lineout2p_ena))
+ snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+ WM8993_LINEOUT_VMID_BUF_ENA,
+ WM8993_LINEOUT_VMID_BUF_ENA);
+
if (hubs->lineout1_se)
val |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
val = 0;
mask = 0;
+ if ((hubs->lineout1_se && hubs->lineout2_se) &&
+ (hubs->lineout1n_ena || hubs->lineout1p_ena ||
+ hubs->lineout2n_ena || hubs->lineout2p_ena))
+ snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+ WM8993_LINEOUT_VMID_BUF_ENA,
+ WM8993_LINEOUT_VMID_BUF_ENA);
+
if (hubs->lineout1_se)
mask |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
--- /dev/null
+config SND_MFLD_MACHINE
+ tristate "SOC Machine Audio driver for Intel Medfield MID platform"
+ depends on INTEL_SCU_IPC && INTEL_SCU_IPC_UTIL && X86 && GPIO_LANGWELL
+ depends on MSIC_GPADC
+ select SND_SOC_SN95031
+ select SND_SST_PLATFORM
+ select SND_SST_MACHINE
+ select SND_INTEL_SST
+ default n
+ help
+ This adds support for ASoC machine driver for Intel(R) MID Medfield platform
+ used as alsa device in audio subsystem in Intel(R) MID devices
+ Say Y if you have such a device
+ If unsure select "N".
+
+config SND_MRFLD_MACHINE
+ tristate "SOC Machine Audio driver for Intel Merrifield MID platform"
+ depends on INTEL_SCU_IPC && X86
+ select SND_SOC_LM49453
+ select SND_SOC_WM8994
+ select MFD_CORE
+ select MFD_WM8994
+ select REGULATOR_WM8994
+ select SND_SST_PLATFORM
+ select SND_SST_MACHINE
+ select SND_INTEL_SST
+ select SND_EFFECTS_OFFLOAD
+ default n
+ help
+ This adds support for ASoC machine driver for Intel(R) MID Merrifield platform
+	  used as alsa device in audio subsystem in Intel(R) MID devices
+ Say Y if you have such a device
+ If unsure select "N".
+
+config SND_INTEL_SST
+ tristate
+
+config SND_SST_PLATFORM
+ tristate
+
+config SND_SOC_COMMS_SSP
+ depends on SND_INTEL_MID_I2S
+ tristate "Use ASOC framework to drive AudioComms SSP BT and Modem"
+ help
+ Sound SOC cards usually used for BT VOIP and MODEM MIXING use cases.
+ This will add devices for these uses cases in the list of alsa cards.
+ Say Y if you need these sound cards (BT chipset or Modem present).
+ Requires to enable the INTEL_MID_I2S low level SSP I2S driver.
+
+config SST_MRFLD_DPCM
+ bool "Use DPCM based Merrifield Machine Audio driver"
+ default n
+ help
+ This adds an option to enable the DPCM based MRFLD machine driver
+
+config SND_SST_MACHINE
+ tristate
+
--- /dev/null
+#EXTRA CFLAGS
+ccflags-y += -Werror
+
+ifeq (${TARGET_BUILD_VARIANT},$(filter ${TARGET_BUILD_VARIANT}, eng))
+ccflags-y += -DCONFIG_SND_VERBOSE_PRINTK -DCONFIG_SND_DEBUG -DCONFIG_SND_DEBUG_VERBOSE
+endif
+
+# SST Platform Driver
+PLATFORM_LIBS = platform-libs/controls_v1.o platform-libs/controls_v2.o platform-libs/controls_v2_dpcm.o \
+ platform-libs/ipc_lib_v2.o
+
+snd-soc-sst-platform-objs := pcm.o compress.o effects.o $(PLATFORM_LIBS)
+obj-$(CONFIG_SND_SST_PLATFORM) += snd-soc-sst-platform.o
+
+# Relevant Machine driver
+obj-$(CONFIG_SND_SST_MACHINE) += board/
+
+# DSP driver
+obj-$(CONFIG_SND_INTEL_SST) += sst/
+
+# Audio Comms
+obj-$(CONFIG_SND_SOC_COMMS_SSP) += ssp/
--- /dev/null
+#EXTRA CFLAGS
+ccflags-y += -Werror
+
+# Merrifield board
+snd-merr-saltbay-wm8958-objs := merr_saltbay_wm8958.o
+snd-merr-dpcm-wm8958-objs := merr_dpcm_wm8958.o
+snd-merr-dpcm-dummy-objs := merr_dpcm_dummy.o
+
+ifdef CONFIG_SST_MRFLD_DPCM
+ obj-$(CONFIG_SND_MRFLD_MACHINE) += snd-merr-dpcm-wm8958.o
+ obj-$(CONFIG_SND_MRFLD_MACHINE) += snd-merr-dpcm-dummy.o
+else
+ obj-$(CONFIG_SND_MRFLD_MACHINE) += snd-merr-saltbay-wm8958.o
+endif
--- /dev/null
+/*
+ * ASoC Dummy DPCM Machine driver for Intel Edison MID platform
+ *
+ * Copyright (C) 2014 Intel Corp
+ * Author: Michael Soares <michaelx.soares@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+/*
+ * dev_pm_ops hooks: forward the platform device's prepare/complete/
+ * poweroff system-sleep transitions to the ASoC core.  Stubbed to NULL
+ * when CONFIG_PM_SLEEP is disabled.
+ */
+#ifdef CONFIG_PM_SLEEP
+static int snd_merr_dpcm_prepare(struct device *dev)
+{
+	pr_debug("In %s device name\n", __func__);
+	snd_soc_suspend(dev);
+	return 0;
+}
+
+static void snd_merr_dpcm_complete(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_resume(dev);
+	return;
+}
+
+static int snd_merr_dpcm_poweroff(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_poweroff(dev);
+	return 0;
+}
+#else
+#define snd_merr_dpcm_prepare NULL
+#define snd_merr_dpcm_complete NULL
+#define snd_merr_dpcm_poweroff NULL
+#endif
+
+/* Front-end streams are constrained to 48 kHz only; the back-end fixup
+ * (merr_codec_fixup) forces the DSP path to 48 kHz as well. */
+static unsigned int rates_48000[] = {
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+	.count = ARRAY_SIZE(rates_48000),
+	.list = rates_48000,
+};
+
+/* .startup op: apply the 48 kHz rate constraint to the opening stream */
+static int merr_dummy_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_48000);
+}
+
+static struct snd_soc_ops merr_dummy_ops = {
+	.startup = merr_dummy_startup,
+};
+
+/*
+ * Back-end fixup for the SSP2 link: regardless of the front-end
+ * configuration the DSP converts to 48 kHz stereo, so pin the BE to
+ * 48 kHz / 2 channels / S24_LE.
+ */
+static int merr_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+			    struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+						SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	pr_debug("Invoked %s for dailink %s\n", __func__, rtd->dai_link->name);
+
+	/* The DSP will convert the FE rate to 48k, stereo, 24bits */
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = 2;
+
+	/* set SSP2 to 24-bit */
+	snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+				    SNDRV_PCM_HW_PARAM_FIRST_MASK],
+		     SNDRV_PCM_FORMAT_S24_LE);
+	return 0;
+}
+
+struct snd_soc_dai_link merr_msic_dailink[] = {
+ [MERR_DPCM_AUDIO] = {
+ .name = "Media Audio Port",
+ .stream_name = "Edison Audio",
+ .cpu_dai_name = "Headset-cpu-dai",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "sst-platform",
+ .ignore_suspend = 1,
+ .dynamic = 1,
+ .ops = &merr_dummy_ops,
+ },
+ /* back ends */
+ {
+ .name = "SSP2-Codec",
+ .be_id = 1,
+ .cpu_dai_name = "ssp2-codec",
+ .platform_name = "sst-platform",
+ .no_pcm = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .be_hw_params_fixup = merr_codec_fixup,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = "SSP1-BT",
+ .be_id = 2,
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "snd-soc-dummy",
+ .no_pcm = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .ignore_suspend = 1,
+ },
+
+};
+
+static const struct snd_soc_dapm_route map[] = {
+ { "Dummy Playback", NULL, "codec_out0" },
+ { "Dummy Playback", NULL, "codec_out1" },
+ { "codec_in0", NULL, "Dummy Capture" },
+ { "codec_in1", NULL, "Dummy Capture" },
+};
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_merr = {
+ .name = "dummy-audio",
+ .dai_link = merr_msic_dailink,
+ .num_links = ARRAY_SIZE(merr_msic_dailink),
+ .dapm_routes = map,
+ .num_dapm_routes = ARRAY_SIZE(map),
+};
+
+/*
+ * Platform probe: bind the dummy card to this platform device, register
+ * it with the ASoC core and stash it as drvdata for remove().
+ */
+static int snd_merr_dpcm_probe(struct platform_device *pdev)
+{
+	int ret_val = 0;
+	pr_debug("%s enter\n", __func__);
+
+	/* register the soc card */
+	snd_soc_card_merr.dev = &pdev->dev;
+	ret_val = snd_soc_register_card(&snd_soc_card_merr);
+	if (ret_val) {
+		pr_err("snd_soc_register_card failed %d\n", ret_val);
+		return ret_val;
+	}
+	platform_set_drvdata(pdev, &snd_soc_card_merr);
+	pr_info("%s successful\n", __func__);
+	return ret_val;
+}
+
+/* Platform remove: unregister the card and clear drvdata. */
+static int snd_merr_dpcm_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+
+	/* Normal teardown path: trace at debug level, not error */
+	pr_debug("%s\n", __func__);
+	snd_soc_unregister_card(soc_card);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+const struct dev_pm_ops snd_merr_dpcm_mc_pm_ops = {
+ .prepare = snd_merr_dpcm_prepare,
+ .complete = snd_merr_dpcm_complete,
+ .poweroff = snd_merr_dpcm_poweroff,
+};
+
+static struct platform_driver snd_merr_dpcm_drv = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "merr_dpcm_dummy",
+ .pm = &snd_merr_dpcm_mc_pm_ops,
+ },
+ .probe = snd_merr_dpcm_probe,
+ .remove = snd_merr_dpcm_remove,
+};
+
+module_platform_driver(snd_merr_dpcm_drv);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Edison dummy MID Machine driver");
+MODULE_AUTHOR("Michael Soares <michaelx.soares@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:merr_dpcm_dummy");
+
--- /dev/null
+/*
+ * merr_dpcm_wm8958.c - ASoC DPCM Machine driver for Intel Merrifield MID platform
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/input.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "../../codecs/wm8994.h"
+
+/* Codec PLL output clk rate */
+#define CODEC_SYSCLK_RATE 24576000
+/* Input clock to codec at MCLK1 PIN */
+#define CODEC_IN_MCLK1_RATE 19200000
+/* Input clock to codec at MCLK2 PIN */
+#define CODEC_IN_MCLK2_RATE 32768
+/* define to select between MCLK1 and MCLK2 input to codec as its clock */
+#define CODEC_IN_MCLK1 1
+#define CODEC_IN_MCLK2 2
+
+/* Register address for OSC Clock */
+#define MERR_OSC_CLKOUT_CTRL0_REG_ADDR 0xFF00BC04
+/* Size of osc clock register */
+#define MERR_OSC_CLKOUT_CTRL0_REG_SIZE 4
+
+struct mrfld_8958_mc_private {
+ struct snd_soc_jack jack;
+ int jack_retry;
+ u8 pmic_id;
+ void __iomem *osc_clk0_reg;
+};
+
+
+/* set_soc_osc_clk0 - enable or disable osc clock0
+ * addr: address of the clock control register to write to
+ * enable: true to enable the clock, false to disable it
+ */
+static inline void set_soc_osc_clk0(void __iomem *addr, bool enable)
+{
+	u32 osc_clk_ctrl;
+
+	/* Read-modify-write: only bit 31 (the enable bit) is touched */
+	osc_clk_ctrl = readl(addr);
+	if (enable)
+		osc_clk_ctrl |= BIT(31);
+	else
+		osc_clk_ctrl &= ~(BIT(31));
+
+	pr_debug("%s: enable:%d val 0x%x\n", __func__, enable, osc_clk_ctrl);
+
+	writel(osc_clk_ctrl, addr);
+}
+
+/*
+ * Walk the card's codec list and return the first codec whose name
+ * contains "wm8994-codec"; returns NULL (with a warning) if none is
+ * found.
+ */
+static inline struct snd_soc_codec *mrfld_8958_get_codec(struct snd_soc_card *card)
+{
+	bool found = false;
+	struct snd_soc_codec *codec;
+
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		if (!strstr(codec->name, "wm8994-codec")) {
+			pr_debug("codec was %s", codec->name);
+			continue;
+		} else {
+			found = true;
+			break;
+		}
+	}
+	if (found == false) {
+		pr_warn("%s: cant find codec", __func__);
+		return NULL;
+	}
+	return codec;
+}
+
+/* TODO: find better way of doing this */
+static struct snd_soc_dai *find_codec_dai(struct snd_soc_card *card, const char *dai_name)
+{
+ int i;
+ for (i = 0; i < card->num_rtd; i++) {
+ if (!strcmp(card->rtd[i].codec_dai->name, dai_name))
+ return card->rtd[i].codec_dai;
+ }
+ pr_err("%s: unable to find codec dai\n", __func__);
+ /* this should never occur */
+ WARN_ON(1);
+ return NULL;
+}
+
+/* Function to switch the input clock for codec. When audio is in
+ * progress the input clock to the codec is 19.2MHz MCLK1 (multiplied up
+ * to CODEC_SYSCLK_RATE by FLL1), while in the off state the codec runs
+ * from the 32KHz MCLK2 so the 19.2MHz osc clock can be gated.
+ * card : Sound card structure
+ * src : Input clock source to codec (CODEC_IN_MCLK1 or CODEC_IN_MCLK2)
+ * Returns 0 on success or a negative error code.
+ */
+static int mrfld_8958_set_codec_clk(struct snd_soc_card *card, int src)
+{
+ struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+ int ret;
+
+ if (!aif1_dai)
+ return -ENODEV;
+
+ switch (src) {
+ case CODEC_IN_MCLK1:
+ /* Turn ON the PLL to generate required sysclk rate
+ * from MCLK1 */
+ ret = snd_soc_dai_set_pll(aif1_dai,
+ WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+ CODEC_IN_MCLK1_RATE, CODEC_SYSCLK_RATE);
+ if (ret < 0) {
+ pr_err("Failed to start FLL: %d\n", ret);
+ return ret;
+ }
+ /* Switch to MCLK1 input */
+ ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_FLL1,
+ CODEC_SYSCLK_RATE, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ pr_err("Failed to set codec sysclk configuration %d\n",
+ ret);
+ return ret;
+ }
+ break;
+ case CODEC_IN_MCLK2:
+ /* Switch to MCLK2; use the named rate constant instead of the
+ * bare 32768 literal for consistency with CODEC_IN_MCLK1_RATE */
+ ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_MCLK2,
+ CODEC_IN_MCLK2_RATE, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ pr_err("Failed to switch to MCLK2: %d", ret);
+ return ret;
+ }
+ /* Turn off PLL for MCLK1 */
+ ret = snd_soc_dai_set_pll(aif1_dai, WM8994_FLL1, 0, 0, 0);
+ if (ret < 0) {
+ pr_err("Failed to stop the FLL: %d", ret);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* mrfld_wm8958_set_clk_fmt - configure wm8994 AIF1 clocking and format.
+ *
+ * Enables the 19.2MHz SoC osc clock early so it gets settling time,
+ * programs the TDM slots, sets the DAI to DSP_B / inverted BCLK with
+ * the codec as slave, then switches the codec to the MCLK1/FLL path.
+ * Returns 0 on success or a negative error code.
+ */
+static int mrfld_wm8958_set_clk_fmt(struct snd_soc_dai *codec_dai)
+{
+ unsigned int fmt;
+ int ret = 0;
+ struct snd_soc_card *card = codec_dai->card;
+ struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+ /* Enable the osc clock at start so that it gets settling time */
+ set_soc_osc_clk0(ctx->osc_clk0_reg, true);
+
+ /* was pr_err(): this is purely informational, log at debug level */
+ pr_debug("setting snd_soc_dai_set_tdm_slot\n");
+ /* NOTE(review): the last argument of snd_soc_dai_set_tdm_slot() is the
+ * slot width in bits; passing SNDRV_PCM_FORMAT_S24_LE here looks like a
+ * format/width mixup — confirm against the SST DAI driver before use. */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+ if (ret < 0) {
+ pr_err("can't set codec pcm format %d\n", ret);
+ return ret;
+ }
+
+ /* WM8958 slave Mode */
+ fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+ | SND_SOC_DAIFMT_CBS_CFS;
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+ if (ret < 0) {
+ pr_err("can't set codec DAI configuration %d\n", ret);
+ return ret;
+ }
+
+ /* FIXME: move this to SYS_CLOCK event handler when codec driver
+ * dependency is clean.
+ */
+ /* Switch to 19.2MHz MCLK1 input clock for codec */
+ ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK1);
+
+ return ret;
+}
+
+/* hw_params hook: (re)program codec clocking only when the stream's
+ * codec DAI is the wm8994 AIF1; dummy BE links are left untouched. */
+static int mrfld_8958_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai = rtd->codec_dai;
+
+ if (strcmp(dai->name, "wm8994-aif1"))
+ return 0;
+
+ return mrfld_wm8958_set_clk_fmt(dai);
+}
+
+/* Compressed-stream set_params hook: intentionally a no-op in this
+ * revision (clocking is handled via hw_params/bias-level callbacks). */
+static int mrfld_wm8958_compr_set_params(struct snd_compr_stream *cstream)
+{
+ return 0;
+}
+
+/* Fixed stream format for the codec<->codec link: 48kHz stereo S24_LE. */
+static const struct snd_soc_pcm_stream mrfld_wm8958_dai_params = {
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+};
+
+/* Back-end fixup: force the SSP2 BE to 48kHz/stereo/S24_LE regardless
+ * of the front-end parameters (the DSP performs the conversion). */
+static int merr_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ pr_debug("Invoked %s for dailink %s\n", __func__, rtd->dai_link->name);
+
+ /* The DSP will convert the FE rate to 48k, stereo, 24bits */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP2 to 24-bit; fixed mis-encoded "&params" token here */
+ snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK],
+ SNDRV_PCM_FORMAT_S24_LE);
+ return 0;
+}
+
+/* Card bias-level hook: re-establish codec clock/format when moving
+ * from STANDBY to PREPARE on the wm8994 AIF1 device's DAPM context. */
+static int mrfld_8958_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+ int ret = 0;
+
+ if (!aif1_dai)
+ return -ENODEV;
+
+ /* only act on the codec's own DAPM context */
+ if (dapm->dev != aif1_dai->dev)
+ return 0;
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ /* braces added: a stray blank line previously separated the
+ * unbraced if from its controlled statement */
+ if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
+ ret = mrfld_wm8958_set_clk_fmt(aif1_dai);
+ }
+ break;
+ default:
+ break;
+ }
+ pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+ card->dapm.bias_level);
+ return ret;
+}
+
+/* Post bias-level hook: when the codec drops back to STANDBY, switch it
+ * to the 32KHz MCLK2 and gate the 19.2MHz SoC osc clock to save power. */
+static int mrfld_8958_set_bias_level_post(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+ struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ if (!aif1_dai)
+ return -ENODEV;
+
+ /* only act on the codec's own DAPM context */
+ if (dapm->dev != aif1_dai->dev)
+ return 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_STANDBY:
+ /* We are going into standby, so switch to the 32KHz MCLK2
+ * input clock for the codec
+ */
+ ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK2);
+ /* Turn off 19.2MHz soc osc clock */
+ set_soc_osc_clk0(ctx->osc_clk0_reg, false);
+ break;
+ default:
+ break;
+ }
+ card->dapm.bias_level = level;
+ pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+ card->dapm.bias_level);
+ return ret;
+}
+
+#define PMIC_ID_ADDR 0x00
+#define PMIC_CHIP_ID_A0_VAL 0xC0
+
+/* DAPM supply event handler for the PMIC VFLEX rail: raise the rail to
+ * 5V while audio paths power up, restore the revision-dependent idle
+ * voltage when they power down. Writes go through the SCU IPC. */
+static int mrfld_8958_set_vflex_vsel(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+#define VFLEXCNT 0xAB
+#define VFLEXVSEL_5V 0x01
+#define VFLEXVSEL_B0_VSYS_PT 0x80 /* B0: Vsys pass-through */
+#define VFLEXVSEL_A0_4P5V 0x41 /* A0: 4.5V */
+
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+ u8 vflexvsel, pmic_id = ctx->pmic_id;
+ int retval = 0;
+
+ pr_debug("%s: ON? %d\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
+
+ /* idle voltage depends on the PMIC revision read at probe */
+ vflexvsel = (pmic_id == PMIC_CHIP_ID_A0_VAL) ? VFLEXVSEL_A0_4P5V : VFLEXVSEL_B0_VSYS_PT;
+ pr_debug("pmic_id %#x vflexvsel %#x\n", pmic_id,
+ SND_SOC_DAPM_EVENT_ON(event) ? VFLEXVSEL_5V : vflexvsel);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ retval = intel_scu_ipc_iowrite8(VFLEXCNT, VFLEXVSEL_5V);
+ else if (SND_SOC_DAPM_EVENT_OFF(event))
+ retval = intel_scu_ipc_iowrite8(VFLEXCNT, vflexvsel);
+ if (retval)
+ pr_err("Error writing to VFLEXCNT register\n");
+
+ return retval;
+}
+
+/* Board-level DAPM widgets: headphone jack, analog and digital mics,
+ * plus a supply widget that toggles the PMIC VFLEX rail. */
+static const struct snd_soc_dapm_widget widgets[] = {
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_MIC("AMIC", NULL),
+ SND_SOC_DAPM_MIC("DMIC", NULL),
+ SND_SOC_DAPM_SUPPLY("VFLEXCNT", SND_SOC_NOPM, 0, 0,
+ mrfld_8958_set_vflex_vsel,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+/* Board-level audio routing between codec pins and board widgets. */
+static const struct snd_soc_dapm_route map[] = {
+ { "Headphones", NULL, "HPOUT1L" },
+ { "Headphones", NULL, "HPOUT1R" },
+
+ /* saltbay uses 2 DMICs, other configs may use more so change below
+ * accordingly
+ */
+ { "DMIC1DAT", NULL, "DMIC" },
+ { "DMIC2DAT", NULL, "DMIC" },
+ /*{ "DMIC3DAT", NULL, "DMIC" },*/
+ /*{ "DMIC4DAT", NULL, "DMIC" },*/
+
+ /* MICBIAS2 is connected as Bias for AMIC so we link it
+ * here. Also AMIC wires up to IN1LP pin.
+ * DMIC is externally connected to 1.8V rail, so no link rqd.
+ */
+ { "AMIC", NULL, "MICBIAS2" },
+ { "IN1LP", NULL, "AMIC" },
+
+ /* SWM map link the SWM outs to codec AIF */
+ { "AIF1 Playback", NULL, "codec_out0" },
+ { "AIF1 Playback", NULL, "codec_out1" },
+ { "codec_in0", NULL, "AIF1 Capture" },
+ { "codec_in1", NULL, "AIF1 Capture" },
+
+ /* keep the VFLEX supply up whenever AIF1 is streaming */
+ { "AIF1 Playback", NULL, "VFLEXCNT" },
+ { "AIF1 Capture", NULL, "VFLEXCNT" },
+};
+
+/* Default mic-detect poll rates: { sysclk, idle, start, rate }. */
+static const struct wm8958_micd_rate micdet_rates[] = {
+ { 32768, true, 1, 4 },
+ { 32768, false, 1, 1 },
+ { 44100 * 256, true, 7, 10 },
+ { 44100 * 256, false, 7, 10 },
+};
+
+/* Custom mic-detect rate callback: pick the micdet rate-table entry
+ * whose sysclk is closest to the codec's current AIF clock (idle vs
+ * active rows chosen by jack state) and program WM8958_MIC_DETECT_1. */
+static void wm8958_custom_micd_set_rate(struct snd_soc_codec *codec)
+{
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
+ int best, i, sysclk, val;
+ bool idle;
+ const struct wm8958_micd_rate *rates;
+ int num_rates;
+
+ /* "idle" when no microphone has been detected on the jack */
+ idle = !wm8994->jack_mic;
+
+ sysclk = snd_soc_read(codec, WM8994_CLOCKING_1);
+ if (sysclk & WM8994_SYSCLK_SRC)
+ sysclk = wm8994->aifclk[1];
+ else
+ sysclk = wm8994->aifclk[0];
+
+ /* platform data may override the default rate table */
+ if (control->pdata.micd_rates) {
+ rates = control->pdata.micd_rates;
+ num_rates = control->pdata.num_micd_rates;
+ } else {
+ rates = micdet_rates;
+ num_rates = ARRAY_SIZE(micdet_rates);
+ }
+
+ /* nearest-sysclk match among rows whose idle flag matches */
+ best = 0;
+ for (i = 0; i < num_rates; i++) {
+ if (rates[i].idle != idle)
+ continue;
+ if (abs(rates[i].sysclk - sysclk) <
+ abs(rates[best].sysclk - sysclk))
+ best = i;
+ else if (rates[best].idle != idle)
+ best = i;
+ }
+
+ val = rates[best].start << WM8958_MICD_BIAS_STARTTIME_SHIFT
+ | rates[best].rate << WM8958_MICD_RATE_SHIFT;
+
+ dev_dbg(codec->dev, "MICD rate %d,%d for %dHz %s\n",
+ rates[best].start, rates[best].rate, sysclk,
+ idle ? "idle" : "active");
+
+ snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+ WM8958_MICD_BIAS_STARTTIME_MASK |
+ WM8958_MICD_RATE_MASK, val);
+}
+
+/* Custom mic-identification callback registered with wm8958_mic_detect():
+ * decode the MICD status word and report headset/headphone insertion. */
+static void wm8958_custom_mic_id(void *data, u16 status)
+{
+ struct snd_soc_codec *codec = data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "wm8958 custom mic id called with status %x\n",
+ status);
+
+ /* Either nothing present or just starting detection */
+ if (!(status & WM8958_MICD_STS)) {
+ /* If nothing present then clear our statuses */
+ dev_dbg(codec->dev, "Detected open circuit\n");
+
+ schedule_delayed_work(&wm8994->open_circuit_work,
+ msecs_to_jiffies(2500));
+ return;
+ }
+
+ /* re-apply the custom micdet rate after the micbias settle delay */
+ schedule_delayed_work(&wm8994->micd_set_custom_rate_work,
+ msecs_to_jiffies(wm8994->wm8994->pdata.micb_en_delay));
+
+ /* If the measurement is showing a high impedance we've got a
+ * microphone.
+ */
+ if (status & 0x600) {
+ dev_dbg(codec->dev, "Detected microphone\n");
+
+ wm8994->mic_detecting = false;
+ wm8994->jack_mic = true;
+
+ snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADSET,
+ SND_JACK_HEADSET);
+ }
+
+
+ if (status & 0xfc) {
+ dev_dbg(codec->dev, "Detected headphone\n");
+
+ /* Partial inserts of headsets with complete insert
+ * after an indeterminate amount of time require
+ * continuous micdetect enabled (until open circuit
+ * or headset is detected)
+ * */
+ wm8994->mic_detecting = true;
+
+ snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
+ SND_JACK_HEADSET);
+ }
+}
+
+/* Card/DAI-link init: program AIF1 TDM slots and DAI format, force the
+ * initial bias level, mark unused codec pins, create the headset jack
+ * and hook up the custom mic-detect callbacks. Returns 0 or -errno. */
+static int mrfld_8958_init(struct snd_soc_pcm_runtime *runtime)
+{
+ int ret;
+ unsigned int fmt;
+ struct snd_soc_codec *codec;
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+ struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+ if (!aif1_dai)
+ return -ENODEV;
+
+ pr_debug("Entry %s\n", __func__);
+
+ /* NOTE(review): last arg should be the slot width in bits; passing
+ * SNDRV_PCM_FORMAT_S24_LE looks like a format/width mixup — confirm */
+ ret = snd_soc_dai_set_tdm_slot(aif1_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+ if (ret < 0) {
+ pr_err("can't set codec pcm format %d\n", ret);
+ return ret;
+ }
+
+ /* WM8958 slave Mode */
+ fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+ | SND_SOC_DAIFMT_CBS_CFS;
+ ret = snd_soc_dai_set_fmt(aif1_dai, fmt);
+ if (ret < 0) {
+ pr_err("can't set codec DAI configuration %d\n", ret);
+ return ret;
+ }
+
+ mrfld_8958_set_bias_level(card, &card->dapm, SND_SOC_BIAS_OFF);
+ card->dapm.idle_bias_off = true;
+
+ /* these pins are not used in SB config so mark as nc
+ *
+ * LINEOUT1, 2
+ * IN1R
+ * DMICDAT2
+ */
+ /* NOTE(review): DMIC2DAT is nc'd here but also routed in map[] —
+ * verify which of the two is intended */
+ snd_soc_dapm_nc_pin(&card->dapm, "DMIC2DAT");
+ snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT1P");
+ snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT1N");
+ snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT2P");
+ snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT2N");
+ snd_soc_dapm_nc_pin(&card->dapm, "IN1RN");
+ snd_soc_dapm_nc_pin(&card->dapm, "IN1RP");
+
+ snd_soc_dapm_sync(&card->dapm);
+
+ codec = mrfld_8958_get_codec(card);
+ if (!codec) {
+ pr_err("%s: we didnt find the codec pointer!\n", __func__);
+ return 0;
+ }
+
+ ctx->jack_retry = 0;
+ ret = snd_soc_jack_new(codec, "Intel MID Audio Jack",
+ SND_JACK_HEADSET | SND_JACK_HEADPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1,
+ &ctx->jack);
+ if (ret) {
+ pr_err("jack creation failed\n");
+ return ret;
+ }
+
+ /* both buttons report KEY_MEDIA */
+ snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_1, KEY_MEDIA);
+ snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+
+ wm8958_mic_detect(codec, &ctx->jack, NULL, NULL,
+ wm8958_custom_mic_id, codec);
+
+ wm8958_micd_set_custom_rate(codec, wm8958_custom_micd_set_rate, codec);
+
+ /* unmute both AIF1 DAC paths */
+ snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MUTE, 0);
+ snd_soc_update_bits(codec, WM8994_AIF1_DAC2_FILTERS_1, WM8994_AIF1DAC2_MUTE, 0);
+
+ /* Micbias1 is always off, so for pm optimizations make sure the micbias1
+ * discharge bit is set to floating to avoid discharge in disable state
+ */
+ snd_soc_update_bits(codec, WM8958_MICBIAS1, WM8958_MICB1_DISCH, 0);
+
+ return 0;
+}
+
+/* Voice-path rates (VOIP link). */
+static unsigned int rates_8000_16000[] = {
+ 8000,
+ 16000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_8000_16000 = {
+ .count = ARRAY_SIZE(rates_8000_16000),
+ .list = rates_8000_16000,
+};
+/* Media-path rate (audio/deep-buffer/low-latency links). */
+static unsigned int rates_48000[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+ .count = ARRAY_SIZE(rates_48000),
+ .list = rates_48000,
+};
+/* Restrict media front-ends to 48kHz. */
+static int mrfld_8958_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_48000);
+}
+
+static struct snd_soc_ops mrfld_8958_ops = {
+ .startup = mrfld_8958_startup,
+};
+/* Restrict the VOIP front-end to 8k/16kHz. */
+static int mrfld_8958_8k_16k_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_8000_16000);
+}
+
+static struct snd_soc_ops mrfld_8958_8k_16k_ops = {
+ .startup = mrfld_8958_8k_16k_startup,
+ .hw_params = mrfld_8958_hw_params,
+};
+
+/* SSP2 back-end only needs hw_params to reprogram codec clocking. */
+static struct snd_soc_ops mrfld_8958_be_ssp2_ops = {
+ .hw_params = mrfld_8958_hw_params,
+};
+static struct snd_soc_compr_ops mrfld_compr_ops = {
+ .set_params = mrfld_wm8958_compr_set_params,
+};
+
+/* DPCM topology: dummy-codec front-ends (audio, deep-buffer, low-latency,
+ * compress, VOIP, probe), one codec<->codec loop link, and the SSP2/SSP1/
+ * SSP0 back-ends. Only SSP2 carries the real wm8994 codec. */
+struct snd_soc_dai_link mrfld_8958_msic_dailink[] = {
+ [MERR_DPCM_AUDIO] = {
+ .name = "Merrifield Audio Port",
+ .stream_name = "Saltbay Audio",
+ .cpu_dai_name = "Headset-cpu-dai",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "sst-platform",
+ .init = mrfld_8958_init,
+ .ignore_suspend = 1,
+ .dynamic = 1,
+ .ops = &mrfld_8958_ops,
+ },
+ [MERR_DPCM_DB] = {
+ .name = "Merrifield DB Audio Port",
+ .stream_name = "Deep Buffer Audio",
+ .cpu_dai_name = "Deepbuffer-cpu-dai",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "sst-platform",
+ .init = mrfld_8958_init,
+ .ignore_suspend = 1,
+ .dynamic = 1,
+ .ops = &mrfld_8958_ops,
+ },
+ [MERR_DPCM_LL] = {
+ .name = "Merrifield LL Audio Port",
+ .stream_name = "Low Latency Audio",
+ .cpu_dai_name = "Lowlatency-cpu-dai",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "sst-platform",
+ .init = mrfld_8958_init,
+ .ignore_suspend = 1,
+ .dynamic = 1,
+ .ops = &mrfld_8958_ops,
+ },
+ [MERR_DPCM_COMPR] = {
+ .name = "Merrifield Compress Port",
+ .stream_name = "Saltbay Compress",
+ .platform_name = "sst-platform",
+ .cpu_dai_name = "Compress-cpu-dai",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .dynamic = 1,
+ .init = mrfld_8958_init,
+ .compr_ops = &mrfld_compr_ops,
+ },
+ [MERR_DPCM_VOIP] = {
+ .name = "Merrifield VOIP Port",
+ .stream_name = "Saltbay Voip",
+ .cpu_dai_name = "Voip-cpu-dai",
+ .platform_name = "sst-platform",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .init = NULL,
+ .ignore_suspend = 1,
+ .ops = &mrfld_8958_8k_16k_ops,
+ .dynamic = 1,
+ },
+ [MERR_DPCM_PROBE] = {
+ .name = "Merrifield Probe Port",
+ .stream_name = "Saltbay Probe",
+ .cpu_dai_name = "Probe-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-platform",
+ .playback_count = 8,
+ .capture_count = 8,
+ },
+ /* CODEC<->CODEC link */
+ {
+ .name = "Merrifield Codec-Loop Port",
+ .stream_name = "Saltbay Codec-Loop",
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .codec_dai_name = "wm8994-aif1",
+ .codec_name = "wm8994-codec",
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .params = &mrfld_wm8958_dai_params,
+ },
+
+ /* back ends */
+ {
+ .name = "SSP2-Codec",
+ .be_id = 1,
+ .cpu_dai_name = "ssp2-codec",
+ .platform_name = "sst-platform",
+ .no_pcm = 1,
+ .codec_dai_name = "wm8994-aif1",
+ .codec_name = "wm8994-codec",
+ .be_hw_params_fixup = merr_codec_fixup,
+ .ignore_suspend = 1,
+ .ops = &mrfld_8958_be_ssp2_ops,
+ },
+ {
+ .name = "SSP1-BTFM",
+ .be_id = 2,
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "snd-soc-dummy",
+ .no_pcm = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .ignore_suspend = 1,
+ },
+ {
+ .name = "SSP0-Modem",
+ .be_id = 3,
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "snd-soc-dummy",
+ .no_pcm = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .ignore_suspend = 1,
+ },
+};
+
+#ifdef CONFIG_PM_SLEEP
+/* System-sleep prepare: route into the ASoC suspend path. */
+static int snd_mrfld_8958_prepare(struct device *dev)
+{
+ pr_debug("In %s device name\n", __func__);
+ snd_soc_suspend(dev);
+ return 0;
+}
+
+/* System-sleep complete: route into the ASoC resume path. */
+static void snd_mrfld_8958_complete(struct device *dev)
+{
+ pr_debug("In %s\n", __func__);
+ snd_soc_resume(dev);
+ return;
+}
+
+/* Shutdown/poweroff hook. */
+static int snd_mrfld_8958_poweroff(struct device *dev)
+{
+ pr_debug("In %s\n", __func__);
+ snd_soc_poweroff(dev);
+ return 0;
+}
+#else
+#define snd_mrfld_8958_prepare NULL
+#define snd_mrfld_8958_complete NULL
+#define snd_mrfld_8958_poweroff NULL
+#endif
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_mrfld = {
+ .name = "wm8958-audio",
+ .dai_link = mrfld_8958_msic_dailink,
+ .num_links = ARRAY_SIZE(mrfld_8958_msic_dailink),
+ .set_bias_level = mrfld_8958_set_bias_level,
+ .set_bias_level_post = mrfld_8958_set_bias_level_post,
+ .dapm_widgets = widgets,
+ .num_dapm_widgets = ARRAY_SIZE(widgets),
+ .dapm_routes = map,
+ .num_dapm_routes = ARRAY_SIZE(map),
+};
+
+/* Platform probe: allocate the private context, map the osc clk0 control
+ * register, read the PMIC revision and register the ASoC card. */
+static int snd_mrfld_8958_mc_probe(struct platform_device *pdev)
+{
+ int ret_val = 0;
+ struct mrfld_8958_mc_private *drv;
+
+ pr_debug("Entry %s\n", __func__);
+
+ /* probe runs in process context: GFP_KERNEL (was GFP_ATOMIC) */
+ drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+ if (!drv) {
+ pr_err("allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* ioremap the register */
+ drv->osc_clk0_reg = devm_ioremap_nocache(&pdev->dev,
+ MERR_OSC_CLKOUT_CTRL0_REG_ADDR,
+ MERR_OSC_CLKOUT_CTRL0_REG_SIZE);
+ if (!drv->osc_clk0_reg) {
+ pr_err("osc clk0 ctrl ioremap failed\n");
+ ret_val = -ENOMEM; /* was -1: return a real errno */
+ goto unalloc;
+ }
+
+ ret_val = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &drv->pmic_id);
+ if (ret_val) {
+ pr_err("Error reading PMIC ID register\n");
+ goto unalloc;
+ }
+
+ /* register the soc card */
+ snd_soc_card_mrfld.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&snd_soc_card_mrfld, drv);
+ ret_val = snd_soc_register_card(&snd_soc_card_mrfld);
+ if (ret_val) {
+ pr_err("snd_soc_register_card failed %d\n", ret_val);
+ goto unalloc;
+ }
+ platform_set_drvdata(pdev, &snd_soc_card_mrfld);
+ pr_info("%s successful\n", __func__);
+ return ret_val;
+
+unalloc:
+ kfree(drv);
+ return ret_val;
+}
+
+/* Platform remove: unregister the card BEFORE freeing its drvdata.
+ * The original freed drv first, leaving the still-registered card
+ * pointing at freed memory during teardown (use-after-free window). */
+static int snd_mrfld_8958_mc_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+ struct mrfld_8958_mc_private *drv = snd_soc_card_get_drvdata(soc_card);
+
+ pr_debug("In %s\n", __func__);
+ snd_soc_unregister_card(soc_card);
+ snd_soc_card_set_drvdata(soc_card, NULL);
+ kfree(drv);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+/* Device PM callbacks wired into the platform driver below.
+ * NOTE(review): could likely be static — verify no external users. */
+const struct dev_pm_ops snd_mrfld_8958_mc_pm_ops = {
+ .prepare = snd_mrfld_8958_prepare,
+ .complete = snd_mrfld_8958_complete,
+ .poweroff = snd_mrfld_8958_poweroff,
+};
+
+/* Machine platform driver; bound to the "mrfld_wm8958" platform device. */
+static struct platform_driver snd_mrfld_8958_mc_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mrfld_wm8958",
+ .pm = &snd_mrfld_8958_mc_pm_ops,
+ },
+ .probe = snd_mrfld_8958_mc_probe,
+ .remove = snd_mrfld_8958_mc_remove,
+};
+
+/* Register the machine platform driver (invoked from the rpmsg probe).
+ * Fixed "registerd" typo in the log message. */
+static int snd_mrfld_8958_driver_init(void)
+{
+ pr_info("Merrifield Machine Driver mrfld_wm8958 registered\n");
+ return platform_driver_register(&snd_mrfld_8958_mc_driver);
+}
+
+/* Unregister the machine platform driver. */
+static void snd_mrfld_8958_driver_exit(void)
+{
+ pr_debug("In %s\n", __func__);
+ platform_driver_unregister(&snd_mrfld_8958_mc_driver);
+}
+
+/* rpmsg probe: the SCU-side channel creation gates registration of the
+ * actual machine driver. */
+static int snd_mrfld_8958_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+ int ret = 0;
+
+ if (rpdev == NULL) {
+ pr_err("rpmsg channel not created\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&rpdev->dev, "Probed snd_mrfld wm8958 rpmsg device\n");
+
+ ret = snd_mrfld_8958_driver_init();
+
+out:
+ return ret;
+}
+
+static void snd_mrfld_8958_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+ snd_mrfld_8958_driver_exit();
+ dev_info(&rpdev->dev, "Removed snd_mrfld wm8958 rpmsg device\n");
+}
+
+/* No messages are expected on this channel; dump anything that arrives. */
+static void snd_mrfld_8958_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ dev_warn(&rpdev->dev, "unexpected, message\n");
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+}
+
+static struct rpmsg_device_id snd_mrfld_8958_rpmsg_id_table[] = {
+ { .name = "rpmsg_mrfld_wm8958_audio" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, snd_mrfld_8958_rpmsg_id_table);
+
+static struct rpmsg_driver snd_mrfld_8958_rpmsg = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = snd_mrfld_8958_rpmsg_id_table,
+ .probe = snd_mrfld_8958_rpmsg_probe,
+ .callback = snd_mrfld_8958_rpmsg_cb,
+ .remove = snd_mrfld_8958_rpmsg_remove,
+};
+
+/* late_initcall: the rpmsg/SCU infrastructure must be up first. */
+static int __init snd_mrfld_8958_rpmsg_init(void)
+{
+ return register_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+late_initcall(snd_mrfld_8958_rpmsg_init);
+
+static void __exit snd_mrfld_8958_rpmsg_exit(void)
+{
+ /* NOTE(review): "return" of a void call in a void function — legal in
+ * GNU C but plain "unregister_...;" would be cleaner */
+ return unregister_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+module_exit(snd_mrfld_8958_rpmsg_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Merrifield MID Machine driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mrfld_wm8958");
--- /dev/null
+/*
+ * merr_saltbay_wm8958.c - ASoc Machine driver for Intel Merrfield MID platform
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/platform_mrfld_audio.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/input.h>
+#include <asm/intel-mid.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "../../codecs/wm8994.h"
+
+/* Codec PLL output clk rate */
+#define CODEC_SYSCLK_RATE 24576000
+/* Input clock to codec at MCLK1 PIN */
+#define CODEC_IN_MCLK1_RATE 19200000
+/* Input clock to codec at MCLK2 PIN */
+#define CODEC_IN_MCLK2_RATE 32768
+/* define to select between MCLK1 and MCLK2 input to codec as its clock */
+#define CODEC_IN_MCLK1 1
+#define CODEC_IN_MCLK2 2
+
+/* Register address for OSC Clock */
+#define MERR_OSC_CLKOUT_CTRL0_REG_ADDR 0xFF00BC04
+/* Size of osc clock register */
+#define MERR_OSC_CLKOUT_CTRL0_REG_SIZE 4
+
+/* Machine-driver private context (earlier revision of this file). */
+struct mrfld_8958_mc_private {
+ struct snd_soc_jack jack; /* headset jack */
+ int jack_retry; /* jack detection retry counter */
+ u8 pmic_id; /* PMIC chip revision */
+ void __iomem *osc_clk0_reg; /* ioremapped OSC clkout0 control register */
+};
+
+
+/* set_osc_clk0- enable/disables the osc clock0
+ * addr: address of the register to write to
+ * enable: bool to enable or disable the clock
+ * (read-modify-write of enable bit 31)
+ */
+static inline void set_soc_osc_clk0(void __iomem *addr, bool enable)
+{
+ u32 osc_clk_ctrl;
+
+ osc_clk_ctrl = readl(addr);
+ if (enable)
+ osc_clk_ctrl |= BIT(31);
+ else
+ osc_clk_ctrl &= ~(BIT(31));
+
+ pr_debug("%s: enable:%d val 0x%x\n", __func__, enable, osc_clk_ctrl);
+
+ writel(osc_clk_ctrl, addr);
+}
+
+
+/* Return the wm8994 codec instance attached to this card, or NULL
+ * (this revision logs the miss at error level). */
+static inline struct snd_soc_codec *mrfld_8958_get_codec(struct snd_soc_card *card)
+{
+ bool found = false;
+ struct snd_soc_codec *codec;
+
+ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ if (!strstr(codec->name, "wm8994-codec")) {
+ pr_debug("codec was %s", codec->name);
+ continue;
+ } else {
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+ pr_err("%s: cant find codec", __func__);
+ return NULL;
+ }
+ return codec;
+}
+
+/* Function to switch the input clock for codec. When audio is in
+ * progress input clock to codec will be through MCLK1 which is 19.2MHz
+ * while in off state input clock to codec will be 32KHz through
+ * MCLK2
+ * card : Sound card structure
+ * src : Input clock source to codec (CODEC_IN_MCLK1 or CODEC_IN_MCLK2)
+ */
+static int mrfld_8958_set_codec_clk(struct snd_soc_card *card, int src)
+{
+ /* NOTE(review): assumes rtd[0]'s codec DAI is wm8994-aif1; unlike the
+ * later find_codec_dai() variant there is no lookup or NULL check —
+ * verify the runtime ordering before reusing this revision */
+ struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+ int ret;
+
+ switch (src) {
+ case CODEC_IN_MCLK1:
+ /* Turn ON the PLL to generate required sysclk rate
+ * from MCLK1 */
+ ret = snd_soc_dai_set_pll(aif1_dai,
+ WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+ CODEC_IN_MCLK1_RATE, CODEC_SYSCLK_RATE);
+ if (ret < 0) {
+ pr_err("Failed to start FLL: %d\n", ret);
+ return ret;
+ }
+ /* Switch to MCLK1 input */
+ ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_FLL1,
+ CODEC_SYSCLK_RATE, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ pr_err("Failed to set codec sysclk configuration %d\n",
+ ret);
+ return ret;
+ }
+ break;
+ case CODEC_IN_MCLK2:
+ /* Switch to MCLK2 (32768Hz == CODEC_IN_MCLK2_RATE) */
+ ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_MCLK2,
+ 32768, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ pr_err("Failed to switch to MCLK2: %d", ret);
+ return ret;
+ }
+ /* Turn off PLL for MCLK1 */
+ ret = snd_soc_dai_set_pll(aif1_dai, WM8994_FLL1, 0, 0, 0);
+ if (ret < 0) {
+ pr_err("Failed to stop the FLL: %d", ret);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Configure wm8994 AIF1 clocking/format: enable the SoC osc clock early
+ * for settling time, program TDM slots and DSP_B slave format, then
+ * switch the codec onto the MCLK1/FLL path. Returns 0 or -errno. */
+static int mrfld_wm8958_set_clk_fmt(struct snd_soc_dai *codec_dai)
+{
+ unsigned int fmt;
+ int ret = 0;
+ struct snd_soc_card *card = codec_dai->card;
+ struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+ /* Enable the osc clock at start so that it gets settling time */
+ set_soc_osc_clk0(ctx->osc_clk0_reg, true);
+
+ /* NOTE(review): last arg should be slot width in bits, not a format
+ * enum — confirm against the SST DAI driver */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+ if (ret < 0) {
+ pr_err("can't set codec pcm format %d\n", ret);
+ return ret;
+ }
+
+ /* WM8958 slave Mode */
+ fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+ | SND_SOC_DAIFMT_CBS_CFS;
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+ if (ret < 0) {
+ pr_err("can't set codec DAI configuration %d\n", ret);
+ return ret;
+ }
+
+ /* FIXME: move this to SYS_CLOCK event handler when codec driver
+ * dependency is clean.
+ */
+ /* Switch to 19.2MHz MCLK1 input clock for codec */
+ ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK1);
+
+ return ret;
+}
+
+/* hw_params hook (this revision reprograms clocking unconditionally,
+ * without checking the codec DAI name as the later revision does). */
+static int mrfld_8958_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+ return mrfld_wm8958_set_clk_fmt(codec_dai);
+}
+
+/* Compressed-stream set_params: same clock/format setup as PCM. */
+static int mrfld_wm8958_compr_set_params(struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+ return mrfld_wm8958_set_clk_fmt(codec_dai);
+}
+/* Bias-level hook: re-establish codec clock/format on STANDBY->PREPARE.
+ * NOTE(review): the unbraced if below is separated from its statement
+ * by a blank line — fragile; see the later revision for a braced form. */
+static int mrfld_8958_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+ int ret = 0;
+
+ if (dapm->dev != aif1_dai->dev)
+ return 0;
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+
+ ret = mrfld_wm8958_set_clk_fmt(aif1_dai);
+ break;
+ default:
+ break;
+ }
+ pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+ card->dapm.bias_level);
+ return ret;
+}
+/* Post bias-level hook: on STANDBY switch the codec to 32KHz MCLK2 and
+ * gate the 19.2MHz SoC osc clock. */
+static int mrfld_8958_set_bias_level_post(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+ struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ if (dapm->dev != aif1_dai->dev)
+ return 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_STANDBY:
+ /* We are going into standby, so switch to the 32KHz MCLK2
+ * input clock for the codec
+ */
+ ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK2);
+ /* Turn off 19.2MHz soc osc clock */
+ set_soc_osc_clk0(ctx->osc_clk0_reg, false);
+ break;
+ default:
+ break;
+ }
+ card->dapm.bias_level = level;
+ pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+ card->dapm.bias_level);
+ return ret;
+}
+
+#define PMIC_ID_ADDR 0x00
+#define PMIC_CHIP_ID_A0_VAL 0xC0
+
+/* DAPM supply event: raise the PMIC VFLEX rail to 5V while audio paths
+ * power up; restore the revision-dependent idle voltage on power down
+ * (forced to 5V on Moorefield, see FIXME below). */
+static int mrfld_8958_set_vflex_vsel(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+#define VFLEXCNT 0xAB
+#define VFLEXVSEL_5V 0x01
+#define VFLEXVSEL_B0_VSYS_PT 0x80 /* B0: Vsys pass-through */
+#define VFLEXVSEL_A0_4P5V 0x41 /* A0: 4.5V */
+
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+ u8 vflexvsel, pmic_id = ctx->pmic_id;
+ int retval = 0;
+
+ pr_debug("%s: ON? %d\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
+
+ vflexvsel = (pmic_id == PMIC_CHIP_ID_A0_VAL) ? VFLEXVSEL_A0_4P5V : VFLEXVSEL_B0_VSYS_PT;
+ pr_debug("pmic_id %#x vflexvsel %#x\n", pmic_id,
+ SND_SOC_DAPM_EVENT_ON(event) ? VFLEXVSEL_5V : vflexvsel);
+
+ /*FIXME: seems to be issue with bypass mode in MOOR, for now
+ force the bias off voltage as VFLEXVSEL_5V */
+ if ((INTEL_MID_BOARD(1, PHONE, MOFD)) ||
+ (INTEL_MID_BOARD(1, TABLET, MOFD)))
+ vflexvsel = VFLEXVSEL_5V;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ retval = intel_scu_ipc_iowrite8(VFLEXCNT, VFLEXVSEL_5V);
+ else if (SND_SOC_DAPM_EVENT_OFF(event))
+ retval = intel_scu_ipc_iowrite8(VFLEXCNT, vflexvsel);
+ if (retval)
+ pr_err("Error writing to VFLEXCNT register\n");
+
+ return retval;
+}
+
+static const struct snd_soc_dapm_widget widgets[] = {
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_MIC("AMIC", NULL),
+ SND_SOC_DAPM_MIC("DMIC", NULL),
+ SND_SOC_DAPM_SUPPLY("VFLEXCNT", SND_SOC_NOPM, 0, 0,
+ mrfld_8958_set_vflex_vsel,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+/* DAPM routes: board wiring (jack and mics to codec pins), SST
+ * "Codec IN/OUT" links to the codec AIF, and VFLEXCNT wired as a supply
+ * for every AIF1 DAC/ADC endpoint so the PMIC rail is managed around any
+ * playback or capture.
+ */
+static const struct snd_soc_dapm_route map[] = {
+	{ "Headphones", NULL, "HPOUT1L" },
+	{ "Headphones", NULL, "HPOUT1R" },
+
+	/* saltbay uses 2 DMICs, other configs may use more so change below
+	 * accordingly
+	 */
+	{ "DMIC1DAT", NULL, "DMIC" },
+	{ "DMIC2DAT", NULL, "DMIC" },
+	/*{ "DMIC3DAT", NULL, "DMIC" },*/
+	/*{ "DMIC4DAT", NULL, "DMIC" },*/
+
+	/* MICBIAS2 is connected as Bias for AMIC so we link it
+	 * here. Also AMIC wires up to IN1LP pin.
+	 * DMIC is externally connected to 1.8V rail, so no link rqd.
+	 */
+	{ "AMIC", NULL, "MICBIAS2" },
+	{ "IN1LP", NULL, "AMIC" },
+
+	/* SWM map link the SWM outs to codec AIF */
+	{ "AIF1DAC1L", NULL, "Codec OUT0" },
+	{ "AIF1DAC1R", NULL, "Codec OUT0" },
+	{ "AIF1DAC2L", NULL, "Codec OUT1" },
+	{ "AIF1DAC2R", NULL, "Codec OUT1" },
+	{ "Codec IN0", NULL, "AIF1ADC1L" },
+	{ "Codec IN0", NULL, "AIF1ADC1R" },
+	{ "Codec IN1", NULL, "AIF1ADC1L" },
+	{ "Codec IN1", NULL, "AIF1ADC1R" },
+
+	/* VFLEXCNT supply gates all AIF1 playback and capture endpoints */
+	{ "AIF1DAC1L", NULL, "VFLEXCNT" },
+	{ "AIF1DAC1R", NULL, "VFLEXCNT" },
+	{ "AIF1DAC2L", NULL, "VFLEXCNT" },
+	{ "AIF1DAC2R", NULL, "VFLEXCNT" },
+
+	{ "AIF1ADC1L", NULL, "VFLEXCNT" },
+	{ "AIF1ADC1R", NULL, "VFLEXCNT" },
+
+};
+
+/* Default mic-detect rate table, used when pdata does not supply one.
+ * Fields are { sysclk, idle, start, rate } (see the .sysclk/.idle/.start/
+ * .rate accesses in wm8958_custom_micd_set_rate below).
+ */
+static const struct wm8958_micd_rate micdet_rates[] = {
+	{ 32768,       true,  1, 4 },
+	{ 32768,       false, 1, 1 },
+	{ 44100 * 256, true,  7, 10 },
+	{ 44100 * 256, false, 7, 10 },
+};
+
+/* Choose the best mic-detect bias-start-time/rate entry for the current
+ * SYSCLK and idle (no jack mic) state, preferring rates supplied via
+ * pdata over the local micdet_rates table, and program it into
+ * WM8958_MIC_DETECT_1.
+ */
+static void wm8958_custom_micd_set_rate(struct snd_soc_codec *codec)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
+	int best, i, sysclk, val;
+	bool idle;
+	const struct wm8958_micd_rate *rates;
+	int num_rates;
+
+	/* "idle" means no microphone currently detected on the jack */
+	idle = !wm8994->jack_mic;
+
+	sysclk = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (sysclk & WM8994_SYSCLK_SRC)
+		sysclk = wm8994->aifclk[1];
+	else
+		sysclk = wm8994->aifclk[0];
+
+	if (control->pdata.micd_rates) {
+		rates = control->pdata.micd_rates;
+		num_rates = control->pdata.num_micd_rates;
+	} else {
+		rates = micdet_rates;
+		num_rates = ARRAY_SIZE(micdet_rates);
+	}
+
+	/* Pick the entry matching "idle" whose sysclk is closest to ours */
+	best = 0;
+	for (i = 0; i < num_rates; i++) {
+		if (rates[i].idle != idle)
+			continue;
+		if (abs(rates[i].sysclk - sysclk) <
+		    abs(rates[best].sysclk - sysclk))
+			best = i;
+		else if (rates[best].idle != idle)
+			best = i;
+	}
+
+	val = rates[best].start << WM8958_MICD_BIAS_STARTTIME_SHIFT
+		| rates[best].rate << WM8958_MICD_RATE_SHIFT;
+
+	dev_dbg(codec->dev, "MICD rate %d,%d for %dHz %s\n",
+		rates[best].start, rates[best].rate, sysclk,
+		idle ? "idle" : "active");
+
+	snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+			    WM8958_MICD_BIAS_STARTTIME_MASK |
+			    WM8958_MICD_RATE_MASK, val);
+}
+
+/* Custom WM8958 mic-detect callback.  Decodes the MICD status word:
+ * no MICD_STS -> open circuit (debounced via delayed work); impedance
+ * bits 0x600 -> headset (mic present); bits 0xfc -> headphone, keeping
+ * mic detection running to catch partially-inserted headsets.
+ */
+static void wm8958_custom_mic_id(void *data, u16 status)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "wm8958 custom mic id called with status %x\n",
+		status);
+
+	/* Either nothing present or just starting detection */
+	if (!(status & WM8958_MICD_STS)) {
+		/* If nothing present then clear our statuses */
+		dev_dbg(codec->dev, "Detected open circuit\n");
+
+		schedule_delayed_work(&wm8994->open_circuit_work,
+				      msecs_to_jiffies(2500));
+		return;
+	}
+
+	/* Re-tune the MICD rate after the micbias enable delay */
+	schedule_delayed_work(&wm8994->micd_set_custom_rate_work,
+		msecs_to_jiffies(wm8994->wm8994->pdata.micb_en_delay));
+
+	/* If the measurement is showing a high impedence we've got a
+	 * microphone.
+	 */
+	if (status & 0x600) {
+		dev_dbg(codec->dev, "Detected microphone\n");
+
+		wm8994->mic_detecting = false;
+		wm8994->jack_mic = true;
+		wm8994->headphone_detected = false;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADSET,
+				    SND_JACK_HEADSET);
+	}
+
+
+	if (status & 0xfc) {
+		dev_dbg(codec->dev, "Detected headphone\n");
+
+		/* Partial inserts of headsets with complete insert
+		 * after an indeterminate amount of time require
+		 * continouous micdetect enabled (until open circuit
+		 * or headset is detected)
+		 * */
+		wm8994->mic_detecting = true;
+
+		wm8994->jack_mic = false;
+		wm8994->headphone_detected = true;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
+				    SND_JACK_HEADSET);
+	}
+}
+
+/* Late card init: configure the codec DAI (4 TDM slots, DSP-B slave
+ * format), mark unused pins NC, force VMID on to avoid cold-start
+ * latency, create the jack, and install the custom mic-detect hooks.
+ */
+static int mrfld_8958_init(struct snd_soc_pcm_runtime *runtime)
+{
+	int ret;
+	unsigned int fmt;
+	struct snd_soc_codec *codec = runtime->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_card *card = runtime->card;
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	pr_debug("Entry %s\n", __func__);
+
+	/* NOTE(review): last arg of set_tdm_slot() is the slot width in
+	 * bits; passing SNDRV_PCM_FORMAT_S24_LE relies on the enum's
+	 * numeric value — confirm intended.
+	 */
+	ret = snd_soc_dai_set_tdm_slot(aif1_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(aif1_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	mrfld_8958_set_bias_level(card, dapm, SND_SOC_BIAS_OFF);
+	card->dapm.idle_bias_off = true;
+
+	/* these pins are not used in SB config so mark as nc
+	 *
+	 * LINEOUT1, 2
+	 * IN1R
+	 * DMICDAT2
+	 */
+	snd_soc_dapm_nc_pin(dapm, "DMIC2DAT");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "IN1RN");
+	snd_soc_dapm_nc_pin(dapm, "IN1RP");
+
+	/* Force enable VMID to avoid cold latency constraints */
+	snd_soc_dapm_force_enable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	ctx->jack_retry = 0;
+	ret = snd_soc_jack_new(codec, "Intel MID Audio Jack",
+			       SND_JACK_HEADSET | SND_JACK_HEADPHONE |
+				SND_JACK_BTN_0 | SND_JACK_BTN_1,
+				&ctx->jack);
+	if (ret) {
+		pr_err("jack creation failed\n");
+		return ret;
+	}
+
+	/* Both buttons map to KEY_MEDIA */
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_1, KEY_MEDIA);
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+
+	wm8958_mic_detect(codec, &ctx->jack, NULL, NULL,
+			  wm8958_custom_mic_id, codec);
+
+	wm8958_micd_set_custom_rate(codec, wm8958_custom_micd_set_rate, codec);
+
+	/* Unmute both AIF1 DAC paths */
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MUTE, 0);
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_FILTERS_1, WM8994_AIF1DAC2_MUTE, 0);
+
+	/* Micbias1 is always off, so for pm optimizations make sure the micbias1
+	 * discharge bit is set to floating to avoid discharge in disable state
+	 */
+	snd_soc_update_bits(codec, WM8958_MICBIAS1, WM8958_MICB1_DISCH, 0);
+
+	return 0;
+}
+
+/* Rate constraints: voice paths (VOIP/Aware/VAD) allow 8/16 kHz only;
+ * the main audio path is fixed at 48 kHz.
+ */
+static unsigned int rates_8000_16000[] = {
+	8000,
+	16000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_8000_16000 = {
+	.count = ARRAY_SIZE(rates_8000_16000),
+	.list  = rates_8000_16000,
+};
+
+static unsigned int rates_48000[] = {
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+	.count = ARRAY_SIZE(rates_48000),
+	.list  = rates_48000,
+};
+
+/* Restrict the main audio stream to 48 kHz. */
+static int mrfld_8958_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_48000);
+}
+
+/* Ops for the 48 kHz audio DAI link. */
+static struct snd_soc_ops mrfld_8958_ops = {
+	.startup = mrfld_8958_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+/* Restrict voice-oriented streams (VOIP/Aware/VAD) to 8 or 16 kHz. */
+static int mrfld_8958_8k_16k_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_8000_16000);
+}
+
+/* Ops for the 8/16 kHz voice DAI links. */
+static struct snd_soc_ops mrfld_8958_8k_16k_ops = {
+	.startup = mrfld_8958_8k_16k_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+/* Compressed-stream ops for the offload (Compress) link. */
+static struct snd_soc_compr_ops mrfld_compr_ops = {
+	.set_params = mrfld_wm8958_compr_set_params,
+};
+
+/* DAI links: SST front-ends (audio, compress offload, voip, probe,
+ * aware, vad and a virtual power port) wired either to WM8994 AIF1 or
+ * to the dummy codec for host-only ports.
+ */
+struct snd_soc_dai_link mrfld_8958_msic_dailink[] = {
+	[MERR_SALTBAY_AUDIO] = {
+		.name = "Merrifield Audio Port",
+		.stream_name = "Audio",
+		.cpu_dai_name = "Headset-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_ops,
+		.playback_count = 3,
+	},
+	[MERR_SALTBAY_COMPR] = {
+		.name = "Merrifield Compress Port",
+		.stream_name = "Compress",
+		.platform_name = "sst-platform",
+		.cpu_dai_name = "Compress-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.compr_ops = &mrfld_compr_ops,
+	},
+	[MERR_SALTBAY_VOIP] = {
+		.name = "Merrifield VOIP Port",
+		.stream_name = "Voip",
+		.cpu_dai_name = "Voip-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_PROBE] = {
+		.name = "Merrifield Probe Port",
+		.stream_name = "Probe",
+		.cpu_dai_name = "Probe-cpu-dai",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.platform_name = "sst-platform",
+		.playback_count = 8,
+		.capture_count = 8,
+	},
+	[MERR_SALTBAY_AWARE] = {
+		.name = "Merrifield Aware Port",
+		.stream_name = "Aware",
+		.cpu_dai_name = "Loopback-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_VAD] = {
+		.name = "Merrifield VAD Port",
+		.stream_name = "Vad",
+		.cpu_dai_name = "Loopback-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_POWER] = {
+		.name = "Virtual Power Port",
+		.stream_name = "Power",
+		.cpu_dai_name = "Power-cpu-dai",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+/* System-sleep "prepare": drop the force-enabled VMID pin before handing
+ * off to the normal ASoC suspend path so the codec can fully power down.
+ */
+static int snd_mrfld_8958_prepare(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return -EAGAIN;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_disable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_suspend(dev);
+	return 0;
+}
+
+/* System-sleep "complete": re-enable VMID (cold-latency optimization)
+ * and resume the card.
+ */
+static void snd_mrfld_8958_complete(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_force_enable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_resume(dev);
+	return;
+}
+
+/* Power-off transition: defer to the generic ASoC poweroff helper. */
+static int snd_mrfld_8958_poweroff(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_poweroff(dev);
+	return 0;
+}
+#else
+#define snd_mrfld_8958_prepare NULL
+#define snd_mrfld_8958_complete NULL
+#define snd_mrfld_8958_poweroff NULL
+#endif
+
+/* SoC card definition tying together the DAI links, bias-level hooks and
+ * the machine-level DAPM widgets/routes above.
+ */
+static struct snd_soc_card snd_soc_card_mrfld = {
+	.name = "wm8958-audio",
+	.dai_link = mrfld_8958_msic_dailink,
+	.num_links = ARRAY_SIZE(mrfld_8958_msic_dailink),
+	.set_bias_level = mrfld_8958_set_bias_level,
+	.set_bias_level_post = mrfld_8958_set_bias_level_post,
+	.dapm_widgets = widgets,
+	.num_dapm_widgets = ARRAY_SIZE(widgets),
+	.dapm_routes = map,
+	.num_dapm_routes = ARRAY_SIZE(map),
+};
+
+/* Platform probe: allocate driver data, map the OSC_CLKOUT control
+ * register, read the PMIC ID (used to pick the VFLEX idle voltage) and
+ * register the SoC card.
+ */
+static int snd_mrfld_8958_mc_probe(struct platform_device *pdev)
+{
+	int ret_val = 0;
+	struct mrfld_8958_mc_private *drv;
+
+	pr_debug("Entry %s\n", __func__);
+
+	/* Probe runs in process context; GFP_KERNEL is correct here —
+	 * GFP_ATOMIC was needlessly restrictive and more likely to fail.
+	 */
+	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+	if (!drv) {
+		pr_err("allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* ioremap the register */
+	drv->osc_clk0_reg = devm_ioremap_nocache(&pdev->dev,
+					MERR_OSC_CLKOUT_CTRL0_REG_ADDR,
+					MERR_OSC_CLKOUT_CTRL0_REG_SIZE);
+	if (!drv->osc_clk0_reg) {
+		pr_err("osc clk0 ctrl ioremap failed\n");
+		ret_val = -ENOMEM;	/* was -1: return a real errno */
+		goto unalloc;
+	}
+
+	ret_val = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &drv->pmic_id);
+	if (ret_val) {
+		pr_err("Error reading PMIC ID register\n");
+		goto unalloc;
+	}
+
+	/* register the soc card */
+	snd_soc_card_mrfld.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&snd_soc_card_mrfld, drv);
+	ret_val = snd_soc_register_card(&snd_soc_card_mrfld);
+	if (ret_val) {
+		pr_err("snd_soc_register_card failed %d\n", ret_val);
+		goto unalloc;
+	}
+	platform_set_drvdata(pdev, &snd_soc_card_mrfld);
+	pr_info("%s successful\n", __func__);
+	return ret_val;
+
+unalloc:
+	kfree(drv);
+	return ret_val;
+}
+
+/* Platform remove: tear down the card, then release driver data.
+ * The card must be unregistered BEFORE freeing/clearing its drvdata —
+ * card callbacks may still dereference it during teardown (the original
+ * order freed drv first, a use-after-free hazard).
+ */
+static int snd_mrfld_8958_mc_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+	struct mrfld_8958_mc_private *drv = snd_soc_card_get_drvdata(soc_card);
+
+	pr_debug("In %s\n", __func__);
+	snd_soc_unregister_card(soc_card);
+	snd_soc_card_set_drvdata(soc_card, NULL);
+	kfree(drv);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+/* PM ops: use prepare/complete so VMID can be toggled around the normal
+ * ASoC suspend/resume path.
+ * NOTE(review): looks like this could be static — confirm there is no
+ * external declaration before narrowing the linkage.
+ */
+const struct dev_pm_ops snd_mrfld_8958_mc_pm_ops = {
+	.prepare = snd_mrfld_8958_prepare,
+	.complete = snd_mrfld_8958_complete,
+	.poweroff = snd_mrfld_8958_poweroff,
+};
+
+/* Platform driver matching the "mrfld_wm8958" platform device. */
+static struct platform_driver snd_mrfld_8958_mc_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mrfld_wm8958",
+		.pm = &snd_mrfld_8958_mc_pm_ops,
+	},
+	.probe = snd_mrfld_8958_mc_probe,
+	.remove = snd_mrfld_8958_mc_remove,
+};
+
+/* Register the machine platform driver (called from the rpmsg probe). */
+static int snd_mrfld_8958_driver_init(void)
+{
+	/* fixed typo: "registerd" -> "registered" */
+	pr_info("Merrifield Machine Driver mrfld_wm8958 registered\n");
+	return platform_driver_register(&snd_mrfld_8958_mc_driver);
+}
+
+/* Unregister the machine platform driver (called from rpmsg remove). */
+static void snd_mrfld_8958_driver_exit(void)
+{
+	pr_debug("In %s\n", __func__);
+	platform_driver_unregister(&snd_mrfld_8958_mc_driver);
+}
+
+/* rpmsg probe: driver registration is deferred until the SCU rpmsg
+ * channel exists, guaranteeing SCU IPC is usable before our probe runs.
+ */
+static int snd_mrfld_8958_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed snd_mrfld wm8958 rpmsg device\n");
+
+	ret = snd_mrfld_8958_driver_init();
+
+out:
+	return ret;
+}
+
+/* rpmsg remove: mirror of rpmsg probe — unregister the platform driver. */
+static void snd_mrfld_8958_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	snd_mrfld_8958_driver_exit();
+	dev_info(&rpdev->dev, "Removed snd_mrfld wm8958 rpmsg device\n");
+}
+
+/* rpmsg callback: no messages are expected on this channel; warn and
+ * hex-dump anything that arrives for debugging.
+ */
+static void snd_mrfld_8958_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	/* fixed garbled log text ("unexpected, message") */
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+/* rpmsg channel name this driver binds to (created by the SCU firmware). */
+static struct rpmsg_device_id snd_mrfld_8958_rpmsg_id_table[] = {
+	{ .name = "rpmsg_mrfld_wm8958_audio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, snd_mrfld_8958_rpmsg_id_table);
+
+/* rpmsg driver wrapper that gates platform-driver registration. */
+static struct rpmsg_driver snd_mrfld_8958_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= snd_mrfld_8958_rpmsg_id_table,
+	.probe		= snd_mrfld_8958_rpmsg_probe,
+	.callback	= snd_mrfld_8958_rpmsg_cb,
+	.remove		= snd_mrfld_8958_rpmsg_remove,
+};
+
+/* Module entry: late_initcall so the rpmsg bus is up before we register. */
+static int __init snd_mrfld_8958_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+late_initcall(snd_mrfld_8958_rpmsg_init);
+
+/* Module exit: unregister the rpmsg driver.  unregister_rpmsg_driver()
+ * returns void, so "return <void expr>;" (original) is an ISO C
+ * constraint violation — call it plainly instead.
+ */
+static void __exit snd_mrfld_8958_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+module_exit(snd_mrfld_8958_rpmsg_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Merrifield MID Machine driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mrfld_wm8958");
--- /dev/null
+/*
+ * compress.c - Intel MID Platform driver for Compress stream operations
+ *
+ * Copyright (C) 2010-2013 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/intel_sst_ioctl.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+/* SST callback: a fragment of the compressed stream was consumed —
+ * notify ALSA so user space can refill.  @arg is the compress stream.
+ */
+static void sst_compr_fragment_elapsed(void *arg)
+{
+	struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
+
+	pr_debug("fragment elapsed by driver\n");
+	if (cstream)
+		snd_compr_fragment_elapsed(cstream);
+}
+
+/* SST callback: the firmware finished draining — wake any waiter blocked
+ * in SNDRV_COMPRESS_DRAIN.  @arg is the compress stream.
+ */
+static void sst_drain_notify(void *arg)
+{
+	struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
+
+	pr_debug("drain notify by driver\n");
+	if (cstream)
+		snd_compr_drain_notify(cstream);
+}
+
+/* Compress open: allocate per-stream state, pin the SST DSP module and
+ * cache its compress ops.  The firmware stream itself is not created
+ * until set_params; id stays 0 until then.
+ */
+static int sst_platform_compr_open(struct snd_compr_stream *cstream)
+{
+
+	int ret_val = 0;
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct sst_runtime_stream *stream;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	pr_debug("%s called:%s\n", __func__, dai_link->cpu_dai_name);
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		return -ENOMEM;
+
+	spin_lock_init(&stream->status_lock);
+
+	/* get the sst ops */
+	if (!sst_dsp || !try_module_get(sst_dsp->dev->driver->owner)) {
+		pr_err("no device available to run\n");
+		ret_val = -ENODEV;
+		goto out_ops;
+	}
+	stream->compr_ops = sst_dsp->compr_ops;
+
+	stream->id = 0;
+	sst_set_stream_status(stream, SST_PLATFORM_INIT);
+	runtime->private_data = stream;
+	return 0;
+out_ops:
+	kfree(stream);
+	return ret_val;
+}
+
+/* Compress free: close the firmware stream (if one was created by
+ * set_params), drop the SST module reference and free the state.
+ * NOTE(review): always returns 0 even if close() failed — presumably a
+ * deliberate best-effort teardown; confirm before changing.
+ */
+static int sst_platform_compr_free(struct snd_compr_stream *cstream)
+{
+	struct sst_runtime_stream *stream;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	int ret_val = 0, str_id;
+
+	stream = cstream->runtime->private_data;
+	/*need to check*/
+	str_id = stream->id;
+	if (str_id)
+		ret_val = stream->compr_ops->close(str_id);
+	module_put(sst_dsp->dev->driver->owner);
+	kfree(stream);
+	pr_debug("%s called for dai %s: ret = %d\n", __func__,
+			dai_link->cpu_dai_name, ret_val);
+	return 0;
+}
+
+/* Compress set_params: translate ALSA compress codec params (MP3/AAC)
+ * into an SST stream descriptor, point the firmware at the ALSA ring
+ * buffer (single scatter-gather entry), install fragment/drain callbacks
+ * and open the firmware stream.  On success the returned stream id is
+ * stored in stream->id.
+ */
+static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
+					struct snd_compr_params *params)
+{
+	struct sst_runtime_stream *stream;
+	int retval = 0;
+	struct snd_sst_params str_params;
+	struct sst_compress_cb cb;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_platform *platform = rtd->platform;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("In function %s\n", __func__);
+	stream = cstream->runtime->private_data;
+	/* construct fw structure for this*/
+	memset(&str_params, 0, sizeof(str_params));
+
+	/* fill the device type and stream id to pass to SST driver */
+	retval = sst_fill_stream_params(cstream, ctx, &str_params, true);
+	pr_debug("compr_set_params: fill stream params ret_val = 0x%x\n", retval);
+	if (retval < 0)
+		return retval;
+
+	switch (params->codec.id) {
+	case SND_AUDIOCODEC_MP3: {
+		str_params.codec = SST_CODEC_TYPE_MP3;
+		str_params.sparams.uc.mp3_params.num_chan = params->codec.ch_in;
+		str_params.sparams.uc.mp3_params.pcm_wd_sz = 16;
+		break;
+	}
+
+	case SND_AUDIOCODEC_AAC: {
+		str_params.codec = SST_CODEC_TYPE_AAC;
+		str_params.sparams.uc.aac_params.num_chan = params->codec.ch_in;
+		str_params.sparams.uc.aac_params.pcm_wd_sz = 16;
+		if (params->codec.format == SND_AUDIOSTREAMFORMAT_MP4ADTS)
+			str_params.sparams.uc.aac_params.bs_format =
+							AAC_BIT_STREAM_ADTS;
+		else if (params->codec.format == SND_AUDIOSTREAMFORMAT_RAW)
+			str_params.sparams.uc.aac_params.bs_format =
+							AAC_BIT_STREAM_RAW;
+		else {
+			pr_err("Undefined format%d\n", params->codec.format);
+			return -EINVAL;
+		}
+		str_params.sparams.uc.aac_params.externalsr =
+						params->codec.sample_rate;
+		break;
+	}
+
+	default:
+		pr_err("codec not supported, id =%d\n", params->codec.id);
+		return -EINVAL;
+	}
+
+	/* hand the entire ALSA ring buffer to the firmware as one region */
+	str_params.aparams.ring_buf_info[0].addr  =
+					virt_to_phys(cstream->runtime->buffer);
+	str_params.aparams.ring_buf_info[0].size =
+					cstream->runtime->buffer_size;
+	str_params.aparams.sg_count = 1;
+	str_params.aparams.frag_size = cstream->runtime->fragment_size;
+
+	cb.param = cstream;
+	cb.compr_cb = sst_compr_fragment_elapsed;
+	cb.drain_cb_param = cstream;
+	cb.drain_notify = sst_drain_notify;
+
+	retval = stream->compr_ops->open(&str_params, &cb);
+	if (retval < 0) {
+		pr_err("stream allocation failed %d\n", retval);
+		return retval;
+	}
+
+	/* positive return value is the firmware stream id */
+	stream->id = retval;
+	return 0;
+}
+
+/* Forward start/stop/pause/drain triggers to the SST firmware stream. */
+static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->control(cmd, stream->id);
+}
+
+/* Query the firmware timestamp and derive the ring-buffer byte offset
+ * from the total copied-byte count modulo the buffer size.
+ */
+static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
+					struct snd_compr_tstamp *tstamp)
+{
+	struct sst_runtime_stream *stream;
+
+	stream  = cstream->runtime->private_data;
+	stream->compr_ops->tstamp(stream->id, tstamp);
+	tstamp->byte_offset = tstamp->copied_total %
+				 (u32)cstream->runtime->buffer_size;
+	pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset);
+	return 0;
+}
+
+/* Tell the firmware that user space wrote @bytes more data, and track
+ * the running total locally.
+ */
+static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
+					size_t bytes)
+{
+	struct sst_runtime_stream *stream;
+
+	stream  = cstream->runtime->private_data;
+	stream->compr_ops->ack(stream->id, (unsigned long)bytes);
+	stream->bytes_written += bytes;
+
+	return 0;
+}
+
+/* Report stream capabilities straight from the SST driver. */
+static int sst_platform_compr_get_caps(struct snd_compr_stream *cstream,
+					struct snd_compr_caps *caps)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->get_caps(caps);
+}
+
+/* Report per-codec capabilities straight from the SST driver. */
+static int sst_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
+					struct snd_compr_codec_caps *codec)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->get_codec_caps(codec);
+}
+
+/* Pass stream metadata (e.g. gapless info) to the firmware stream. */
+static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
+					struct snd_compr_metadata *metadata)
+{
+	struct sst_runtime_stream *stream  =
+		 cstream->runtime->private_data;
+
+	return stream->compr_ops->set_metadata(stream->id, metadata);
+}
+
+/* Compress ops table exported to the ASoC platform driver. */
+struct snd_compr_ops sst_platform_compr_ops = {
+
+	.open = sst_platform_compr_open,
+	.free = sst_platform_compr_free,
+	.set_params = sst_platform_compr_set_params,
+	.set_metadata = sst_platform_compr_set_metadata,
+	.trigger = sst_platform_compr_trigger,
+	.pointer = sst_platform_compr_pointer,
+	.ack = sst_platform_compr_ack,
+	.get_caps = sst_platform_compr_get_caps,
+	.get_codec_caps = sst_platform_compr_get_codec_caps,
+};
--- /dev/null
+/*
+ * effects.c - platform file for effects interface
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Authors: Samreen Nilofer <samreen.nilofer@intel.com>
+ * Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include <linux/slab.h>
+#include <asm/platform_sst_audio.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+extern struct sst_device *sst_dsp;
+extern struct device *sst_pdev;
+
+/* Binary layout of an effect UUID; compared bytewise against the uuid
+ * strings in the platform effect map (see sst_get_algo_id).
+ * NOTE(review): field naming matches the Android audio-effect UUID
+ * struct — confirm the intended provenance.
+ */
+struct effect_uuid {
+	uint32_t timeLow;
+	uint16_t timeMid;
+	uint16_t timeHiAndVersion;
+	uint16_t clockSeq;
+	uint8_t node[6];
+};
+
+#define EFFECT_STRING_LEN_MAX 64
+
+/* Effect IPC operation selector used by sst_send_effects(). */
+enum sst_effect {
+	EFFECTS_CREATE = 0,
+	EFFECTS_DESTROY,
+	EFFECTS_SET_PARAMS,
+	EFFECTS_GET_PARAMS,
+};
+
+/* Mixer output selector for global effects (device == 0xFF). */
+enum sst_mixer_output_mode {
+	SST_MEDIA0_OUT,
+	SST_MEDIA1_OUT,
+};
+
+/* Serialize an effect request into a byte-stream IPC message: header
+ * fields, then the ipc_effect_dsp_hdr, then the raw payload data.
+ * Caller must have allocated bytes->bytes with at least @len bytes.
+ */
+static inline void sst_fill_byte_stream(struct snd_sst_bytes_v2 *bytes, u8 type,
+			u8 msg, u8 block, u8 task, u8 pipe_id, u16 len,
+			struct ipc_effect_payload *payload)
+{
+	u32 size = sizeof(struct ipc_effect_dsp_hdr);
+
+	bytes->type = type;
+	bytes->ipc_msg = msg;
+	bytes->block = block;
+	bytes->task_id = task;
+	bytes->pipe_id = pipe_id;
+	bytes->len = len;
+
+	/* Copy the ipc_effect_dsp_hdr followed by the data */
+	memcpy(bytes->bytes, payload, size);
+	memcpy(bytes->bytes + size, payload->data, len - size);
+}
+
+/* Build and send an effect IPC (create/destroy/set/get params) to the
+ * DSP via the SST byte-stream interface; for GET_PARAMS the response
+ * data is copied back into dsp_payload->data.
+ * NOTE(review): payload_len is u8 — sizeof(hdr) + data_len silently
+ * truncates above 255 bytes; confirm data_len is always small enough.
+ */
+static int sst_send_effects(struct ipc_effect_payload *dsp_payload, int data_len,
+				     enum sst_effect effect_type)
+{
+	struct snd_sst_bytes_v2 *bytes;
+	u32 len;
+	int ret;
+	u8 type, msg = IPC_INVALID, pipe, payload_len;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst = dev_get_drvdata(sst_pdev);
+
+	len = sizeof(*bytes) + sizeof(struct ipc_effect_dsp_hdr) + data_len;
+
+	bytes = kzalloc(len, GFP_KERNEL);
+	if (!bytes) {
+		pr_err("kzalloc failed allocate bytes\n");
+		return -ENOMEM;
+	}
+
+	/* map the effect operation to IPC message type/direction */
+	switch (effect_type) {
+	case EFFECTS_CREATE:
+	case EFFECTS_DESTROY:
+		type = SND_SST_BYTES_SET;
+		msg = IPC_CMD;
+		break;
+
+	case EFFECTS_SET_PARAMS:
+		type = SND_SST_BYTES_SET;
+		msg = IPC_SET_PARAMS;
+		break;
+
+	case EFFECTS_GET_PARAMS:
+		type = SND_SST_BYTES_GET;
+		msg = IPC_GET_PARAMS;
+		break;
+	default:
+		pr_err("No such effect %#x", effect_type);
+		ret = -EINVAL;
+		goto free_bytes;
+	}
+
+	pipe = dsp_payload->dsp_hdr.pipe_id;
+	payload_len = sizeof(struct ipc_effect_dsp_hdr) + data_len;
+	sst_fill_byte_stream(bytes, type, msg, 1, SST_TASK_ID_MEDIA,
+			     pipe, payload_len, dsp_payload);
+
+	/* serialize byte-stream IPCs against other SST users */
+	mutex_lock(&sst->lock);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM, bytes);
+	mutex_unlock(&sst->lock);
+
+	if (ret) {
+		pr_err("byte_stream failed err %d pipe_id %#x\n", ret,
+		       dsp_payload->dsp_hdr.pipe_id);
+		goto free_bytes;
+	}
+
+	/* Copy only the data - skip the dsp header */
+	if (msg == IPC_GET_PARAMS)
+		memcpy(dsp_payload->data, bytes->bytes, data_len);
+
+free_bytes:
+	kfree(bytes);
+	return ret;
+}
+
+/* Look up the DSP algorithm id for a given effect UUID in the platform
+ * effect map (bytewise UUID comparison).  Returns 0 and fills *algo_id,
+ * or -EINVAL if the UUID is unknown.
+ */
+static int sst_get_algo_id(const struct sst_dev_effects *pdev_effs,
+			   char *uuid, u16 *algo_id)
+{
+	int i, len;
+
+	len = pdev_effs->effs_num_map;
+
+	for (i = 0; i < len; i++) {
+		if (!strncmp(pdev_effs->effs_map[i].uuid, uuid, sizeof(struct effect_uuid))) {
+			*algo_id = pdev_effs->effs_map[i].algo_id;
+			return 0;
+		}
+	}
+	pr_err("no such uuid\n");
+	return -EINVAL;
+}
+
+/* Populate an ipc_dsp_effects_info block (command, algo id, position,
+ * resource usage) for the effect matching @uuid.  Returns -EINVAL if
+ * the UUID is not in the platform effect map.
+ */
+static int sst_fill_effects_info(const struct sst_dev_effects *pdev_effs,
+				  char *uuid, u16 pos,
+				  struct ipc_dsp_effects_info *effs_info, u16 cmd_id)
+{
+	int i, len;
+
+	len = pdev_effs->effs_num_map;
+
+	for (i = 0; i < len; i++) {
+		if (!strncmp(pdev_effs->effs_map[i].uuid, uuid, sizeof(struct effect_uuid))) {
+
+			effs_info->cmd_id = cmd_id;
+			/* length counts only the fields from sel_pos on */
+			effs_info->length = (sizeof(struct ipc_dsp_effects_info) -
+					offsetof(struct ipc_dsp_effects_info, sel_pos));
+			effs_info->sel_pos = pos;
+			effs_info->sel_algo_id = pdev_effs->effs_map[i].algo_id;
+			effs_info->cpu_load = pdev_effs->effs_res_map[i].cpuLoad;
+			effs_info->memory_usage = pdev_effs->effs_res_map[i].memoryUsage;
+			effs_info->flags = pdev_effs->effs_res_map[i].flags;
+
+			return 0;
+		}
+	}
+
+	pr_err("no such uuid\n");
+	return -EINVAL;
+}
+
+/* Fill the DSP header of an effect payload; mod_index_id 0xFF appears
+ * to be a wildcard module index (matches usage at all call sites).
+ */
+static inline void sst_fill_dsp_payload(struct ipc_effect_payload *dsp_payload,
+				 u8 pipe_id, u16 mod_id, char *data)
+{
+	dsp_payload->dsp_hdr.mod_index_id = 0xFF;
+	dsp_payload->dsp_hdr.pipe_id = pipe_id;
+	dsp_payload->dsp_hdr.mod_id = mod_id;
+	dsp_payload->data = data;
+}
+
+/* Resolve an ALSA device number to a DSP pipe id via the stream map.
+ * A device of 0xFF means a global effect: the pipe is chosen from the
+ * mixer output mode instead.
+ * NOTE(review): the search starts at index 1 — entry 0 is presumably
+ * reserved in the stream map; confirm against the map definition.
+ */
+static int sst_get_pipe_id(struct sst_dev_stream_map *map, int map_size,
+			   int dev, int mode, u8 *pipe_id)
+{
+	int index;
+
+	if (map == NULL)
+		return -EINVAL;
+
+	/* In case of global effects, dev will be 0xff */
+	if (dev == 0xFF) {
+		*pipe_id = (mode == SST_MEDIA0_OUT) ? PIPE_MEDIA0_OUT : PIPE_MEDIA1_OUT;
+		return 0;
+	}
+
+	for (index = 1; index < map_size; index++) {
+		if (map[index].dev_num == dev) {
+			*pipe_id = map[index].device_id;
+			break;
+		}
+	}
+
+	if (index == map_size) {
+		pr_err("no such device %d\n", dev);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* Create an effect instance on the DSP: build the effects-info block,
+ * resolve the target pipe, and send the EFFECTS_CREATE IPC.
+ */
+static int sst_effects_create(struct snd_card *card, struct snd_effect *effect)
+{
+	int ret = 0;
+	u8 pipe_id;
+	struct ipc_effect_payload dsp_payload;
+	struct ipc_dsp_effects_info effects_info;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst = dev_get_drvdata(sst_pdev);
+
+	ret = sst_fill_effects_info(&sst->pdata->pdev_effs, effect->uuid, effect->pos,
+				    &effects_info, IPC_EFFECTS_CREATE);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+			      sst->pdata->strm_map_size,
+			      effect->device, effect->mode, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, 0xFF, (char *)&effects_info);
+
+	ret = sst_send_effects(&dsp_payload, sizeof(effects_info), EFFECTS_CREATE);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* Destroy a DSP effect instance; mirror of sst_effects_create but with
+ * the EFFECTS_DESTROY IPC.
+ */
+static int sst_effects_destroy(struct snd_card *card, struct snd_effect *effect)
+{
+	int ret = 0;
+	u8 pipe_id;
+	struct ipc_effect_payload dsp_payload;
+	struct ipc_dsp_effects_info effects_info;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst = dev_get_drvdata(sst_pdev);
+
+	ret = sst_fill_effects_info(&sst->pdata->pdev_effs, effect->uuid, effect->pos,
+				    &effects_info, IPC_EFFECTS_DESTROY);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+			      sst->pdata->strm_map_size,
+			      effect->device, effect->mode, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, 0xFF, (char *)&effects_info);
+
+	ret = sst_send_effects(&dsp_payload, sizeof(effects_info), EFFECTS_DESTROY);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* Push parameter data to a running effect: resolve algo id from the
+ * UUID, pipe from the device (MEDIA0 output assumed), then send the
+ * caller's buffer via EFFECTS_SET_PARAMS.
+ */
+static int sst_effects_set_params(struct snd_card *card,
+				  struct snd_effect_params *params)
+{
+	int ret = 0;
+	u8 pipe_id;
+	u16 algo_id;
+	struct ipc_effect_payload dsp_payload;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst = dev_get_drvdata(sst_pdev);
+
+	ret = sst_get_algo_id(&sst->pdata->pdev_effs, params->uuid, &algo_id);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+			      sst->pdata->strm_map_size,
+			      params->device, SST_MEDIA0_OUT, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, algo_id, params->buffer);
+
+	ret = sst_send_effects(&dsp_payload, params->size, EFFECTS_SET_PARAMS);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* Read parameter data back from a running effect into params->buffer;
+ * mirror of sst_effects_set_params using EFFECTS_GET_PARAMS.
+ */
+static int sst_effects_get_params(struct snd_card *card,
+				  struct snd_effect_params *params)
+{
+	int ret = 0;
+	u8 pipe_id;
+	u16 algo_id;
+	struct ipc_effect_payload dsp_payload;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst = dev_get_drvdata(sst_pdev);
+
+	ret = sst_get_algo_id(&sst->pdata->pdev_effs, params->uuid, &algo_id);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+			      sst->pdata->strm_map_size,
+			      params->device, SST_MEDIA0_OUT, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, algo_id, params->buffer);
+
+	ret = sst_send_effects(&dsp_payload, params->size, EFFECTS_GET_PARAMS);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* Return the number of effects in the platform map (or -ENODEV if the
+ * SST platform device is not up yet).
+ */
+static int sst_query_num_effects(struct snd_card *card)
+{
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst = dev_get_drvdata(sst_pdev);
+
+	return sst->pdata->pdev_effs.effs_num_map;
+}
+
+/* Copy every effect's fixed-size descriptor into caps->buffer and set
+ * caps->size to the bytes written.
+ * NOTE(review): -ENOMEM for a too-small caller buffer reads more like
+ * -EINVAL; confirm the API contract before changing.
+ */
+static int sst_query_effects_caps(struct snd_card *card,
+				  struct snd_effect_caps *caps)
+{
+	struct sst_data *sst;
+	struct sst_dev_effects_map *effs_map;
+	unsigned int num_effects, offset = 0;
+	char *dstn;
+	int i;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst = dev_get_drvdata(sst_pdev);
+
+	effs_map = sst->pdata->pdev_effs.effs_map;
+	num_effects = sst->pdata->pdev_effs.effs_num_map;
+
+	if (caps->size < (num_effects * MAX_DESCRIPTOR_SIZE)) {
+		pr_err("buffer size is insufficient\n");
+		return -ENOMEM;
+	}
+
+	dstn = caps->buffer;
+	for (i = 0; i < num_effects; i++) {
+		memcpy(dstn + offset, effs_map[i].descriptor, MAX_DESCRIPTOR_SIZE);
+		offset += MAX_DESCRIPTOR_SIZE;
+	}
+	caps->size = offset;
+
+	return 0;
+}
+
+/*
+ * Effect ops vtable handed to the sound core; create/destroy are defined
+ * earlier in this file, the rest are the helpers above.
+ */
+struct snd_effect_ops effects_ops = {
+	.create = sst_effects_create,
+	.destroy = sst_effects_destroy,
+	.set_params = sst_effects_set_params,
+	.get_params = sst_effects_get_params,
+	.query_num_effects = sst_query_num_effects,
+	.query_effect_caps = sst_query_effects_caps,
+};
--- /dev/null
+/*
+ * pcm.c - Intel MID Platform driver file implementing PCM functionality
+ *
+ * Copyright (C) 2010-2013 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/intel_sst_ioctl.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/intel_sst_mrfld.h>
+#include <asm/intel-mid.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+struct device *sst_pdev;
+struct sst_device *sst_dsp;
+extern struct snd_compr_ops sst_platform_compr_ops;
+extern struct snd_effect_ops effects_ops;
+
+static DEFINE_MUTEX(sst_dsp_lock);
+
+/*
+ * Hardware capabilities advertised to ALSA for all PCM substreams opened
+ * through this platform driver; limits come from sst_platform_pvt.h.
+ */
+static struct snd_pcm_hardware sst_platform_pcm_hw = {
+	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
+			SNDRV_PCM_INFO_DOUBLE |
+			SNDRV_PCM_INFO_PAUSE |
+			SNDRV_PCM_INFO_RESUME |
+			SNDRV_PCM_INFO_MMAP|
+			SNDRV_PCM_INFO_MMAP_VALID |
+			SNDRV_PCM_INFO_BLOCK_TRANSFER |
+			SNDRV_PCM_INFO_SYNC_START),
+	.formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
+			SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
+			SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
+	.rates = (SNDRV_PCM_RATE_8000|
+			SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_44100 |
+			SNDRV_PCM_RATE_48000),
+	.rate_min = SST_MIN_RATE,
+	.rate_max = SST_MAX_RATE,
+	.channels_min =	SST_MIN_CHANNEL,
+	.channels_max =	SST_MAX_CHANNEL,
+	.buffer_bytes_max = SST_MAX_BUFFER,
+	.period_bytes_min = SST_MIN_PERIOD_BYTES,
+	.period_bytes_max = SST_MAX_PERIOD_BYTES,
+	.periods_min = SST_MIN_PERIODS,
+	.periods_max = SST_MAX_PERIODS,
+	.fifo_size = SST_FIFO_SIZE,
+};
+
+/*
+ * sst_platform_ihf_set_tdm_slot - pass the IHF channel count to the DSP.
+ *
+ * Only @slots is used (as the channel count); tx/rx masks and slot_width
+ * are ignored.  Sends SST_SET_CHANNEL_INFO for the IHF device as a
+ * runtime parameter.  Returns -EIO if no DSP has registered yet.
+ */
+static int sst_platform_ihf_set_tdm_slot(struct snd_soc_dai *dai,
+			unsigned int tx_mask, unsigned int rx_mask,
+			int slots, int slot_width) {
+	struct snd_sst_runtime_params params_data;
+	int channels = slots;
+
+	/* registering with SST driver to get access to SST APIs to use */
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -EIO;
+	}
+	params_data.type = SST_SET_CHANNEL_INFO;
+	params_data.str_id = SND_SST_DEVICE_IHF;
+	params_data.size = sizeof(channels);
+	/* points at the stack local; consumed synchronously by the call */
+	params_data.addr = &channels;
+	return sst_dsp->ops->set_generic_params(SST_SET_RUNTIME_PARAMS,
+						(void *)&params_data);
+}
+
+/*
+ * sst_media_digital_mute - DAI mute callback; forwards the mute state as
+ * pipe gains when the DPCM backend is built in, otherwise a no-op.
+ */
+static int sst_media_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+
+	pr_debug("%s: enter, mute=%d dai-name=%s dir=%d\n", __func__, mute, dai->name, stream);
+
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	sst_send_pipe_gains(dai, stream, mute);
+#endif
+
+	return 0;
+}
+
+/* helper functions */
+/*
+ * sst_set_stream_status - update a stream's state (SST_PLATFORM_*) under
+ * its status spinlock; safe from IRQ context.
+ */
+void sst_set_stream_status(struct sst_runtime_stream *stream,
+					int state)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&stream->status_lock, flags);
+	stream->stream_status = state;
+	spin_unlock_irqrestore(&stream->status_lock, flags);
+}
+
+/*
+ * sst_get_stream_status - read a stream's current state under the status
+ * spinlock; pairs with sst_set_stream_status().
+ */
+static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
+{
+	int state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&stream->status_lock, flags);
+	state = stream->stream_status;
+	spin_unlock_irqrestore(&stream->status_lock, flags);
+	return state;
+}
+
+/*
+ * sst_fill_alloc_params - describe the DMA ring buffer to the DSP.
+ *
+ * Fills a single scatter-gather entry with the physical address and size
+ * of the preallocated DMA area, plus the fragment size.
+ * NOTE(review): frag_size is period bytes multiplied by the channel
+ * count, even though samples_to_bytes() already accounts for channels —
+ * confirm this is what the firmware expects.
+ */
+static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
+				struct snd_sst_alloc_params_ext *alloc_param)
+{
+	unsigned int channels;
+	snd_pcm_uframes_t period_size;
+	ssize_t periodbytes;
+	ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
+	u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+
+	channels = substream->runtime->channels;
+	period_size = substream->runtime->period_size;
+	periodbytes = samples_to_bytes(substream->runtime, period_size);
+	alloc_param->ring_buf_info[0].addr = buffer_addr;
+	alloc_param->ring_buf_info[0].size = buffer_bytes;
+	alloc_param->sg_count = 1;
+	alloc_param->reserved = 0;
+	alloc_param->frag_size = periodbytes * channels;
+
+	pr_debug("period_size = %d\n", alloc_param->frag_size);
+	pr_debug("ring_buf_addr = 0x%x\n", alloc_param->ring_buf_info[0].addr);
+}
+/*
+ * sst_fill_pcm_params - copy channel count, sample width and rate from
+ * the ALSA runtime into the SST stream parameter block (non-offload path).
+ * NOTE(review): the memset clears only sizeof(u8) == 1 byte of
+ * channel_map — if channel_map is an array, the remaining entries stay
+ * uninitialized; confirm intended size.
+ */
+static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
+				struct snd_sst_stream_params *param)
+{
+	param->uc.pcm_params.num_chan = (u8) substream->runtime->channels;
+	param->uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
+	param->uc.pcm_params.sfreq = substream->runtime->rate;
+
+	/* PCM stream via ALSA interface */
+	param->uc.pcm_params.use_offload_path = 0;
+	param->uc.pcm_params.reserved2 = 0;
+	memset(param->uc.pcm_params.channel_map, 0, sizeof(u8));
+	pr_debug("sfreq= %d, wd_sz = %d\n",
+	 param->uc.pcm_params.sfreq, param->uc.pcm_params.pcm_wd_sz);
+
+}
+
+/*
+ * Pick a media pipe by period time: at or below the low-latency threshold
+ * use the LL pipe, at or above the deep-buffer threshold use the DB pipe,
+ * otherwise the normal media pipe.
+ */
+#define ASSIGN_PIPE_ID(periodtime, lowlatency, deepbuffer) \
+		((periodtime) <= (lowlatency) ? PIPE_LOW_PCM0_IN : \
+		((periodtime) >= (deepbuffer) ? PIPE_MEDIA3_IN : PIPE_MEDIA1_IN))
+
+/*
+ * sst_get_stream_mapping - find (and claim) the stream-map slot for a
+ * device/subdevice/direction triple.
+ *
+ * Returns the map index on success.  A slot already IN_USE is returned
+ * as-is; a FREE slot is marked IN_USE and, for the probe device, gets the
+ * caller-supplied pipe id, while for the audio device the pipe is chosen
+ * from the period time via ASSIGN_PIPE_ID.  Returns 0 when no slot
+ * matches and -EINVAL on bad arguments (callers treat <= 0 as failure).
+ */
+static int sst_get_stream_mapping(int dev, int sdev, int dir,
+	struct sst_dev_stream_map *map, int size, u8 pipe_id,
+	const struct sst_lowlatency_deepbuff *ll_db)
+{
+	int index;
+	unsigned long pt = 0, ll = 0, db = 0;
+
+	if (map == NULL)
+		return -EINVAL;
+
+	pr_debug("dev %d sdev %d dir %d\n", dev, sdev, dir);
+
+	/* index 0 is not used in stream map */
+	for (index = 1; index < size; index++) {
+		if ((map[index].dev_num == dev) &&
+		    (map[index].subdev_num == sdev) &&
+		    (map[index].direction == dir)) {
+			/* device id for the probe is assigned dynamically */
+			if (map[index].status == SST_DEV_MAP_IN_USE) {
+				return index;
+			} else if (map[index].status == SST_DEV_MAP_FREE) {
+				map[index].status = SST_DEV_MAP_IN_USE;
+
+				if (map[index].dev_num == MERR_SALTBAY_PROBE) {
+					map[index].device_id = pipe_id;
+
+				} else if (map[index].dev_num == MERR_SALTBAY_AUDIO) {
+					/* thresholds must be wired up by the machine driver */
+					if (!ll_db->low_latency || !ll_db->deep_buffer)
+						return -EINVAL;
+
+					pt = ll_db->period_time;
+					ll = *(ll_db->low_latency);
+					db = *(ll_db->deep_buffer);
+
+					pr_debug("PT %lu LL %lu DB %lu\n", pt, ll, db);
+
+					map[index].device_id = ASSIGN_PIPE_ID(pt,
+								ll, db);
+				}
+				pr_debug("%s: pipe_id 0%x index %d", __func__,
+						map[index].device_id, index);
+
+				return index;
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * sst_fill_stream_params - resolve stream id, device type and task id for
+ * a PCM or compressed stream from the platform stream map.
+ *
+ * @substream:   a struct snd_pcm_substream * or snd_compr_stream *,
+ *               discriminated by @is_compress.
+ * @ctx:         platform driver data holding the map and LL/DB thresholds.
+ * @str_params:  output; stream_id/device_type/task/ops are filled in and
+ *               stream_type is set to MUSIC (or PROBE for the probe pipe).
+ *
+ * Returns 0 on success, -EINVAL when no map slot could be claimed.
+ */
+int sst_fill_stream_params(void *substream,
+	const struct sst_data *ctx, struct snd_sst_params *str_params, bool is_compress)
+{
+	int map_size;
+	int index;
+	struct sst_dev_stream_map *map;
+	struct snd_pcm_substream *pstream = NULL;
+	struct snd_compr_stream *cstream = NULL;
+
+	map = ctx->pdata->pdev_strm_map;
+	map_size = ctx->pdata->strm_map_size;
+
+	if (is_compress == true)
+		cstream = (struct snd_compr_stream *)substream;
+	else
+		pstream = (struct snd_pcm_substream *)substream;
+
+	str_params->stream_type = SST_STREAM_TYPE_MUSIC;
+
+	/* For pcm streams */
+	if (pstream) {
+		index = sst_get_stream_mapping(pstream->pcm->device,
+					  pstream->number, pstream->stream,
+					  map, map_size, ctx->pipe_id, &ctx->ll_db);
+		if (index <= 0)
+			return -EINVAL;
+
+		str_params->stream_id = index;
+		str_params->device_type = map[index].device_id;
+		str_params->task = map[index].task_id;
+
+		/* probe pipes carry raw probe data, not music */
+		if (str_params->device_type == SST_PROBE_IN)
+			str_params->stream_type = SST_STREAM_TYPE_PROBE;
+
+		pr_debug("str_id = %d, device_type = 0x%x, task = %d",
+			 str_params->stream_id, str_params->device_type,
+			 str_params->task);
+
+		str_params->ops = (u8)pstream->stream;
+	}
+
+	if (cstream) {
+		/* FIXME: Add support for subdevice number in
+		 * snd_compr_stream */
+		index = sst_get_stream_mapping(cstream->device->device,
+					       0, cstream->direction,
+					       map, map_size, ctx->pipe_id, &ctx->ll_db);
+		if (index <= 0)
+			return -EINVAL;
+		str_params->stream_id = index;
+		str_params->device_type = map[index].device_id;
+		str_params->task = map[index].task_id;
+		pr_debug("compress str_id = %d, device_type = 0x%x, task = %d",
+			 str_params->stream_id, str_params->device_type,
+			 str_params->task);
+
+		str_params->ops = (u8)cstream->direction;
+	}
+	return 0;
+}
+
+/* period time in milliseconds: frames-per-period * 1000 / rate */
+#define CALC_PERIODTIME(period_size, rate) (((period_size) * 1000) / (rate))
+
+/*
+ * sst_platform_alloc_stream - open a DSP stream for a prepared substream.
+ *
+ * Builds codec (PCM) and allocation parameters from the ALSA runtime,
+ * records the period time used for pipe selection, resolves the stream
+ * map entry, then calls the DSP open op.  The assigned stream id is
+ * stored in the runtime stream.  Returns the (positive) open result on
+ * success, or <= 0 on failure (callers treat <= 0 as "not allocated").
+ */
+static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
+		struct snd_soc_platform *platform)
+{
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	struct snd_sst_stream_params param = {{{0,},},};
+	struct snd_sst_params str_params = {0};
+	struct snd_sst_alloc_params_ext alloc_params = {0};
+	int ret_val = 0;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+
+	/* set codec params and inform SST driver the same */
+	sst_fill_pcm_params(substream, &param);
+	sst_fill_alloc_params(substream, &alloc_params);
+	substream->runtime->dma_area = substream->dma_buffer.area;
+	str_params.sparams = param;
+	str_params.aparams = alloc_params;
+	str_params.codec = SST_CODEC_TYPE_PCM;
+
+	ctx->ll_db.period_time = CALC_PERIODTIME(substream->runtime->period_size,
+						 substream->runtime->rate);
+
+	/* fill the device type and stream id to pass to SST driver */
+	ret_val = sst_fill_stream_params(substream, ctx, &str_params, false);
+	pr_debug("platform prepare: fill stream params ret_val = 0x%x\n", ret_val);
+	if (ret_val < 0)
+		return ret_val;
+
+	stream->stream_info.str_id = str_params.stream_id;
+
+	ret_val = stream->ops->open(&str_params);
+	pr_debug("platform prepare: stream open ret_val = 0x%x\n", ret_val);
+	if (ret_val <= 0)
+		return ret_val;
+
+	pr_debug("platform allocated strid:  %d\n", stream->stream_info.str_id);
+
+	return ret_val;
+}
+
+/*
+ * sst_period_elapsed - DSP callback fired when a period completes.
+ *
+ * Validates the substream/stream pointers and only notifies ALSA while
+ * the stream is in the RUNNING state, so late callbacks after a stop or
+ * teardown are ignored.
+ */
+static void sst_period_elapsed(void *mad_substream)
+{
+	struct snd_pcm_substream *substream = mad_substream;
+	struct sst_runtime_stream *stream;
+	int status;
+
+	if (!substream || !substream->runtime) {
+		pr_debug("In %s : Null Substream pointer\n", __func__);
+		return;
+	}
+	stream = substream->runtime->private_data;
+	if (!stream) {
+		pr_debug("In %s : Null Stream pointer\n", __func__);
+		return;
+	}
+	status = sst_get_stream_status(stream);
+	if (status != SST_PLATFORM_RUNNING) {
+		pr_debug("In %s : Stream Status=%d\n", __func__, status);
+		return;
+	}
+	snd_pcm_period_elapsed(substream);
+}
+
+/*
+ * sst_platform_init_stream - register the period-elapsed callback and
+ * initial buffer state with the DSP for an allocated stream, and move the
+ * platform stream into the INIT state.  Returns the device_control
+ * result (0 on success).
+ */
+static int sst_platform_init_stream(struct snd_pcm_substream *substream)
+{
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	int ret_val;
+
+	pr_debug("setting buffer ptr param\n");
+	sst_set_stream_status(stream, SST_PLATFORM_INIT);
+	stream->stream_info.period_elapsed = sst_period_elapsed;
+	stream->stream_info.mad_substream = substream;
+	stream->stream_info.buffer_ptr = 0;
+	stream->stream_info.sfreq = substream->runtime->rate;
+	pr_debug("pcm_substream %p, period_elapsed %p\n",
+			stream->stream_info.mad_substream, stream->stream_info.period_elapsed);
+	ret_val = stream->ops->device_control(
+			SST_SND_STREAM_INIT, &stream->stream_info);
+	if (ret_val)
+		pr_err("control_set ret error %d\n", ret_val);
+	return ret_val;
+
+}
+
+/* Request the DSP power-on via the registered ops. */
+static inline int power_up_sst(struct sst_runtime_stream *sst)
+{
+	return sst->ops->power(true);
+}
+
+/* Release the DSP power reference via the registered ops. */
+static inline int power_down_sst(struct sst_runtime_stream *sst)
+{
+	return sst->ops->power(false);
+}
+/* end -- helper functions */
+
+/*
+ * sst_media_open - DAI startup: allocate the runtime stream, take a
+ * reference on the registered DSP module, and apply period constraints.
+ *
+ * The special "Power-cpu-dai" DAI only powers up the DSP and skips the
+ * PCM constraints.  On failure the stream is freed and no module
+ * reference is held.
+ * NOTE(review): if snd_pcm_hw_constraint_* fails after try_module_get(),
+ * the module reference is not dropped here — confirm shutdown() is still
+ * invoked in that path, otherwise the ref leaks.
+ */
+static int sst_media_open(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int ret_val = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct sst_runtime_stream *stream;
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		return -ENOMEM;
+
+	spin_lock_init(&stream->status_lock);
+
+	/* get the sst ops */
+	mutex_lock(&sst_dsp_lock);
+	if (!sst_dsp ||
+	    !try_module_get(sst_dsp->dev->driver->owner)) {
+		pr_err("no device available to run\n");
+		ret_val = -ENODEV;
+		goto out_ops;
+	}
+	stream->ops = sst_dsp->ops;
+	mutex_unlock(&sst_dsp_lock);
+
+	stream->stream_info.str_id = 0;
+	sst_set_stream_status(stream, SST_PLATFORM_UNINIT);
+	stream->stream_info.mad_substream = substream;
+	runtime->private_data = stream;
+
+	if (strstr(dai->name, "Power-cpu-dai"))
+		return power_up_sst(stream);
+
+	/* Make sure, that the period size is always even */
+	snd_pcm_hw_constraint_step(substream->runtime, 0,
+			   SNDRV_PCM_HW_PARAM_PERIODS, 2);
+
+	pr_debug("buf_ptr %llu\n", stream->stream_info.buffer_ptr);
+	return snd_pcm_hw_constraint_integer(runtime,
+			 SNDRV_PCM_HW_PARAM_PERIODS);
+out_ops:
+	kfree(stream);
+	mutex_unlock(&sst_dsp_lock);
+	return ret_val;
+}
+
+/*
+ * sst_free_stream_in_use - release a dynamically claimed stream-map slot
+ * on close so it can be reassigned (non-DPCM builds only; the DPCM build
+ * keeps the static mapping).  Capture on the audio device is left alone.
+ */
+static void sst_free_stream_in_use(struct sst_dev_stream_map *map, int str_id)
+{
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	return;
+#else
+	if ((map[str_id].dev_num == MERR_SALTBAY_AUDIO) ||
+	    (map[str_id].dev_num == MERR_SALTBAY_PROBE)) {
+
+		/* Do nothing in capture for audio device */
+		if ((map[str_id].dev_num == MERR_SALTBAY_AUDIO) &&
+		    (map[str_id].direction == SNDRV_PCM_STREAM_CAPTURE))
+			return;
+		if ((map[str_id].task_id == SST_TASK_ID_MEDIA) &&
+		    (map[str_id].status == SST_DEV_MAP_IN_USE)) {
+			pr_debug("str_id %d device_id 0x%x\n", str_id, map[str_id].device_id);
+			map[str_id].status = SST_DEV_MAP_FREE;
+			map[str_id].device_id = PIPE_RSVD;
+		}
+	}
+	return;
+#endif
+}
+
+/*
+ * sst_media_close - DAI shutdown: power down the DSP for the power DAI,
+ * close the DSP stream if one was opened (str_id != 0), free the map
+ * slot, drop the module reference taken in sst_media_open() and free the
+ * runtime stream.
+ */
+static void sst_media_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val = 0, str_id;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(dai->platform);
+
+	stream = substream->runtime->private_data;
+	if (strstr(dai->name, "Power-cpu-dai"))
+		ret_val = power_down_sst(stream);
+
+	str_id = stream->stream_info.str_id;
+	if (str_id)
+		ret_val = stream->ops->close(str_id);
+	sst_free_stream_in_use(ctx->pdata->pdev_strm_map, str_id);
+	module_put(sst_dsp->dev->driver->owner);
+	kfree(stream);
+	pr_debug("%s: %d\n", __func__, ret_val);
+}
+
+/*
+ * sst_dpcm_probe_cmd - connect (@on=true) or disconnect a probe point on
+ * @pipe_id; a no-op unless the DPCM build is enabled and the substream
+ * belongs to the probe PCM device.
+ */
+static int sst_dpcm_probe_cmd(struct snd_soc_platform *platform,
+		struct snd_pcm_substream *substream, u16 pipe_id, bool on)
+{
+	int ret = 0;
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	if (substream->pcm->device == MERR_DPCM_PROBE)
+		ret = sst_dpcm_probe_send(platform, pipe_id, substream->number,
+				substream->stream, on);
+#endif
+	return ret;
+}
+
+/*
+ * get_current_pipe_id - look up the pipe (device) id currently assigned
+ * to this substream's stream-map entry.
+ */
+static inline unsigned int get_current_pipe_id(struct snd_soc_platform *platform,
+					       struct snd_pcm_substream *substream)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_dev_stream_map *map = sst->pdata->pdev_strm_map;
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	u32 str_id = stream->stream_info.str_id;
+	unsigned int pipe_id;
+	pipe_id = map[str_id].device_id;
+
+	pr_debug("%s: got pipe_id = %#x for str_id = %d\n",
+		 __func__, pipe_id, str_id);
+	return pipe_id;
+}
+
+/*
+ * sst_probe_close - probe DAI shutdown: disconnect the probe point first,
+ * then run the normal media close path.
+ */
+static void sst_probe_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	u16 probe_pipe_id = get_current_pipe_id(dai->platform, substream);
+
+	sst_dpcm_probe_cmd(dai->platform, substream, probe_pipe_id, false);
+	sst_media_close(substream, dai);
+}
+
+/*
+ * sst_media_prepare - DAI prepare: allocate and initialize the DSP
+ * stream on first prepare; subsequent prepares (str_id already set) are
+ * no-ops.  The PCM id string is rewritten to the stream id for
+ * debugging.  Returns 0 on success or a negative error.
+ */
+static int sst_media_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val = 0, str_id;
+
+	pr_debug("%s\n", __func__);
+
+	stream = substream->runtime->private_data;
+	str_id = stream->stream_info.str_id;
+	/* already allocated on a previous prepare - nothing to do */
+	if (stream->stream_info.str_id)
+		return ret_val;
+
+	ret_val = sst_platform_alloc_stream(substream, dai->platform);
+	if (ret_val <= 0)
+		return ret_val;
+	snprintf(substream->pcm->id, sizeof(substream->pcm->id),
+			"%d", stream->stream_info.str_id);
+
+	ret_val = sst_platform_init_stream(substream);
+	if (ret_val)
+		return ret_val;
+	substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
+
+	return ret_val;
+}
+
+/*
+ * sst_probe_prepare - probe DAI prepare: set up the media stream, then
+ * connect the probe point on the pipe that was just assigned.
+ *
+ * Fix: the return value of sst_media_prepare() was ignored, so a failed
+ * stream allocation/init would still issue a probe-connect command on a
+ * stale/unassigned pipe id.  Bail out on error instead.
+ */
+static int sst_probe_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	u16 probe_pipe_id;
+	int ret;
+
+	ret = sst_media_prepare(substream, dai);
+	if (ret < 0)
+		return ret;
+	probe_pipe_id = get_current_pipe_id(dai->platform, substream);
+
+	return sst_dpcm_probe_cmd(dai->platform, substream, probe_pipe_id, true);
+}
+
+/*
+ * sst_media_hw_params - allocate the DMA buffer for the substream and
+ * zero it.
+ *
+ * Fix: snd_pcm_lib_malloc_pages() can fail (e.g. -ENOMEM); the previous
+ * code ignored the result and then memset() a NULL runtime->dma_area.
+ * Propagate the error instead.
+ */
+static int sst_media_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+	if (ret < 0)
+		return ret;
+	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
+	return 0;
+}
+
+/* Release the DMA buffer allocated in sst_media_hw_params(). */
+static int sst_media_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
+/* Default ops for media/voip/speaker/power DAIs. */
+static struct snd_soc_dai_ops sst_media_dai_ops = {
+	.startup = sst_media_open,
+	.shutdown = sst_media_close,
+	.prepare = sst_media_prepare,
+	.hw_params = sst_media_hw_params,
+	.hw_free = sst_media_hw_free,
+	.set_tdm_slot = sst_platform_ihf_set_tdm_slot,
+	.mute_stream = sst_media_digital_mute,
+};
+
+/* Probe DAI: same open/hw handling, probe-specific prepare/shutdown. */
+static struct snd_soc_dai_ops sst_probe_dai_ops = {
+	.startup = sst_media_open,
+	.hw_params = sst_media_hw_params,
+	.hw_free = sst_media_hw_free,
+	.shutdown = sst_probe_close,
+	.prepare = sst_probe_prepare,
+};
+
+/* Loopback capture DAI: no hw_params/hw_free (no DMA buffer of its own). */
+static struct snd_soc_dai_ops sst_loopback_dai_ops = {
+	.startup = sst_media_open,
+	.shutdown = sst_media_close,
+	.prepare = sst_media_prepare,
+};
+
+/* Compressed DAI: data path goes via compr_ops; only mute is handled here. */
+static struct snd_soc_dai_ops sst_compr_dai_ops = {
+	.mute_stream = sst_media_digital_mute,
+};
+
+/*
+ * CPU DAI table registered with the pcm component: front-end DAIs
+ * (headset, deep-buffer, low-latency, speaker, voice, compress, virtual,
+ * power, probe, voip, loopback) followed by the back-end SSP2 DAI.
+ */
+static struct snd_soc_dai_driver sst_platform_dai[] = {
+{
+	.name = SST_HEADSET_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Headset Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Headset Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_DEEPBUFFER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Deepbuffer Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_LOWLATENCY_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Low Latency Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_SPEAKER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Speaker Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	/* no ops: voice up/downlink is routed entirely inside the DSP */
+	.name = SST_VOICE_DAI,
+	.playback = {
+		.stream_name = "Voice Downlink",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Voice Uplink",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_COMPRESS_DAI,
+	.compress_dai = 1,
+	.ops = &sst_compr_dai_ops,
+	.playback = {
+		.stream_name = "Compress Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_VIRTUAL_DAI,
+	.playback = {
+		.stream_name = "Virtual Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	/* dummy stream used only to hold the DSP powered up */
+	.name = SST_POWER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Dummy Power Stream",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = SST_PROBE_DAI,
+	.ops = &sst_probe_dai_ops,
+	.playback = {
+		.stream_name = "Probe Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 |
+			 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+	.capture = {
+		.stream_name = "Probe Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 |
+			 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = SST_VOIP_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "VOIP Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "VOIP Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_LOOPBACK_DAI,
+	.ops = &sst_loopback_dai_ops,
+	.capture = {
+		.stream_name = "Loopback Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_MONO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+/*BE CPU Dais */
+{
+	.name = "ssp2-codec",
+	.playback = {
+		.stream_name = "ssp2 playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp2 Capture",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+};
+
+/*
+ * sst_platform_open - PCM open: install the platform hardware caps on
+ * the runtime; internal (DPCM back-end) PCMs are left untouched.
+ */
+static int sst_platform_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	pr_debug("sst_platform_open called:%s\n", dai_link->cpu_dai_name);
+	if (substream->pcm->internal)
+		return 0;
+	runtime = substream->runtime;
+	runtime->hw = sst_platform_pcm_hw;
+	return 0;
+}
+
+/* PCM close: nothing to release here; teardown happens in the DAI shutdown. */
+static int sst_platform_close(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	pr_debug("sst_platform_close called:%s\n", dai_link->cpu_dai_name);
+	return 0;
+}
+
+/*
+ * sst_platform_pcm_trigger - map ALSA trigger commands to SST stream
+ * commands (start/drop/pause/resume) and update the platform stream
+ * state on success.  Internal PCMs are ignored; unsupported commands
+ * (e.g. SUSPEND/RESUME) return -EINVAL.
+ */
+static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	int ret_val = 0, str_id;
+	struct sst_runtime_stream *stream;
+	int str_cmd, status, alsa_state;
+
+	if (substream->pcm->internal)
+		return 0;
+	pr_debug("sst_platform_pcm_trigger called\n");
+	stream = substream->runtime->private_data;
+	str_id = stream->stream_info.str_id;
+	alsa_state = substream->runtime->status->state;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		pr_debug("Trigger Start\n");
+		str_cmd = SST_SND_START;
+		status = SST_PLATFORM_RUNNING;
+		stream->stream_info.mad_substream = substream;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("Trigger stop\n");
+		str_cmd = SST_SND_DROP;
+		status = SST_PLATFORM_DROPPED;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("Trigger pause\n");
+		str_cmd = SST_SND_PAUSE;
+		status = SST_PLATFORM_PAUSED;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("Trigger pause release\n");
+		str_cmd = SST_SND_RESUME;
+		status = SST_PLATFORM_RUNNING;
+		break;
+	default:
+		return -EINVAL;
+	}
+	ret_val = stream->ops->device_control(str_cmd, &str_id);
+	if (!ret_val)
+		sst_set_stream_status(stream, status);
+
+	return ret_val;
+}
+
+
+/*
+ * sst_platform_pcm_pointer - query the DSP for the current buffer
+ * position and delay; also publishes the DSP delay via runtime->soc_delay.
+ * NOTE(review): on device_control failure the negative errno is returned
+ * through the unsigned snd_pcm_uframes_t type — confirm callers expect
+ * this rather than SNDRV_PCM_POS_XRUN.
+ */
+static snd_pcm_uframes_t sst_platform_pcm_pointer
+			(struct snd_pcm_substream *substream)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val, status;
+	struct pcm_stream_info *str_info;
+
+	stream = substream->runtime->private_data;
+	status = sst_get_stream_status(stream);
+	/* no data has moved yet while still in INIT */
+	if (status == SST_PLATFORM_INIT)
+		return 0;
+	str_info = &stream->stream_info;
+	ret_val = stream->ops->device_control(
+				SST_SND_BUFFER_POINTER, str_info);
+	if (ret_val) {
+		pr_err("sst: error code = %d\n", ret_val);
+		return ret_val;
+	}
+	substream->runtime->soc_delay = str_info->pcm_delay;
+	return str_info->buffer_ptr;
+}
+
+/* PCM ops for the platform; hw_params/hw_free live on the DAI ops. */
+static struct snd_pcm_ops sst_platform_ops = {
+	.open = sst_platform_open,
+	.close = sst_platform_close,
+	.ioctl = snd_pcm_lib_ioctl,
+	.trigger = sst_platform_pcm_trigger,
+	.pointer = sst_platform_pcm_pointer,
+};
+
+/* Free the DMA preallocation made in sst_pcm_new(). */
+static void sst_pcm_free(struct snd_pcm *pcm)
+{
+	pr_debug("sst_pcm_free called\n");
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+/*
+ * sst_pcm_new - preallocate a contiguous DMA buffer for every PCM whose
+ * CPU DAI actually has a playback or capture direction.
+ *
+ * Fix: corrected the typo in the error message ("allocationf fail").
+ * Returns 0 on success or the preallocation error.
+ */
+static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	struct snd_pcm *pcm = rtd->pcm;
+	int retval = 0;
+
+	pr_debug("sst_pcm_new called\n");
+	if (dai->driver->playback.channels_min ||
+			dai->driver->capture.channels_min) {
+		retval =  snd_pcm_lib_preallocate_pages_for_all(pcm,
+			SNDRV_DMA_TYPE_CONTINUOUS,
+			snd_dma_continuous_data(GFP_DMA),
+			SST_MAX_BUFFER, SST_MAX_BUFFER);
+		if (retval) {
+			pr_err("dma buffer allocation failed\n");
+			return retval;
+		}
+	}
+	return retval;
+}
+
+/*
+ * sst_soc_probe - platform probe: initialize the DSP topology (DPCM
+ * variant when built in, legacy path otherwise).
+ *
+ * Fixes: the entry trace was logged at pr_err level — downgraded to
+ * pr_debug; removed the dead local spid (copied from pdata but never
+ * used) and the commented-out effect-registration code.  Effect
+ * registration (snd_soc_register_effect(platform->card, &effects_ops))
+ * is intentionally disabled until the effects API is available.
+ */
+static int sst_soc_probe(struct snd_soc_platform *platform)
+{
+	int ret;
+
+	pr_debug("Enter:%s\n", __func__);
+
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	ret = sst_dsp_init_v2_dpcm(platform);
+#else
+	ret = sst_dsp_init(platform);
+#endif
+
+	return ret;
+}
+
+/* Platform remove: nothing allocated in probe needs explicit teardown. */
+static int sst_soc_remove(struct snd_soc_platform *platform)
+{
+	pr_debug("%s called\n", __func__);
+	return 0;
+}
+
+/* ASoC platform driver glue: PCM ops, compress ops and DAPM read/write. */
+static struct snd_soc_platform_driver sst_soc_platform_drv  = {
+	.probe		= sst_soc_probe,
+	.remove		= sst_soc_remove,
+	.ops		= &sst_platform_ops,
+	.compr_ops	= &sst_platform_compr_ops,
+	.pcm_new	= sst_pcm_new,
+	.pcm_free	= sst_pcm_free,
+	.read		= sst_soc_read,
+	.write		= sst_soc_write,
+};
+
+/*
+ * sst_register_dsp - called by the SST core driver to publish its ops.
+ * Only one DSP may be registered at a time; a second registration fails
+ * with -EEXIST.  Protected by sst_dsp_lock.
+ */
+int sst_register_dsp(struct sst_device *sst_dev)
+{
+	if (!sst_dev)
+		return -ENODEV;
+	mutex_lock(&sst_dsp_lock);
+	if (sst_dsp) {
+		pr_err("we already have a device %s\n", sst_dsp->name);
+		mutex_unlock(&sst_dsp_lock);
+		return -EEXIST;
+	}
+	pr_debug("registering device %s\n", sst_dev->name);
+
+	sst_dsp = sst_dev;
+	mutex_unlock(&sst_dsp_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sst_register_dsp);
+
+/*
+ * sst_unregister_dsp - detach the currently registered DSP.
+ *
+ * Fix: the NULL check was inverted.  The old code read
+ * "if (sst_dsp) { ... return -EIO; }", which returned -EIO on the
+ * normal registered case and made "sst_dsp = NULL" unreachable, so a
+ * registered DSP could never actually be unregistered.  -EIO is now
+ * only returned when no DSP is registered.
+ */
+int sst_unregister_dsp(struct sst_device *dev)
+{
+	if (dev != sst_dsp)
+		return -EINVAL;
+
+	mutex_lock(&sst_dsp_lock);
+
+	if (!sst_dsp) {
+		mutex_unlock(&sst_dsp_lock);
+		return -EIO;
+	}
+
+	pr_debug("unregister %s\n", sst_dsp->name);
+	sst_dsp = NULL;
+	mutex_unlock(&sst_dsp_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sst_unregister_dsp);
+
+/* Component under which the CPU DAIs in sst_platform_dai[] are registered. */
+static const struct snd_soc_component_driver pcm_component = {
+	.name		= "pcm",
+};
+
+/*
+ * sst_platform_probe - platform device probe: allocate driver data,
+ * cache the device in the file-global sst_pdev (used by the effects
+ * callbacks), then register the ASoC platform and the CPU DAI component.
+ * NOTE(review): sst_pdev is not cleared on the error paths — confirm no
+ * stale pointer can be observed after a failed probe.
+ */
+static int sst_platform_probe(struct platform_device *pdev)
+{
+	struct sst_data *sst;
+	int ret;
+	struct sst_platform_data *pdata = pdev->dev.platform_data;
+
+	pr_debug("sst_platform_probe called\n");
+	sst = devm_kzalloc(&pdev->dev, sizeof(*sst), GFP_KERNEL);
+	if (sst == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	sst_pdev = &pdev->dev;
+	sst->pdata = pdata;
+	mutex_init(&sst->lock);
+	dev_set_drvdata(&pdev->dev, sst);
+
+	ret = snd_soc_register_platform(&pdev->dev,
+					   &sst_soc_platform_drv);
+	if (ret) {
+		pr_err("registering soc platform failed\n");
+		return ret;
+	}
+	ret = snd_soc_register_component(&pdev->dev, &pcm_component,
+				sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
+	if (ret) {
+		pr_err("registering cpu dais failed\n");
+		/* roll back the platform registration on component failure */
+		snd_soc_unregister_platform(&pdev->dev);
+	}
+
+	return ret;
+}
+
+/* Platform device remove: unregister component and platform (reverse of probe). */
+static int sst_platform_remove(struct platform_device *pdev)
+{
+
+	snd_soc_unregister_component(&pdev->dev);
+	snd_soc_unregister_platform(&pdev->dev);
+	pr_debug("sst_platform_remove success\n");
+	return 0;
+}
+
+/* Platform driver matching the "sst-platform" device created by the MFD/board code. */
+static struct platform_driver sst_platform_driver = {
+	.driver		= {
+		.name		= "sst-platform",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= sst_platform_probe,
+	.remove		= sst_platform_remove,
+};
+
+module_platform_driver(sst_platform_driver);
+
+MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sst-platform");
--- /dev/null
+/*
+ * controls_v1.c - Intel MID Platform driver ALSA controls for CTP
+ *
+ * Copyright (C) 2012 Intel Corp
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/slab.h>
+#include <sound/intel_sst_ioctl.h>
+#include <sound/soc.h>
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+
+
+/*
+ * sst_set_mixer_param - push a device|input-stream mixer selection to the
+ * DSP as an algorithm parameter.  Returns -ENODEV if no DSP registered.
+ */
+static int sst_set_mixer_param(unsigned int device_input_mixer)
+{
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -ENODEV;
+	}
+
+	/*allocate memory for params*/
+	return sst_dsp->ops->set_generic_params(SST_SET_ALGO_PARAMS,
+						(void *)&device_input_mixer);
+}
+/* kcontrol get: report the cached IHF mixer input selection. */
+static int lpe_mixer_ihf_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	ucontrol->value.integer.value[0] = sst->lpe_mixer_input_ihf;
+	return 0;
+}
+
+/*
+ * kcontrol put: select the IHF mixer input (0=None, 1=PCM, 2=Compress,
+ * 3=Mixed), cache the choice and forward it to the DSP.
+ */
+static int lpe_mixer_ihf_set(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int device_input_mixer;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		pr_debug("input is None\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+				| SST_INPUT_STREAM_NONE;
+		break;
+	case 1:
+		pr_debug("input is PCM stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+				| SST_INPUT_STREAM_PCM;
+		break;
+	case 2:
+		pr_debug("input is Compress stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+				| SST_INPUT_STREAM_COMPRESS;
+		break;
+	case 3:
+		pr_debug("input is Mixed stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+				| SST_INPUT_STREAM_MIXED;
+		break;
+	default:
+		pr_err("Invalid Input:%ld\n", ucontrol->value.integer.value[0]);
+		return -EINVAL;
+	}
+	sst->lpe_mixer_input_ihf = ucontrol->value.integer.value[0];
+	return sst_set_mixer_param(device_input_mixer);
+}
+
+/* Report the cached headset mixer input selection to userspace. */
+static int lpe_mixer_headset_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+ ucontrol->value.integer.value[0] = sst->lpe_mixer_input_hs;
+ return 0;
+}
+
+/*
+ * Select the input stream feeding the headset mixer.
+ * Mirrors lpe_mixer_ihf_set() but targets SST_STREAM_DEVICE_HS and caches
+ * the choice in sst->lpe_mixer_input_hs.
+ * Returns -EINVAL for an out-of-range index, else the IPC result.
+ */
+static int lpe_mixer_headset_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int mixer_input_stream;
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ pr_debug("input is None\n");
+ mixer_input_stream = SST_STREAM_DEVICE_HS
+ | SST_INPUT_STREAM_NONE;
+ break;
+ case 1:
+ pr_debug("input is PCM stream\n");
+ mixer_input_stream = SST_STREAM_DEVICE_HS
+ | SST_INPUT_STREAM_PCM;
+ break;
+ case 2:
+ pr_debug("input is Compress stream\n");
+ mixer_input_stream = SST_STREAM_DEVICE_HS
+ | SST_INPUT_STREAM_COMPRESS;
+ break;
+ case 3:
+ pr_debug("input is Mixed stream\n");
+ mixer_input_stream = SST_STREAM_DEVICE_HS
+ | SST_INPUT_STREAM_MIXED;
+ break;
+ default:
+ pr_err("Invalid Input:%ld\n", ucontrol->value.integer.value[0]);
+ return -EINVAL;
+ }
+ sst->lpe_mixer_input_hs = ucontrol->value.integer.value[0];
+ return sst_set_mixer_param(mixer_input_stream);
+}
+
+/*
+ * Fetch the probe byte stream from the DSP into ucontrol->value.bytes.data.
+ * Note: the read path also goes through set_generic_params(), with the
+ * SST_GET_PROBE_BYTE_STREAM opcode selecting the direction.
+ */
+static int sst_probe_byte_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ if (!sst_dsp) {
+ pr_err("sst: DSP not registered\n");
+ return -ENODEV;
+ }
+
+ return sst_dsp->ops->set_generic_params(SST_GET_PROBE_BYTE_STREAM,
+ ucontrol->value.bytes.data);
+}
+
+/* Send the userspace-supplied probe byte stream to the DSP. */
+static int sst_probe_byte_control_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ if (!sst_dsp) {
+ pr_err("sst: DSP not registered\n");
+ return -ENODEV;
+ }
+
+ return sst_dsp->ops->set_generic_params(SST_SET_PROBE_BYTE_STREAM,
+ ucontrol->value.bytes.data);
+}
+
+/* Enum item labels shared by the IHF and headset mixer-input controls;
+ * index order must match the switch cases in the *_set handlers. */
+static const char *lpe_mixer_text[] = {
+ "None", "PCM", "Compressed", "Mixed",
+};
+
+static const struct soc_enum lpe_mixer_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lpe_mixer_text), lpe_mixer_text);
+
+/* Platform ALSA controls registered for Clovertrail (CLV). */
+static const struct snd_kcontrol_new sst_controls_clv[] = {
+ SOC_ENUM_EXT("LPE IHF mixer", lpe_mixer_enum,
+ lpe_mixer_ihf_get, lpe_mixer_ihf_set),
+ SOC_ENUM_EXT("LPE headset mixer", lpe_mixer_enum,
+ lpe_mixer_headset_get, lpe_mixer_headset_set),
+ SND_SOC_BYTES_EXT("SST Probe Byte Control", SST_MAX_BIN_BYTES,
+ sst_probe_byte_control_get,
+ sst_probe_byte_control_set),
+};
+
+/*
+ * sst_platform_clv_init - register CLV platform controls.
+ * Resets both cached mixer-input selections to 0 ("None") and adds the
+ * sst_controls_clv table to the platform. Always returns 0; the return
+ * value of snd_soc_add_platform_controls() is not checked here.
+ */
+int sst_platform_clv_init(struct snd_soc_platform *platform)
+{
+ struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+ ctx->lpe_mixer_input_hs = 0;
+ ctx->lpe_mixer_input_ihf = 0;
+ snd_soc_add_platform_controls(platform, sst_controls_clv,
+ ARRAY_SIZE(sst_controls_clv));
+ return 0;
+}
--- /dev/null
+/*
+ * controls_v2.c - Intel MID Platform driver ALSA controls for Mrfld
+ *
+ * Copyright (C) 2012 Intel Corp
+ * Author: Vinod Koul <vinod.koul@ilinux.intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <sound/soc.h>
+#include <sound/asound.h>
+#include <asm/platform_sst_audio.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+#include "ipc_lib.h"
+#include "controls_v2.h"
+
+
+/*
+ * Build a snd_kcontrol_new for an SST algorithm integer parameter.
+ * The private_value points at a compound-literal sst_algo_int_control_v2
+ * carrying both the soc_mixer_control fields (reg/shift/max/invert) and the
+ * SST addressing triple (module_id, pipe_id, instance_id) plus a default.
+ */
+#define SST_ALGO_KCONTROL_INT(xname, xreg, xshift, xmax, xinvert,\
+ xhandler_get, xhandler_put, xmod, xpipe, xinstance, default_val) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = sst_algo_int_ctl_info, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_algo_int_control_v2) \
+ {.mc.reg = xreg, .mc.rreg = xreg, .mc.shift = xshift, \
+ .mc.rshift = xshift, .mc.max = xmax, .mc.platform_max = xmax, \
+ .mc.invert = xinvert, .module_id = xmod, .pipe_id = xpipe, \
+ .instance_id = xinstance, .value = default_val } }
+/* Thresholds for Low Latency & Deep Buffer*/
+#define DEFAULT_LOW_LATENCY 10 /* In Ms */
+#define DEFAULT_DEEP_BUFFER 96 /* presumably also ms — TODO confirm */
+
+/* Runtime-tunable copies of the defaults; non-static so other translation
+ * units in this driver can reference them. */
+unsigned long ll_threshold = DEFAULT_LOW_LATENCY;
+unsigned long db_threshold = DEFAULT_DEEP_BUFFER;
+
+/*
+ * .info callback for SST_ALGO_KCONTROL_INT controls: a single integer
+ * element ranging 0..platform_max. platform_max is lazily defaulted to
+ * mc->max on first call.
+ */
+int sst_algo_int_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+ struct soc_mixer_control *mc = &amc->mc;
+ int platform_max;
+
+ if (!mc->platform_max)
+ mc->platform_max = mc->max;
+ platform_max = mc->platform_max;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = platform_max;
+ return 0;
+}
+
+/*
+ * Virtual "register" read: the widget[] array in sst_data acts as a
+ * software register cache for the DAPM routing state. BUG_ON guards
+ * against an out-of-range index.
+ */
+unsigned int sst_soc_read(struct snd_soc_platform *platform,
+ unsigned int reg)
+{
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+ pr_debug("%s for reg %d val=%d\n", __func__, reg, sst->widget[reg]);
+ BUG_ON(reg > (SST_NUM_WIDGETS - 1));
+ return sst->widget[reg];
+}
+
+/*
+ * Virtual "register" write into the widget[] software cache.
+ * NOTE(review): the BUG_ON fires after widget[reg] has already been read
+ * in the pr_debug above in sst_soc_read's pattern; here the check precedes
+ * the store, so the write itself is bounds-safe.
+ */
+int sst_soc_write(struct snd_soc_platform *platform,
+ unsigned int reg, unsigned int val)
+{
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+ pr_debug("%s for reg %d val %d\n", __func__, reg, val);
+ BUG_ON(reg > (SST_NUM_WIDGETS - 1));
+ sst->widget[reg] = val;
+ return 0;
+}
+
+/*
+ * Extract a bitfield from a virtual register: bits [shift .. shift+fls(max))
+ * of widget[reg]. For max == 1 the mask is a single bit.
+ */
+unsigned int sst_reg_read(struct sst_data *sst, unsigned int reg,
+ unsigned int shift, unsigned int max)
+{
+ unsigned int mask = (1 << fls(max)) - 1;
+
+ return (sst->widget[reg] >> shift) & mask;
+}
+
+/*
+ * Read-modify-write a bitfield in a virtual register and return the
+ * masked, shifted value that was stored (not the whole register).
+ */
+unsigned int sst_reg_write(struct sst_data *sst, unsigned int reg,
+ unsigned int shift, unsigned int max, unsigned int val)
+{
+ unsigned int mask = (1 << fls(max)) - 1;
+
+ val &= mask;
+ val <<= shift;
+ sst->widget[reg] &= ~(mask << shift);
+ sst->widget[reg] |= val;
+ return val;
+}
+
+/*
+ * .put handler for the mixer routing switches: stores the bit in the
+ * virtual register cache and kicks DAPM so widget power follows the
+ * connect state.
+ * NOTE(review): always returns 0 even when the value changed — ALSA
+ * expects 1 on change so events reach userspace; confirm intended.
+ */
+int sst_mix_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+ unsigned int mask = (1 << fls(mc->max)) - 1;
+ unsigned int val;
+ int connect;
+ struct snd_soc_dapm_update update;
+
+ pr_debug("%s called set %ld for %s\n", __func__,
+ ucontrol->value.integer.value[0], widget->name);
+ val = sst_reg_write(sst, mc->reg, mc->shift, mc->max, ucontrol->value.integer.value[0]);
+ connect = !!val;
+
+ /* publish the pending change so DAPM sequences power correctly */
+ widget->value = val;
+ update.kcontrol = kcontrol;
+ update.widget = widget;
+ update.reg = mc->reg;
+ update.mask = mask;
+ update.val = val;
+
+ widget->dapm->update = &update;
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, connect);
+ widget->dapm->update = NULL;
+ return 0;
+}
+
+/*
+ * .get handler for the mixer routing switches: reads the bit back from
+ * the virtual register cache, normalized to 0/1.
+ */
+int sst_mix_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *w = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+
+ pr_debug("%s called for %s\n", __func__, w->name);
+ ucontrol->value.integer.value[0] = !!sst_reg_read(sst, mc->reg, mc->shift, mc->max);
+ return 0;
+}
+
+/* On/off routing switches: one bit per input source (bits 0-18 of
+ * SST_MIX_MODEM) feeding the Modem output mixer. */
+static const struct snd_kcontrol_new sst_mix_modem_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_MODEM, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_MODEM, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_MODEM, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_MODEM, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MODEM, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_MODEM, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_MODEM, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_MODEM, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_MODEM, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MODEM, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MODEM, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_MODEM, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_MODEM, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_MODEM, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_MODEM, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_MODEM, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_MODEM, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_MODEM, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_MODEM, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches as the Modem table, targeting SST_MIX_CODEC0. */
+static const struct snd_kcontrol_new sst_mix_codec0_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_CODEC0, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_CODEC0, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_CODEC0, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_CODEC0, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_CODEC0, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_CODEC0, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_CODEC0, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_CODEC0, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_CODEC0, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_CODEC0, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_CODEC0, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_CODEC0, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_CODEC0, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_CODEC0, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_CODEC0, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_CODEC0, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_CODEC0, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_CODEC0, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_CODEC0, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_CODEC1. */
+static const struct snd_kcontrol_new sst_mix_codec1_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_CODEC1, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_CODEC1, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_CODEC1, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_CODEC1, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_CODEC1, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_CODEC1, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_CODEC1, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_CODEC1, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_CODEC1, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_CODEC1, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_CODEC1, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_CODEC1, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_CODEC1, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_CODEC1, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_CODEC1, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_CODEC1, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_CODEC1, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_CODEC1, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_CODEC1, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_LOOP0 (Sprot loop 0). */
+static const struct snd_kcontrol_new sst_mix_sprot_l0_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_LOOP0, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_LOOP0, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP0, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP0, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP0, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP0, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP0, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_LOOP0, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP0, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP0, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP0, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_LOOP0, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_LOOP0, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP0, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP0, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_LOOP0, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_LOOP0, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_LOOP0, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_LOOP0, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_LOOP1 (Media loop 1). */
+static const struct snd_kcontrol_new sst_mix_media_l1_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_LOOP1, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_LOOP1, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP1, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP1, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP1, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP1, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP1, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_LOOP1, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP1, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP1, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP1, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_LOOP1, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_LOOP1, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP1, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP1, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_LOOP1, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_LOOP1, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_LOOP1, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_LOOP1, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_LOOP2 (Media loop 2). */
+static const struct snd_kcontrol_new sst_mix_media_l2_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_LOOP2, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_LOOP2, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP2, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP2, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP2, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP2, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP2, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_LOOP2, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP2, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP2, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP2, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_LOOP2, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_LOOP2, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP2, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP2, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_LOOP2, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_LOOP2, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_LOOP2, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_LOOP2, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_SPEECH (speech uplink). */
+static const struct snd_kcontrol_new sst_mix_speech_tx_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_SPEECH, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_SPEECH, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_SPEECH, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_SPEECH, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_SPEECH, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_SPEECH, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_SPEECH, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_SPEECH, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_SPEECH, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_SPEECH, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_SPEECH, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_SPEECH, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_SPEECH, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_SPEECH, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_SPEECH, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_SPEECH, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_SPEECH, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_SPEECH, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_SPEECH, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_RXSPEECH (speech downlink). */
+static const struct snd_kcontrol_new sst_mix_speech_rx_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_RXSPEECH, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_RXSPEECH, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_RXSPEECH, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_RXSPEECH, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_RXSPEECH, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_RXSPEECH, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_RXSPEECH, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_RXSPEECH, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_RXSPEECH, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_RXSPEECH, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_RXSPEECH, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_RXSPEECH, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_RXSPEECH, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_RXSPEECH, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_RXSPEECH, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_RXSPEECH, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_RXSPEECH, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_RXSPEECH, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_RXSPEECH, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_VOIP. */
+static const struct snd_kcontrol_new sst_mix_voip_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_VOIP, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_VOIP, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_VOIP, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_VOIP, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_VOIP, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_VOIP, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_VOIP, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_VOIP, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_VOIP, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_VOIP, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_VOIP, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_VOIP, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_VOIP, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_VOIP, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_VOIP, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_VOIP, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_VOIP, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_VOIP, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_VOIP, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_PCM0 (capture mix 0). */
+static const struct snd_kcontrol_new sst_mix_pcm0_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_PCM0, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_PCM0, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_PCM0, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_PCM0, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM0, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM0, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM0, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_PCM0, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM0, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM0, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM0, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_PCM0, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_PCM0, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_PCM0, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_PCM0, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_PCM0, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_PCM0, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_PCM0, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_PCM0, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_PCM1. */
+static const struct snd_kcontrol_new sst_mix_pcm1_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_PCM1, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_PCM1, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_PCM1, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_PCM1, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM1, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM1, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM1, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_PCM1, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM1, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM1, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM1, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_PCM1, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_PCM1, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_PCM1, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_PCM1, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_PCM1, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_PCM1, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_PCM1, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_PCM1, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_PCM2. */
+static const struct snd_kcontrol_new sst_mix_pcm2_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_PCM2, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_PCM2, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_PCM2, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_PCM2, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM2, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM2, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM2, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_PCM2, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM2, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM2, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM2, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_PCM2, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_PCM2, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_PCM2, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_PCM2, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_PCM2, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_PCM2, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_PCM2, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_PCM2, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_AWARE (context-aware path). */
+static const struct snd_kcontrol_new sst_mix_aware_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_AWARE, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_AWARE, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_AWARE, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_AWARE, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_AWARE, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_AWARE, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_AWARE, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_AWARE, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_AWARE, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_AWARE, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_AWARE, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_AWARE, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_AWARE, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_AWARE, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_AWARE, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_AWARE, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_AWARE, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_AWARE, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_AWARE, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_VAD (voice activity detect). */
+static const struct snd_kcontrol_new sst_mix_vad_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_VAD, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_VAD, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_VAD, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_VAD, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_VAD, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_VAD, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_VAD, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_VAD, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_VAD, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_VAD, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_VAD, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_VAD, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_VAD, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_VAD, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_VAD, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_VAD, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_VAD, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_VAD, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_VAD, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/* Same 19 source switches, targeting SST_MIX_MEDIA0. */
+static const struct snd_kcontrol_new sst_mix_media0_controls[] = {
+ SOC_SINGLE_EXT("Modem", SST_MIX_MEDIA0, 0, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("BT", SST_MIX_MEDIA0, 1, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec0", SST_MIX_MEDIA0, 2, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Codec1", SST_MIX_MEDIA0, 3, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MEDIA0, 4, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L1", SST_MIX_MEDIA0, 5, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media_L2", SST_MIX_MEDIA0, 6, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Probe", SST_MIX_MEDIA0, 7, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Sidetone", SST_MIX_MEDIA0, 8, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MEDIA0, 9, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MEDIA0, 10, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Tone", SST_MIX_MEDIA0, 11, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Voip", SST_MIX_MEDIA0, 12, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM0", SST_MIX_MEDIA0, 13, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("PCM1", SST_MIX_MEDIA0, 14, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media0", SST_MIX_MEDIA0, 15, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media1", SST_MIX_MEDIA0, 16, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("Media2", SST_MIX_MEDIA0, 17, 1, 0,
+ sst_mix_get, sst_mix_put),
+ SOC_SINGLE_EXT("FM", SST_MIX_MEDIA0, 18, 1, 0,
+ sst_mix_get, sst_mix_put),
+};
+
+/*
+ * Input-enable controls for the Media1 mixer.  Same source-bit layout
+ * (Modem=0 .. FM=18) as the other SST mixer tables in this file, only
+ * the target register (SST_MIX_MEDIA1) differs.
+ */
+static const struct snd_kcontrol_new sst_mix_media1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_MEDIA1, 0, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_MEDIA1, 1, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_MEDIA1, 2, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_MEDIA1, 3, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MEDIA1, 4, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_MEDIA1, 5, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_MEDIA1, 6, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_MEDIA1, 7, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_MEDIA1, 8, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MEDIA1, 9, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MEDIA1, 10, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_MEDIA1, 11, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_MEDIA1, 12, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_MEDIA1, 13, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_MEDIA1, 14, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_MEDIA1, 15, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_MEDIA1, 16, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_MEDIA1, 17, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_MEDIA1, 18, 1, 0,
+			sst_mix_get, sst_mix_put),
+};
+
+/*
+ * Input-enable controls for the FM mixer.  Same source-bit layout
+ * (Modem=0 .. FM=18) as the other SST mixer tables; register is
+ * SST_MIX_FM.
+ */
+static const struct snd_kcontrol_new sst_mix_fm_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_FM, 0, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_FM, 1, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_FM, 2, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_FM, 3, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_FM, 4, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_FM, 5, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_FM, 6, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_FM, 7, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_FM, 8, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_FM, 9, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_FM, 10, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_FM, 11, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_FM, 12, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_FM, 13, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_FM, 14, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_FM, 15, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_FM, 16, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_FM, 17, 1, 0,
+			sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_FM, 18, 1, 0,
+			sst_mix_get, sst_mix_put),
+};
+
+/*
+ * Per-mixer output enable switches.  One control per mixer, mapped to
+ * consecutive bits 0..16 of the SST_MIX_SWITCH virtual register; these
+ * back the "Mix <name> Switch" DAPM widgets declared further down.
+ */
+static const struct snd_kcontrol_new sst_mix_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_sprot_l0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 3, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media_l1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 4, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media_l2 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 5, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm2 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_aware =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_vad =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 14, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 15, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 16, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+/*
+ * Output-pipeline enable switches, bits of SST_OUT_SWITCH.  Bits 3-5
+ * are deliberately unused: the loop mixers (Sprot L0, Media L1/L2)
+ * have no output-pipe switch — see the "media loops" comment in the
+ * intercon route table.
+ */
+static const struct snd_kcontrol_new sst_out_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm2 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_aware =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_vad =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 14, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 15, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 16, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+/*
+ * Input-pipeline enable switches, consecutive bits 0..13 of
+ * SST_IN_SWITCH.
+ *
+ * NOTE(review): sst_in_sw_sidetone (bit 3) is not referenced by any
+ * widget in the sst_dapm_widgets table below — confirm whether it is
+ * used elsewhere or is dead; sidetone routing currently taps
+ * "Mix Speech Tx Switch" directly in the intercon table.
+ */
+static const struct snd_kcontrol_new sst_in_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_sidetone =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 3, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 4, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 5, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_tone =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media2 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+/*
+ * DAPM widget table for the SST audio graph: external endpoints
+ * (INPUT/OUTPUT), stream endpoints (AIF_IN/AIF_OUT bound to the named
+ * DAI streams), one MIXER widget per SST mixer, and SWITCH widgets
+ * gating each mixer output and each input/output pipeline.  All
+ * widgets use SND_SOC_NOPM — power state is managed by the SST
+ * firmware, not by register writes from DAPM.
+ */
+static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("Modem IN"),
+	SND_SOC_DAPM_INPUT("Codec IN0"),
+	SND_SOC_DAPM_INPUT("Codec IN1"),
+	SND_SOC_DAPM_INPUT("Tone IN"),
+	SND_SOC_DAPM_INPUT("FM IN"),
+	SND_SOC_DAPM_OUTPUT("Modem OUT"),
+	SND_SOC_DAPM_OUTPUT("Codec OUT0"),
+	SND_SOC_DAPM_OUTPUT("Codec OUT1"),
+	SND_SOC_DAPM_OUTPUT("FM OUT"),
+	SND_SOC_DAPM_AIF_IN("Voip IN", "VoIP", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("Media IN0", "Compress", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("Media IN1", "PCM", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("Voip OUT", "VoIP", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PCM1 OUT", "Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("Aware OUT", "Aware", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VAD OUT", "VAD", 0, SND_SOC_NOPM, 0, 0),
+
+	/* output mixers */
+	SND_SOC_DAPM_MIXER("MIX Modem", SND_SOC_NOPM, 0, 0,
+		sst_mix_modem_controls, ARRAY_SIZE(sst_mix_modem_controls)),
+	SND_SOC_DAPM_MIXER("MIX Codec0", SND_SOC_NOPM, 0, 0,
+		sst_mix_codec0_controls , ARRAY_SIZE(sst_mix_codec0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Codec1", SND_SOC_NOPM, 0, 0,
+		sst_mix_codec1_controls, ARRAY_SIZE(sst_mix_codec1_controls)),
+	SND_SOC_DAPM_MIXER("MIX Sprot L0", SND_SOC_NOPM, 0, 0,
+		sst_mix_sprot_l0_controls, ARRAY_SIZE(sst_mix_sprot_l0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media L1", SND_SOC_NOPM, 0, 0,
+		sst_mix_media_l1_controls, ARRAY_SIZE(sst_mix_media_l1_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media L2", SND_SOC_NOPM, 0, 0,
+		sst_mix_media_l2_controls, ARRAY_SIZE(sst_mix_media_l2_controls)),
+	SND_SOC_DAPM_MIXER("MIX Speech Tx", SND_SOC_NOPM, 0, 0,
+		sst_mix_speech_tx_controls, ARRAY_SIZE(sst_mix_speech_tx_controls)),
+	SND_SOC_DAPM_MIXER("MIX Speech Rx", SND_SOC_NOPM, 0, 0,
+		sst_mix_speech_rx_controls, ARRAY_SIZE(sst_mix_speech_rx_controls)),
+	SND_SOC_DAPM_MIXER("MIX Voip", SND_SOC_NOPM, 0, 0,
+		sst_mix_voip_controls, ARRAY_SIZE(sst_mix_voip_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM0", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm0_controls, ARRAY_SIZE(sst_mix_pcm0_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM1", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm1_controls, ARRAY_SIZE(sst_mix_pcm1_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM2", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm2_controls, ARRAY_SIZE(sst_mix_pcm2_controls)),
+	SND_SOC_DAPM_MIXER("MIX Aware", SND_SOC_NOPM, 0, 0,
+		sst_mix_aware_controls, ARRAY_SIZE(sst_mix_aware_controls)),
+	SND_SOC_DAPM_MIXER("MIX VAD", SND_SOC_NOPM, 0, 0,
+		sst_mix_vad_controls, ARRAY_SIZE(sst_mix_vad_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media0", SND_SOC_NOPM, 0, 0,
+		sst_mix_media0_controls, ARRAY_SIZE(sst_mix_media0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media1", SND_SOC_NOPM, 0, 0,
+		sst_mix_media1_controls, ARRAY_SIZE(sst_mix_media1_controls)),
+	SND_SOC_DAPM_MIXER("MIX FM", SND_SOC_NOPM, 0, 0,
+		sst_mix_fm_controls, ARRAY_SIZE(sst_mix_fm_controls)),
+
+	/* switches for mixer outputs */
+	SND_SOC_DAPM_SWITCH("Mix Modem Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_modem),
+	SND_SOC_DAPM_SWITCH("Mix Codec0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_codec0),
+	SND_SOC_DAPM_SWITCH("Mix Codec1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_codec1),
+	SND_SOC_DAPM_SWITCH("Mix Sprot L0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_sprot_l0),
+	SND_SOC_DAPM_SWITCH("Mix Media L1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_media_l1),
+	SND_SOC_DAPM_SWITCH("Mix Media L2 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_media_l2),
+	SND_SOC_DAPM_SWITCH("Mix Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("Mix Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("Mix Voip Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_voip),
+	SND_SOC_DAPM_SWITCH("Mix PCM0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("Mix PCM1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("Mix PCM2 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_pcm2),
+	SND_SOC_DAPM_SWITCH("Mix Aware Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_aware),
+	SND_SOC_DAPM_SWITCH("Mix VAD Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_vad),
+	SND_SOC_DAPM_SWITCH("Mix Media0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_media0),
+	SND_SOC_DAPM_SWITCH("Mix Media1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_media1),
+	SND_SOC_DAPM_SWITCH("Mix FM Switch", SND_SOC_NOPM, 0, 0,
+		&sst_mix_sw_fm),
+
+	/* output pipeline switches */
+	SND_SOC_DAPM_SWITCH("Out Modem Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_modem),
+	SND_SOC_DAPM_SWITCH("Out Codec0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_codec0),
+	SND_SOC_DAPM_SWITCH("Out Codec1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_codec1),
+	SND_SOC_DAPM_SWITCH("Out Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("Out Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("Out Voip Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_voip),
+	SND_SOC_DAPM_SWITCH("Out PCM0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("Out PCM1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("Out PCM2 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_pcm2),
+	SND_SOC_DAPM_SWITCH("Out Aware Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_aware),
+	SND_SOC_DAPM_SWITCH("Out VAD Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_vad),
+	SND_SOC_DAPM_SWITCH("Out Media0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_media0),
+	SND_SOC_DAPM_SWITCH("Out Media1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_media1),
+	SND_SOC_DAPM_SWITCH("Out FM Switch", SND_SOC_NOPM, 0, 0,
+		&sst_out_sw_fm),
+
+	/* Input pipeline switches */
+	SND_SOC_DAPM_SWITCH("In Modem Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_modem),
+	SND_SOC_DAPM_SWITCH("In Codec0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_codec0),
+	SND_SOC_DAPM_SWITCH("In Codec1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_codec1),
+	SND_SOC_DAPM_SWITCH("In Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("In Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("In Tone Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_tone),
+	SND_SOC_DAPM_SWITCH("In Voip Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_voip),
+	SND_SOC_DAPM_SWITCH("In PCM0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("In PCM1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("In Media0 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_media0),
+	SND_SOC_DAPM_SWITCH("In Media1 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_media1),
+	SND_SOC_DAPM_SWITCH("In Media2 Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_media2),
+	SND_SOC_DAPM_SWITCH("In FM Switch", SND_SOC_NOPM, 0, 0,
+		&sst_in_sw_fm),
+};
+
+/*
+ * DAPM route table wiring the graph: each main mixer takes every input
+ * pipeline switch plus the three loop outputs and the sidetone tap;
+ * each mixer output then chains through its "Mix <x> Switch" and
+ * "Out <x> Switch" to an external output or AIF.  The media domain is
+ * bridged to the main domain via Media0/Media1 -> PCM0/PCM1 and one
+ * return path PCM0 -> Media2.
+ */
+static const struct snd_soc_dapm_route intercon[] = {
+	/* media mixer settings */
+	{ "In Media0 Switch", "Switch", "Media IN0"},
+	{ "In Media1 Switch", "Switch", "Media IN1"},
+	{ "MIX Media0", "Media0", "In Media0 Switch"},
+	{ "MIX Media0", "Media1", "In Media1 Switch"},
+	{ "MIX Media0", "Media2", "In Media2 Switch"},
+	{ "MIX Media1", "Media0", "In Media0 Switch"},
+	{ "MIX Media1", "Media1", "In Media1 Switch"},
+	{ "MIX Media1", "Media2", "In Media2 Switch"},
+
+	/* media to main mixer intercon */
+	/* two media paths from media to main */
+	{ "Mix Media0 Switch", "Switch", "MIX Media0"},
+	{ "Out Media0 Switch", "Switch", "Mix Media0 Switch"},
+	{ "In PCM0 Switch", "Switch", "Out Media0 Switch"},
+	{ "Mix Media1 Switch", "Switch", "MIX Media1"},
+	{ "Out Media1 Switch", "Switch", "Mix Media1 Switch"},
+	{ "In PCM1 Switch", "Switch", "Out Media1 Switch"},
+	/* one back from main to media */
+	{ "Mix PCM0 Switch", "Switch", "MIX PCM0"},
+	{ "Out PCM0 Switch", "Switch", "Mix PCM0 Switch"},
+	{ "In Media2 Switch", "Switch", "Out PCM0 Switch"},
+
+	/* main mixer inputs - all inputs connect to mixer */
+	{ "MIX Modem", "Modem", "In Modem Switch"},
+	{ "MIX Modem", "Codec0", "In Codec0 Switch"},
+	{ "MIX Modem", "Codec1", "In Codec1 Switch"},
+	{ "MIX Modem", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Modem", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Modem", "Tone", "In Tone Switch"},
+	{ "MIX Modem", "Voip", "In Voip Switch"},
+	{ "MIX Modem", "PCM0", "In PCM0 Switch"},
+	{ "MIX Modem", "PCM1", "In PCM1 Switch"},
+	{ "MIX Modem", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Modem", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Modem", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Modem", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Modem", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Codec0", "Modem", "In Modem Switch"},
+	{ "MIX Codec0", "Codec0", "In Codec0 Switch"},
+	{ "MIX Codec0", "Codec1", "In Codec1 Switch"},
+	{ "MIX Codec0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Codec0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Codec0", "Tone", "In Tone Switch"},
+	{ "MIX Codec0", "Voip", "In Voip Switch"},
+	{ "MIX Codec0", "PCM0", "In PCM0 Switch"},
+	{ "MIX Codec0", "PCM1", "In PCM1 Switch"},
+	{ "MIX Codec0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Codec0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Codec0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Codec0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Codec0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Codec1", "Modem", "In Modem Switch"},
+	{ "MIX Codec1", "Codec0", "In Codec0 Switch"},
+	{ "MIX Codec1", "Codec1", "In Codec1 Switch"},
+	{ "MIX Codec1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Codec1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Codec1", "Tone", "In Tone Switch"},
+	{ "MIX Codec1", "Voip", "In Voip Switch"},
+	{ "MIX Codec1", "PCM0", "In PCM0 Switch"},
+	{ "MIX Codec1", "PCM1", "In PCM1 Switch"},
+	{ "MIX Codec1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Codec1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Codec1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Codec1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Codec1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Sprot L0", "Modem", "In Modem Switch"},
+	{ "MIX Sprot L0", "Codec0", "In Codec0 Switch"},
+	{ "MIX Sprot L0", "Codec1", "In Codec1 Switch"},
+	{ "MIX Sprot L0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Sprot L0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Sprot L0", "Tone", "In Tone Switch"},
+	{ "MIX Sprot L0", "Voip", "In Voip Switch"},
+	{ "MIX Sprot L0", "PCM0", "In PCM0 Switch"},
+	{ "MIX Sprot L0", "PCM1", "In PCM1 Switch"},
+	{ "MIX Sprot L0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Sprot L0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Sprot L0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Sprot L0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Sprot L0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Media L1", "Modem", "In Modem Switch"},
+	{ "MIX Media L1", "Codec0", "In Codec0 Switch"},
+	{ "MIX Media L1", "Codec1", "In Codec1 Switch"},
+	{ "MIX Media L1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Media L1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Media L1", "Tone", "In Tone Switch"},
+	{ "MIX Media L1", "Voip", "In Voip Switch"},
+	{ "MIX Media L1", "PCM0", "In PCM0 Switch"},
+	{ "MIX Media L1", "PCM1", "In PCM1 Switch"},
+	{ "MIX Media L1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Media L1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Media L1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Media L1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Media L1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Media L2", "Modem", "In Modem Switch"},
+	{ "MIX Media L2", "Codec0", "In Codec0 Switch"},
+	{ "MIX Media L2", "Codec1", "In Codec1 Switch"},
+	{ "MIX Media L2", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Media L2", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Media L2", "Tone", "In Tone Switch"},
+	{ "MIX Media L2", "Voip", "In Voip Switch"},
+	{ "MIX Media L2", "PCM0", "In PCM0 Switch"},
+	{ "MIX Media L2", "PCM1", "In PCM1 Switch"},
+	{ "MIX Media L2", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Media L2", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Media L2", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Media L2", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Media L2", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Speech Rx", "Modem", "In Modem Switch"},
+	{ "MIX Speech Rx", "Codec0", "In Codec0 Switch"},
+	{ "MIX Speech Rx", "Codec1", "In Codec1 Switch"},
+	{ "MIX Speech Rx", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Speech Rx", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Speech Rx", "Tone", "In Tone Switch"},
+	{ "MIX Speech Rx", "Voip", "In Voip Switch"},
+	{ "MIX Speech Rx", "PCM0", "In PCM0 Switch"},
+	{ "MIX Speech Rx", "PCM1", "In PCM1 Switch"},
+	{ "MIX Speech Rx", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Speech Rx", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Speech Rx", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Speech Rx", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Speech Rx", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Speech Tx", "Modem", "In Modem Switch"},
+	{ "MIX Speech Tx", "Codec0", "In Codec0 Switch"},
+	{ "MIX Speech Tx", "Codec1", "In Codec1 Switch"},
+	{ "MIX Speech Tx", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Speech Tx", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Speech Tx", "Tone", "In Tone Switch"},
+	{ "MIX Speech Tx", "Voip", "In Voip Switch"},
+	{ "MIX Speech Tx", "PCM0", "In PCM0 Switch"},
+	{ "MIX Speech Tx", "PCM1", "In PCM1 Switch"},
+	{ "MIX Speech Tx", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Speech Tx", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Speech Tx", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Speech Tx", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Speech Tx", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Voip", "Modem", "In Modem Switch"},
+	{ "MIX Voip", "Codec0", "In Codec0 Switch"},
+	{ "MIX Voip", "Codec1", "In Codec1 Switch"},
+	{ "MIX Voip", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Voip", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Voip", "Tone", "In Tone Switch"},
+	{ "MIX Voip", "Voip", "In Voip Switch"},
+	{ "MIX Voip", "PCM0", "In PCM0 Switch"},
+	{ "MIX Voip", "PCM1", "In PCM1 Switch"},
+	{ "MIX Voip", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Voip", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Voip", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Voip", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Voip", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM0", "Modem", "In Modem Switch"},
+	{ "MIX PCM0", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM0", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM0", "Tone", "In Tone Switch"},
+	{ "MIX PCM0", "Voip", "In Voip Switch"},
+	{ "MIX PCM0", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM0", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM1", "Modem", "In Modem Switch"},
+	{ "MIX PCM1", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM1", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM1", "Tone", "In Tone Switch"},
+	{ "MIX PCM1", "Voip", "In Voip Switch"},
+	{ "MIX PCM1", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM1", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM2", "Modem", "In Modem Switch"},
+	{ "MIX PCM2", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM2", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM2", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM2", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM2", "Tone", "In Tone Switch"},
+	{ "MIX PCM2", "Voip", "In Voip Switch"},
+	{ "MIX PCM2", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM2", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM2", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM2", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM2", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM2", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM2", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Aware", "Modem", "In Modem Switch"},
+	{ "MIX Aware", "Codec0", "In Codec0 Switch"},
+	{ "MIX Aware", "Codec1", "In Codec1 Switch"},
+	{ "MIX Aware", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Aware", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Aware", "Tone", "In Tone Switch"},
+	{ "MIX Aware", "Voip", "In Voip Switch"},
+	{ "MIX Aware", "PCM0", "In PCM0 Switch"},
+	{ "MIX Aware", "PCM1", "In PCM1 Switch"},
+	{ "MIX Aware", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Aware", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Aware", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Aware", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Aware", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX VAD", "Modem", "In Modem Switch"},
+	{ "MIX VAD", "Codec0", "In Codec0 Switch"},
+	{ "MIX VAD", "Codec1", "In Codec1 Switch"},
+	{ "MIX VAD", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX VAD", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX VAD", "Tone", "In Tone Switch"},
+	{ "MIX VAD", "Voip", "In Voip Switch"},
+	{ "MIX VAD", "PCM0", "In PCM0 Switch"},
+	{ "MIX VAD", "PCM1", "In PCM1 Switch"},
+	{ "MIX VAD", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX VAD", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX VAD", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX VAD", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX VAD", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX FM", "Modem", "In Modem Switch"},
+	{ "MIX FM", "Codec0", "In Codec0 Switch"},
+	{ "MIX FM", "Codec1", "In Codec1 Switch"},
+	{ "MIX FM", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX FM", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX FM", "Tone", "In Tone Switch"},
+	{ "MIX FM", "Voip", "In Voip Switch"},
+	{ "MIX FM", "PCM0", "In PCM0 Switch"},
+	{ "MIX FM", "PCM1", "In PCM1 Switch"},
+	{ "MIX FM", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX FM", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX FM", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX FM", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX FM", "Sidetone", "Mix Speech Tx Switch"},
+
+	/* now connect the mixers to output switches */
+	{ "Mix Modem Switch", "Switch", "MIX Modem"},
+	{ "Out Modem Switch", "Switch", "Mix Modem Switch"},
+	{ "Mix Codec0 Switch", "Switch", "MIX Codec0"},
+	{ "Out Codec0 Switch", "Switch", "Mix Codec0 Switch"},
+	{ "Mix Codec1 Switch", "Switch", "MIX Codec1"},
+	{ "Out Codec1 Switch", "Switch", "Mix Codec1 Switch"},
+	{ "Mix Speech Tx Switch", "Switch", "MIX Speech Tx"},
+	{ "Out Speech Tx Switch", "Switch", "Mix Speech Tx Switch"},
+	{ "Mix Speech Rx Switch", "Switch", "MIX Speech Rx"},
+	{ "Out Speech Rx Switch", "Switch", "Mix Speech Rx Switch"},
+	{ "Mix Voip Switch", "Switch", "MIX Voip"},
+	{ "Out Voip Switch", "Switch", "Mix Voip Switch"},
+	{ "Mix Aware Switch", "Switch", "MIX Aware"},
+	{ "Out Aware Switch", "Switch", "Mix Aware Switch"},
+	{ "Mix VAD Switch", "Switch", "MIX VAD"},
+	{ "Out VAD Switch", "Switch", "Mix VAD Switch"},
+	{ "Mix FM Switch", "Switch", "MIX FM"},
+	{ "Out FM Switch", "Switch", "Mix FM Switch"},
+	{ "Mix PCM1 Switch", "Switch", "MIX PCM1"},
+	{ "Out PCM1 Switch", "Switch", "Mix PCM1 Switch"},
+	{ "Mix PCM2 Switch", "Switch", "MIX PCM2"},
+	{ "Out PCM2 Switch", "Switch", "Mix PCM2 Switch"},
+
+	/* the loops
+	 * media loops don't have input/output switches, just mixer enable
+	 */
+	{ "Mix Sprot L0 Switch", "Switch", "MIX Sprot L0"},
+	{ "Mix Media L1 Switch", "Switch", "MIX Media L1"},
+	{ "Mix Media L2 Switch", "Switch", "MIX Media L2"},
+	/* so no need as mixer switches are
+	 * inputs to all mixers
+	 * need to connect speech loops here
+	 */
+	{ "In Speech Rx Switch", "Switch", "Out Speech Rx Switch"},
+	{ "In Speech Tx Switch", "Switch", "Out Speech Tx Switch"},
+	/* last one, connect the output switches to the external inputs
+	 * and outputs. Also connect the AIFs
+	 */
+	{ "In Modem Switch", "Switch", "Modem IN"},
+	{ "In Codec0 Switch", "Switch", "Codec IN0"},
+	{ "In Codec1 Switch", "Switch", "Codec IN1"},
+	{ "In Tone Switch", "Switch", "Tone IN"},
+	{ "In FM Switch", "Switch", "FM IN"},
+
+	{ "Modem OUT", NULL, "Out Modem Switch"},
+	{ "Codec OUT0", NULL, "Out Codec0 Switch"},
+	{ "Codec OUT1", NULL, "Out Codec1 Switch"},
+	{ "FM OUT", NULL, "Out FM Switch"},
+
+	{ "In Voip Switch", "Switch", "Voip IN"},
+
+	{ "Voip OUT", NULL, "Out Voip Switch"},
+	{ "PCM1 OUT", NULL, "Out PCM1 Switch"},
+	{ "Aware OUT", NULL, "Out Aware Switch"},
+	{ "VAD OUT", NULL, "Out VAD Switch"},
+};
+
+/*
+ * sst_byte_control_get - read back the last binary byte stream.
+ *
+ * Copies the driver's byte_stream buffer (SST_MAX_BIN_BYTES) into the
+ * userspace control value.  Takes sst->lock to serialize against
+ * sst_byte_control_set(), which memcpy's into the same buffer under
+ * that lock — without it the reader can observe a torn update.
+ * Returns 0.
+ */
+int sst_byte_control_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("in %s\n", __func__);
+	mutex_lock(&sst->lock);
+	memcpy(ucontrol->value.bytes.data, sst->byte_stream, SST_MAX_BIN_BYTES);
+	print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
+			     (const void *)sst->byte_stream, 32);
+	mutex_unlock(&sst->lock);
+	return 0;
+}
+
+/*
+ * sst_check_binary_input - sanity-check a user-supplied byte-stream header.
+ *
+ * Validates the len, type, block and task_id fields of the
+ * snd_sst_bytes_v2 header at the start of @stream before the blob is
+ * forwarded to the DSP firmware.
+ *
+ * Returns 0 when the header is acceptable, -EINVAL otherwise.
+ */
+static int sst_check_binary_input(char *stream)
+{
+ struct snd_sst_bytes_v2 *bytes = (struct snd_sst_bytes_v2 *)stream;
+
+ /* NOTE(review): 1000 is a magic upper bound; presumably it must stay
+ * below SST_MAX_BIN_BYTES minus the header - confirm and name it. */
+ if (bytes->len == 0 || bytes->len > 1000) {
+ pr_err("length out of bounds %d\n", bytes->len);
+ return -EINVAL;
+ }
+ if (bytes->type == 0 || bytes->type > SND_SST_BYTES_GET) {
+ pr_err("type out of bounds: %d\n", bytes->type);
+ return -EINVAL;
+ }
+ /* only block values 0 and 1 are defined */
+ if (bytes->block > 1) {
+ pr_err("block invalid %d\n", bytes->block);
+ return -EINVAL;
+ }
+ if (bytes->task_id == SST_TASK_ID_NONE || bytes->task_id > SST_TASK_ID_MAX) {
+ pr_err("taskid invalid %d\n", bytes->task_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * sst_byte_control_set - accept a raw byte-control blob and send it to DSP.
+ *
+ * Copies the user blob into sst->byte_stream under sst->lock, validates
+ * the embedded snd_sst_bytes_v2 header, and on success forwards it via
+ * the SST_SET_BYTE_STREAM generic-params op.
+ *
+ * Returns 0 on success, -EINVAL on a malformed header, or the error
+ * reported by the DSP driver.
+ */
+int sst_byte_control_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ int ret = 0;
+
+ pr_debug("in %s\n", __func__);
+ mutex_lock(&sst->lock);
+ memcpy(sst->byte_stream, ucontrol->value.bytes.data, SST_MAX_BIN_BYTES);
+ if (0 != sst_check_binary_input(sst->byte_stream)) {
+ mutex_unlock(&sst->lock);
+ return -EINVAL;
+ }
+ print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
+ (const void *)sst->byte_stream, 32);
+ ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM, sst->byte_stream);
+ mutex_unlock(&sst->lock);
+
+ return ret;
+}
+
+/* sst_pipe_id_control_get - report the currently selected DSP pipe id. */
+static int sst_pipe_id_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ int ret = 0;
+
+ ucontrol->value.integer.value[0] = sst->pipe_id;
+
+ return ret;
+}
+
+/*
+ * sst_pipe_id_control_set - cache the pipe id used by later byte controls.
+ * Range is bounded by the kcontrol definition (0..0x9A); only cached here,
+ * no IPC is sent.
+ */
+static int sst_pipe_id_control_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ int ret = 0;
+
+ sst->pipe_id = ucontrol->value.integer.value[0];
+ pr_debug("%s: pipe_id %d", __func__, sst->pipe_id);
+
+ return ret;
+}
+
+/* dB range for mrfld compress volume is -144dB to +36dB.
+ * Gain library expects user input in terms of 0.1dB, for example,
+ * 60 (in decimal) represents 6dB.
+ * MW will pass 2's complement value for negative dB values.
+ */
+/*
+ * sst_compr_vol_get - query the compress-stream volume from firmware.
+ *
+ * Builds a SND_SST_BYTES_GET IPC, sends it, then extracts the first u16
+ * gain field from the reply buffer and reports it (also caching it in
+ * the kcontrol's private data).
+ */
+static int sst_compr_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+ u16 gain;
+ /* NOTE(review): ret is unsigned yet receives a possibly negative
+ * error and is returned from an int function; works via implicit
+ * conversion but plain int would be clearer. */
+ unsigned int gain_offset, ret;
+
+ sst_create_compr_vol_ipc(sst->byte_stream, SND_SST_BYTES_GET, amc);
+ mutex_lock(&sst->lock);
+ ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+ sst->byte_stream);
+ mutex_unlock(&sst->lock);
+ if (ret) {
+ pr_err("failed to get compress vol from fw: %d\n", ret);
+ return ret;
+ }
+ /* reply payload starts after the bytes header + dsp header */
+ gain_offset = sizeof(struct snd_sst_bytes_v2) +
+ sizeof(struct ipc_dsp_hdr);
+
+ /* Get params format for vol ctrl lib, size 6 bytes :
+ * u16 left_gain, u16 right_gain, u16 ramp
+ */
+ /* only left_gain (the first u16) is read and reported */
+ memcpy(&gain,
+ (unsigned int *)(sst->byte_stream + gain_offset),
+ sizeof(u16));
+ pr_debug("%s: cell_gain = %d\n", __func__, gain);
+ amc->value = gain;
+ ucontrol->value.integer.value[0] = gain;
+ return 0;
+}
+
+/*
+ * sst_compr_vol_set - program the compress-stream volume into firmware.
+ *
+ * Caches the new gain in the kcontrol private data, builds and sends a
+ * SND_SST_BYTES_SET IPC; on IPC failure the previously cached gain is
+ * restored so the control still reflects hardware state.
+ */
+static int sst_compr_vol_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+ int ret = 0;
+ unsigned int old_val;
+
+ pr_debug("%s: cell_gain = %ld\n", __func__,\
+ ucontrol->value.integer.value[0]);
+ old_val = amc->value;
+ amc->value = ucontrol->value.integer.value[0];
+ sst_create_compr_vol_ipc(sst->byte_stream, SND_SST_BYTES_SET,
+ amc);
+
+ mutex_lock(&sst->lock);
+ ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+ sst->byte_stream);
+ mutex_unlock(&sst->lock);
+ if (ret) {
+ pr_err("failed to set compress vol in fw: %d\n", ret);
+ /* roll back the cached gain so get() stays truthful */
+ amc->value = old_val;
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * sst_vtsv_enroll_set - toggle VTSV (voice trigger) enrollment.
+ *
+ * Caches the boolean and, only when enabling, notifies the DSP through
+ * SST_SET_VTSV_INFO.
+ * NOTE(review): disabling merely clears the cached flag without any IPC -
+ * confirm the firmware needs no explicit "disable" notification.
+ */
+static int sst_vtsv_enroll_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ int ret = 0;
+
+ sst->vtsv_enroll = ucontrol->value.integer.value[0];
+ mutex_lock(&sst->lock);
+ if (sst->vtsv_enroll)
+ ret = sst_dsp->ops->set_generic_params(SST_SET_VTSV_INFO,
+ (void *)&sst->vtsv_enroll);
+ mutex_unlock(&sst->lock);
+ return ret;
+}
+
+/* sst_vtsv_enroll_get - report the cached VTSV enrollment flag. */
+static int sst_vtsv_enroll_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+ ucontrol->value.integer.value[0] = sst->vtsv_enroll;
+ return 0;
+}
+
+/* This value corresponds to two's complement value of -10 or -1dB */
+#define SST_COMPR_VOL_MAX_INTEG_GAIN 0xFFF6
+#define SST_COMPR_VOL_MUTE 0xFA60 /* 2's complement of -1440 or -144dB*/
+
+
+/*
+ * Platform-level ALSA controls for Merrifield: raw byte-stream access,
+ * pipe-id selection, compress volume and VTSV enrollment.
+ */
+static const struct snd_kcontrol_new sst_mrfld_controls[] = {
+ SND_SOC_BYTES_EXT("SST Byte control", SST_MAX_BIN_BYTES,
+ sst_byte_control_get, sst_byte_control_set),
+ SOC_SINGLE_EXT("SST Pipe_id control", SST_PIPE_CONTROL, 0, 0x9A, 0,
+ sst_pipe_id_control_get, sst_pipe_id_control_set),
+ SST_ALGO_KCONTROL_INT("Compress Volume", SST_COMPRESS_VOL,
+ 0, SST_COMPR_VOL_MAX_INTEG_GAIN, 0,
+ sst_compr_vol_get, sst_compr_vol_set,
+ SST_ALGO_VOLUME_CONTROL, PIPE_MEDIA0_IN, 0,
+ SST_COMPR_VOL_MUTE),
+ SOC_SINGLE_BOOL_EXT("SST VTSV Enroll", 0, sst_vtsv_enroll_get,
+ sst_vtsv_enroll_set),
+};
+
+/* sysfs knobs exposing the low-latency / deep-buffer period thresholds */
+static DEVICE_ULONG_ATTR(low_latency_threshold, 0644, ll_threshold);
+static DEVICE_ULONG_ATTR(deep_buffer_threshold, 0644, db_threshold);
+
+static struct attribute *device_sysfs_attrs[] = {
+ &dev_attr_low_latency_threshold.attr.attr,
+ &dev_attr_deep_buffer_threshold.attr.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = device_sysfs_attrs,
+};
+
+/*
+ * sst_dsp_init - platform-probe time setup for the SST DSP controls.
+ *
+ * Allocates (devm-managed) the shared byte-stream buffer and widget state,
+ * registers DAPM widgets/routes, adds the platform kcontrols, and creates
+ * the threshold sysfs group.
+ *
+ * Returns 0 on success or a negative error code.
+ * NOTE(review): if sysfs_create_group() fails, the error is returned after
+ * widgets/controls were already registered - confirm the caller tolerates
+ * that partial state.
+ */
+int sst_dsp_init(struct snd_soc_platform *platform)
+{
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ int error = 0;
+
+ sst->byte_stream = devm_kzalloc(platform->dev,
+ SST_MAX_BIN_BYTES, GFP_KERNEL);
+ if (sst->byte_stream == NULL) {
+ pr_err("kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ sst->widget = devm_kzalloc(platform->dev,
+ SST_NUM_WIDGETS * sizeof(*sst->widget),
+ GFP_KERNEL);
+ if (sst->widget == NULL) {
+ pr_err("kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ sst->vtsv_enroll = false;
+ /* Assign the pointer variables */
+ sst->ll_db.low_latency = &ll_threshold;
+ sst->ll_db.deep_buffer = &db_threshold;
+
+ pr_debug("Default ll thres %lu db thres %lu\n", ll_threshold, db_threshold);
+
+ snd_soc_dapm_new_controls(&platform->dapm, sst_dapm_widgets,
+ ARRAY_SIZE(sst_dapm_widgets));
+ snd_soc_dapm_add_routes(&platform->dapm, intercon,
+ ARRAY_SIZE(intercon));
+ snd_soc_dapm_new_widgets(&platform->dapm);
+ snd_soc_add_platform_controls(platform, sst_mrfld_controls,
+ ARRAY_SIZE(sst_mrfld_controls));
+
+ error = sysfs_create_group(&platform->dev->kobj, &attr_group);
+ if (error)
+ pr_err("failed to create sysfs files %d\n", error);
+
+ return error;
+}
--- /dev/null
+/*
+ * controls_v2.h - Intel MID Platform driver header file
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Ramesh Babu <ramesh.babu.koul@intel.com>
+ * Author: Omair M Abdullah <omair.m.abdullah@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SST_CONTROLS_V2_H__
+#define __SST_CONTROLS_V2_H__
+
+/*
+ * This section defines the map for the mixer widgets.
+ *
+ * Each mixer will be represented by single value and that value will have each
+ * bit corresponding to one input
+ *
+ * Each out_id will correspond to one mixer and one path. Each input will be
+ * represented by single bit in the register.
+ */
+
+/* mixer register ids here */
+#define SST_MIX(x) (x)
+
+#define SST_MIX_MODEM SST_MIX(0)
+#define SST_MIX_BT SST_MIX(1)
+#define SST_MIX_CODEC0 SST_MIX(2)
+#define SST_MIX_CODEC1 SST_MIX(3)
+#define SST_MIX_LOOP0 SST_MIX(4)
+#define SST_MIX_LOOP1 SST_MIX(5)
+#define SST_MIX_LOOP2 SST_MIX(6)
+#define SST_MIX_PROBE SST_MIX(7)
+#define SST_MIX_HF_SNS SST_MIX(8)
+#define SST_MIX_HF SST_MIX(9)
+#define SST_MIX_SPEECH SST_MIX(10)
+#define SST_MIX_RXSPEECH SST_MIX(11)
+#define SST_MIX_VOIP SST_MIX(12)
+#define SST_MIX_PCM0 SST_MIX(13)
+#define SST_MIX_PCM1 SST_MIX(14)
+#define SST_MIX_PCM2 SST_MIX(15)
+#define SST_MIX_AWARE SST_MIX(16)
+#define SST_MIX_VAD SST_MIX(17)
+#define SST_MIX_FM SST_MIX(18)
+
+#define SST_MIX_MEDIA0 SST_MIX(19)
+#define SST_MIX_MEDIA1 SST_MIX(20)
+
+#define SST_NUM_MIX (SST_MIX_MEDIA1 + 1)
+
+#define SST_MIX_SWITCH (SST_NUM_MIX + 1)
+#define SST_OUT_SWITCH (SST_NUM_MIX + 2)
+#define SST_IN_SWITCH (SST_NUM_MIX + 3)
+#define SST_MUX_REG (SST_NUM_MIX + 4)
+#define SST_REG_LAST (SST_MUX_REG)
+
+/* last entry defines array size */
+#define SST_NUM_WIDGETS (SST_REG_LAST + 1)
+
+#define SST_BT_FM_MUX_SHIFT 0
+#define SST_VOICE_MODE_SHIFT 1
+#define SST_BT_MODE_SHIFT 2
+
+/* in each mixer register we will define one bit for each input */
+#define SST_MIX_IP(x) (x)
+
+#define SST_IP_MODEM SST_MIX_IP(0)
+#define SST_IP_BT SST_MIX_IP(1)
+#define SST_IP_CODEC0 SST_MIX_IP(2)
+#define SST_IP_CODEC1 SST_MIX_IP(3)
+#define SST_IP_LOOP0 SST_MIX_IP(4)
+#define SST_IP_LOOP1 SST_MIX_IP(5)
+#define SST_IP_LOOP2 SST_MIX_IP(6)
+#define SST_IP_PROBE SST_MIX_IP(7)
+#define SST_IP_SIDETONE SST_MIX_IP(8)
+#define SST_IP_TXSPEECH SST_MIX_IP(9)
+#define SST_IP_SPEECH SST_MIX_IP(10)
+#define SST_IP_TONE SST_MIX_IP(11)
+#define SST_IP_VOIP SST_MIX_IP(12)
+#define SST_IP_PCM0 SST_MIX_IP(13)
+#define SST_IP_PCM1 SST_MIX_IP(14)
+#define SST_IP_LOW_PCM0 SST_MIX_IP(15)
+#define SST_IP_FM SST_MIX_IP(16)
+#define SST_IP_MEDIA0 SST_MIX_IP(17)
+#define SST_IP_MEDIA1 SST_MIX_IP(18)
+#define SST_IP_MEDIA2 SST_MIX_IP(19)
+#define SST_IP_MEDIA3 SST_MIX_IP(20)
+
+#define SST_IP_LAST SST_IP_MEDIA3
+
+#define SST_SWM_INPUT_COUNT (SST_IP_LAST + 1)
+#define SST_CMD_SWM_MAX_INPUTS 6
+
+#define SST_PATH_ID_SHIFT 8
+#define SST_DEFAULT_LOCATION_ID 0xFFFF
+#define SST_DEFAULT_CELL_NBR 0xFF
+#define SST_DEFAULT_MODULE_ID 0xFFFF
+
+/*
+ * Audio DSP Path Ids. Specified by the audio DSP FW
+ */
+enum sst_path_index {
+ SST_PATH_INDEX_MODEM_OUT = (0x00 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_BT_OUT = (0x01 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_CODEC_OUT0 = (0x02 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_CODEC_OUT1 = (0x03 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_SPROT_LOOP_OUT = (0x04 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT = (0x05 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT = (0x06 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE_OUT = (0x07 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_HF_SNS_OUT = (0x08 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_VOICE_UPLINK_REF2 = (0x08 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_HF_OUT = (0x09 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_VOICE_UPLINK_REF1 = (0x09 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_SPEECH_OUT = (0x0A << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_VOICE_UPLINK = (0x0A << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_RX_SPEECH_OUT = (0x0B << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_VOICE_DOWNLINK = (0x0B << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_VOIP_OUT = (0x0C << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM0_OUT = (0x0D << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM1_OUT = (0x0E << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM2_OUT = (0x0F << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_AWARE_OUT = (0x10 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_VAD_OUT = (0x11 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_MEDIA0_OUT = (0x12 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA1_OUT = (0x13 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_FM_OUT = (0x14 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_PROBE1_PIPE_OUT = (0x15 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE2_PIPE_OUT = (0x16 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE3_PIPE_OUT = (0x17 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE4_PIPE_OUT = (0x18 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE5_PIPE_OUT = (0x19 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE6_PIPE_OUT = (0x1A << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE7_PIPE_OUT = (0x1B << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE8_PIPE_OUT = (0x1C << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_SIDETONE_OUT = (0x1D << SST_PATH_ID_SHIFT),
+
+ /* Start of input paths */
+ SST_PATH_INDEX_MODEM_IN = (0x80 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_BT_IN = (0x81 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_CODEC_IN0 = (0x82 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_CODEC_IN1 = (0x83 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_SPROT_LOOP_IN = (0x84 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP1_IN = (0x85 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP2_IN = (0x86 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_PROBE_IN = (0x87 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_SIDETONE_IN = (0x88 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_TX_SPEECH_IN = (0x89 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_SPEECH_IN = (0x8A << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_TONE_IN = (0x8B << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_VOIP_IN = (0x8C << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_PCM0_IN = (0x8D << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM1_IN = (0x8E << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_MEDIA0_IN = (0x8F << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA1_IN = (0x90 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA2_IN = (0x91 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_FM_IN = (0x92 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_PROBE1_PIPE_IN = (0x93 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE2_PIPE_IN = (0x94 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE3_PIPE_IN = (0x95 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE4_PIPE_IN = (0x96 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE5_PIPE_IN = (0x97 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE6_PIPE_IN = (0x98 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE7_PIPE_IN = (0x99 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PROBE8_PIPE_IN = (0x9A << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_MEDIA3_IN = (0x9C << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_LOW_PCM0_IN = (0x9D << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_RESERVED = (0xFF << SST_PATH_ID_SHIFT),
+};
+
+/*
+ * switch matrix input path IDs
+ */
+enum sst_swm_inputs {
+ SST_SWM_IN_MODEM = (SST_PATH_INDEX_MODEM_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_BT = (SST_PATH_INDEX_BT_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_CODEC0 = (SST_PATH_INDEX_CODEC_IN0 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_CODEC1 = (SST_PATH_INDEX_CODEC_IN1 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_SPROT_LOOP = (SST_PATH_INDEX_SPROT_LOOP_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_MEDIA_LOOP1 = (SST_PATH_INDEX_MEDIA_LOOP1_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_MEDIA_LOOP2 = (SST_PATH_INDEX_MEDIA_LOOP2_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_PROBE = (SST_PATH_INDEX_PROBE_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_SIDETONE = (SST_PATH_INDEX_SIDETONE_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_TXSPEECH = (SST_PATH_INDEX_TX_SPEECH_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_SPEECH = (SST_PATH_INDEX_SPEECH_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_TONE = (SST_PATH_INDEX_TONE_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_VOIP = (SST_PATH_INDEX_VOIP_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_PCM0 = (SST_PATH_INDEX_PCM0_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_PCM1 = (SST_PATH_INDEX_PCM1_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_MEDIA0 = (SST_PATH_INDEX_MEDIA0_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_MEDIA1 = (SST_PATH_INDEX_MEDIA1_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_MEDIA2 = (SST_PATH_INDEX_MEDIA2_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_FM = (SST_PATH_INDEX_FM_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_MEDIA3 = (SST_PATH_INDEX_MEDIA3_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_LOW_PCM0 = (SST_PATH_INDEX_LOW_PCM0_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_END = (SST_PATH_INDEX_RESERVED | SST_DEFAULT_CELL_NBR)
+};
+
+/*
+ * switch matrix output path IDs
+ */
+enum sst_swm_outputs {
+ SST_SWM_OUT_MODEM = (SST_PATH_INDEX_MODEM_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_BT = (SST_PATH_INDEX_BT_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_CODEC0 = (SST_PATH_INDEX_CODEC_OUT0 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_CODEC1 = (SST_PATH_INDEX_CODEC_OUT1 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_SPROT_LOOP = (SST_PATH_INDEX_SPROT_LOOP_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_MEDIA_LOOP1 = (SST_PATH_INDEX_MEDIA_LOOP1_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_MEDIA_LOOP2 = (SST_PATH_INDEX_MEDIA_LOOP2_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_PROBE = (SST_PATH_INDEX_PROBE_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_HF_SNS = (SST_PATH_INDEX_HF_SNS_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_HF = (SST_PATH_INDEX_HF_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_SPEECH = (SST_PATH_INDEX_SPEECH_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_RXSPEECH = (SST_PATH_INDEX_RX_SPEECH_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_VOIP = (SST_PATH_INDEX_VOIP_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_PCM0 = (SST_PATH_INDEX_PCM0_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_PCM1 = (SST_PATH_INDEX_PCM1_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_PCM2 = (SST_PATH_INDEX_PCM2_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_AWARE = (SST_PATH_INDEX_AWARE_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_VAD = (SST_PATH_INDEX_VAD_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_MEDIA0 = (SST_PATH_INDEX_MEDIA0_OUT | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_OUT_MEDIA1 = (SST_PATH_INDEX_MEDIA1_OUT | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_OUT_FM = (SST_PATH_INDEX_FM_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_END = (SST_PATH_INDEX_RESERVED | SST_DEFAULT_CELL_NBR),
+};
+
+/* direction/class of an IPC message sent from the IA (host) side */
+enum sst_ipc_msg {
+ SST_IPC_IA_CMD = 1,
+ SST_IPC_IA_SET_PARAMS,
+ SST_IPC_IA_GET_PARAMS,
+};
+
+/* whether a byte-stream transfers data to or from the DSP */
+enum sst_cmd_type {
+ SST_CMD_BYTES_SET = 1,
+ SST_CMD_BYTES_GET = 2,
+};
+
+/* firmware task that services a command */
+enum sst_task {
+ SST_TASK_SBA = 1,
+ SST_TASK_FBA_UL,
+ SST_TASK_MMX,
+ SST_TASK_AWARE,
+ SST_TASK_FBA_DL,
+};
+
+/* payload kind: a command or an algorithm parameter blob */
+enum sst_type {
+ SST_TYPE_CMD = 1,
+ SST_TYPE_PARAMS,
+};
+
+/* whether the sender blocks waiting for the DSP reply */
+enum sst_flag {
+ SST_FLAG_BLOCKED = 1,
+ SST_FLAG_NONBLOCK,
+};
+
+/*
+ * Enumeration for indexing the gain cells in VB_SET_GAIN DSP command
+ */
+enum sst_gain_index {
+ /* GAIN IDs for SB task start here */
+ SST_GAIN_INDEX_MODEM_OUT,
+ SST_GAIN_INDEX_MODEM_IN,
+ SST_GAIN_INDEX_BT_OUT,
+ SST_GAIN_INDEX_BT_IN,
+ SST_GAIN_INDEX_FM_OUT,
+
+ SST_GAIN_INDEX_FM_IN,
+ SST_GAIN_INDEX_CODEC_OUT0,
+ SST_GAIN_INDEX_CODEC_OUT1,
+ SST_GAIN_INDEX_CODEC_IN0,
+ SST_GAIN_INDEX_CODEC_IN1,
+
+ SST_GAIN_INDEX_SPROT_LOOP_OUT,
+ SST_GAIN_INDEX_MEDIA_LOOP1_OUT,
+ SST_GAIN_INDEX_MEDIA_LOOP2_OUT,
+ SST_GAIN_INDEX_RX_SPEECH_OUT,
+ SST_GAIN_INDEX_TX_SPEECH_IN,
+
+ SST_GAIN_INDEX_SPEECH_OUT,
+ SST_GAIN_INDEX_SPEECH_IN,
+ SST_GAIN_INDEX_HF_OUT,
+ SST_GAIN_INDEX_HF_SNS_OUT,
+ SST_GAIN_INDEX_TONE_IN,
+
+ SST_GAIN_INDEX_SIDETONE_IN,
+ SST_GAIN_INDEX_PROBE_OUT,
+ SST_GAIN_INDEX_PROBE_IN,
+ SST_GAIN_INDEX_PCM0_IN_LEFT,
+ SST_GAIN_INDEX_PCM0_IN_RIGHT,
+
+ SST_GAIN_INDEX_PCM1_OUT_LEFT,
+ SST_GAIN_INDEX_PCM1_OUT_RIGHT,
+ SST_GAIN_INDEX_PCM1_IN_LEFT,
+ SST_GAIN_INDEX_PCM1_IN_RIGHT,
+ SST_GAIN_INDEX_PCM2_OUT_LEFT,
+
+ SST_GAIN_INDEX_PCM2_OUT_RIGHT,
+ SST_GAIN_INDEX_VOIP_OUT,
+ SST_GAIN_INDEX_VOIP_IN,
+ SST_GAIN_INDEX_AWARE_OUT,
+ SST_GAIN_INDEX_VAD_OUT,
+
+ /* Gain IDs for FBA task start here */
+ SST_GAIN_INDEX_VOICE_UL,
+
+ /* Gain IDs for MMX task start here */
+ SST_GAIN_INDEX_MEDIA0_IN_LEFT,
+ SST_GAIN_INDEX_MEDIA0_IN_RIGHT,
+ SST_GAIN_INDEX_MEDIA1_IN_LEFT,
+ SST_GAIN_INDEX_MEDIA1_IN_RIGHT,
+
+ SST_GAIN_INDEX_MEDIA2_IN_LEFT,
+ SST_GAIN_INDEX_MEDIA2_IN_RIGHT,
+
+ SST_GAIN_INDEX_GAIN_END
+};
+
+/*
+ * Audio DSP module IDs specified by FW spec
+ * TODO: Update with all modules
+ */
+enum sst_module_id {
+ SST_MODULE_ID_GAIN_CELL = 0x0067,
+ SST_MODULE_ID_SPROT = 0x006D,
+ SST_MODULE_ID_NR = 0x0076,
+ SST_MODULE_ID_BWX = 0x0077,
+ SST_MODULE_ID_DRP = 0x0078,
+ SST_MODULE_ID_MDRP = 0x0079,
+
+ SST_MODULE_ID_ANA = 0x007A,
+ SST_MODULE_ID_AEC = 0x007B,
+ SST_MODULE_ID_NR_SNS = 0x007C,
+ SST_MODULE_ID_SER = 0x007D,
+ SST_MODULE_ID_AGC = 0x007E,
+
+ SST_MODULE_ID_CNI = 0x007F,
+ SST_MODULE_ID_CONTEXT_ALGO_AWARE = 0x0080,
+ SST_MODULE_ID_FIR_24 = 0x0081,
+ SST_MODULE_ID_IIR_24 = 0x0082,
+ SST_MODULE_ID_FILT_DCR = 0x0082,
+
+ SST_MODULE_ID_ASRC = 0x0083,
+ SST_MODULE_ID_TONE_GEN = 0x0084,
+ SST_MODULE_ID_BMF = 0x0086,
+ SST_MODULE_ID_EDL = 0x0087,
+ SST_MODULE_ID_GLC = 0x0088,
+
+ SST_MODULE_ID_FIR_16 = 0x0089,
+ SST_MODULE_ID_IIR_16 = 0x008A,
+ SST_MODULE_ID_DNR = 0x008B,
+
+ SST_MODULE_ID_CNI_TX = 0x0090,
+ SST_MODULE_ID_REF_LINE = 0x0091,
+ SST_MODULE_ID_VOLUME = 0x0092,
+
+ SST_MODULE_ID_TASK = 0xFFFF
+};
+
+enum sst_cmd {
+ SBA_IDLE = 14,
+ SBA_VB_SET_SPEECH_PATH = 26,
+ MMX_SET_GAIN = 33,
+ SBA_VB_SET_GAIN = 33,
+ FBA_VB_RX_CNI = 35,
+ MMX_SET_GAIN_TIMECONST = 36,
+ SBA_VB_SET_TIMECONST = 36,
+ FBA_VB_ANA = 37,
+ FBA_VB_SET_FIR = 38,
+ FBA_VB_SET_IIR = 39,
+ FBA_VB_AEC = 47,
+ FBA_VB_NR_UL = 48,
+ FBA_VB_AGC = 49,
+ FBA_VB_NR_DL = 55,
+ SBA_PROBE = 66,
+ MMX_PROBE = 66,
+ FBA_VB_SET_BIQUAD_D_C = 69,
+ FBA_VB_DUAL_BAND_COMP = 70,
+ FBA_VB_SNS = 72,
+ FBA_VB_SER = 78,
+ FBA_VB_TX_CNI = 80,
+ SBA_VB_START = 85,
+ FBA_VB_SET_REF_LINE = 94,
+ FBA_VB_SET_DELAY_LINE = 95,
+ FBA_VB_BWX = 104,
+ FBA_VB_GMM = 105,
+ FBA_VB_GLC = 107,
+ FBA_VB_BMF = 111,
+ FBA_VB_DNR = 113,
+ MMX_SET_SWM = 114,
+ SBA_SET_SWM = 114,
+ SBA_SET_MDRP = 116,
+ SBA_HW_SET_SSP = 117,
+ SBA_SET_MEDIA_LOOP_MAP = 118,
+ SBA_SET_MEDIA_PATH = 119,
+ MMX_SET_MEDIA_PATH = 119,
+ SBA_VB_LPRO = 126,
+ SBA_VB_SET_FIR = 128,
+ SBA_VB_SET_IIR = 129,
+ SBA_SET_SSP_SLOT_MAP = 130,
+ AWARE_ENV_CLASS_PARAMS = 130,
+};
+
+/* NOTE(review): ON is 3 (both low bits) for dsp/swm switches but 1 for
+ * path switches - values come from the FW interface spec; confirm there. */
+enum sst_dsp_switch {
+ SST_SWITCH_OFF = 0,
+ SST_SWITCH_ON = 3,
+};
+
+enum sst_path_switch {
+ SST_PATH_OFF = 0,
+ SST_PATH_ON = 1,
+};
+
+enum sst_swm_state {
+ SST_SWM_OFF = 0,
+ SST_SWM_ON = 3,
+};
+
+/* Helpers to populate a struct sst_destination_id (dst).  The LOCATION id
+ * is the 16-bit {path_id, cell_nbr_idx} pair; MODULE id selects the DSP
+ * module within that path. */
+#define SST_FILL_LOCATION_IDS(dst, cell_idx, pipe_id) do { \
+ dst.location_id.p.cell_nbr_idx = (cell_idx); \
+ dst.location_id.p.path_id = (pipe_id); \
+ } while (0)
+#define SST_FILL_LOCATION_ID(dst, loc_id) (\
+ dst.location_id.f = (loc_id))
+#define SST_FILL_MODULE_ID(dst, mod_id) (\
+ dst.module_id = (mod_id))
+
+/* id packs module in the high 16 bits and location in the low 16 bits */
+#define SST_FILL_DESTINATION1(dst, id) do { \
+ SST_FILL_LOCATION_ID(dst, (id) & 0xFFFF); \
+ SST_FILL_MODULE_ID(dst, ((id) & 0xFFFF0000) >> 16); \
+ } while (0)
+#define SST_FILL_DESTINATION2(dst, loc_id, mod_id) do { \
+ SST_FILL_LOCATION_ID(dst, loc_id); \
+ SST_FILL_MODULE_ID(dst, mod_id); \
+ } while (0)
+#define SST_FILL_DESTINATION3(dst, cell_idx, path_id, mod_id) do { \
+ SST_FILL_LOCATION_IDS(dst, cell_idx, path_id); \
+ SST_FILL_MODULE_ID(dst, mod_id); \
+ } while (0)
+
+/* dispatch on arity: level selects DESTINATION1/2/3 above */
+#define SST_FILL_DESTINATION(level, dst, ...) \
+ SST_FILL_DESTINATION##level(dst, __VA_ARGS__)
+#define SST_FILL_DEFAULT_DESTINATION(dst) \
+ SST_FILL_DESTINATION(2, dst, SST_DEFAULT_LOCATION_ID, SST_DEFAULT_MODULE_ID)
+
+/* Wire-format destination of a DSP command: a packed 16-bit location
+ * (cell index + path id, also addressable as one u16) plus a module id. */
+struct sst_destination_id {
+ union sst_location_id {
+ struct {
+ u8 cell_nbr_idx; /* module index */
+ u8 path_id; /* pipe_id */
+ } __packed p; /* part */
+ u16 f; /* full */
+ } __packed location_id;
+ u16 module_id;
+} __packed;
+
+/* Common header preceding every DSP command payload; length counts the
+ * bytes that follow this header. */
+struct sst_dsp_header {
+ struct sst_destination_id dst;
+ u16 command_id;
+ u16 length;
+} __packed;
+
+/*
+ *
+ * Common Commands
+ *
+ */
+struct sst_cmd_generic {
+ struct sst_dsp_header header;
+} __packed;
+
+struct swm_input_ids {
+ struct sst_destination_id input_id;
+} __packed;
+
+struct sst_cmd_set_swm {
+ struct sst_dsp_header header;
+ struct sst_destination_id output_id;
+ u16 switch_state;
+ u16 nb_inputs;
+ struct swm_input_ids input[SST_CMD_SWM_MAX_INPUTS];
+} __packed;
+
+struct sst_cmd_set_media_path {
+ struct sst_dsp_header header;
+ u16 switch_state;
+} __packed;
+
+struct sst_cmd_set_speech_path {
+ struct sst_dsp_header header;
+ u16 switch_state;
+ struct {
+ u16 rsvd:8;
+ u16 sample_length:2;
+ u16 rate:3;
+ u16 format:3;
+ } config;
+} __packed;
+
+struct gain_cell {
+ struct sst_destination_id dest;
+ s16 cell_gain_left;
+ s16 cell_gain_right;
+ u16 gain_time_constant;
+} __packed;
+
+#define NUM_GAIN_CELLS 1
+struct sst_cmd_set_gain_dual {
+ struct sst_dsp_header header;
+ u16 gain_cell_num;
+ struct gain_cell cell_gains[NUM_GAIN_CELLS];
+} __packed;
+
+struct sst_cmd_set_params {
+ struct sst_destination_id dst;
+ u16 command_id;
+ char params[0];
+} __packed;
+
+/*
+ *
+ * Media (MMX) commands
+ *
+ */
+
+/*
+ *
+ * SBA commands
+ *
+ */
+struct sst_cmd_sba_vb_start {
+ struct sst_dsp_header header;
+} __packed;
+
+union sba_media_loop_params {
+ struct {
+ u16 rsvd:8;
+ u16 sample_length:2;
+ u16 rate:3;
+ u16 format:3;
+ } part;
+ u16 full;
+} __packed;
+
+struct sst_cmd_sba_set_media_loop_map {
+ struct sst_dsp_header header;
+ u16 switch_state;
+ union sba_media_loop_params param;
+ u16 map;
+} __packed;
+
+enum sst_ssp_mode {
+ SSP_MODE_MASTER = 0,
+ SSP_MODE_SLAVE = 1,
+};
+
+enum sst_ssp_pcm_mode {
+ SSP_PCM_MODE_NORMAL = 0,
+ SSP_PCM_MODE_NETWORK = 1,
+};
+
+enum sst_ssp_duplex {
+ SSP_DUPLEX = 0,
+ SSP_RX = 1,
+ SSP_TX = 2,
+};
+
+enum sst_ssp_fs_frequency {
+ SSP_FS_8_KHZ = 0,
+ SSP_FS_16_KHZ = 1,
+ SSP_FS_44_1_KHZ = 2,
+ SSP_FS_48_KHZ = 3,
+};
+
+enum sst_ssp_fs_polarity {
+ SSP_FS_ACTIVE_LOW = 0,
+ SSP_FS_ACTIVE_HIGH = 1,
+};
+
+enum sst_ssp_protocol {
+ SSP_MODE_PCM = 0,
+ SSP_MODE_I2S = 1,
+};
+
+enum sst_ssp_port_id {
+ SSP_MODEM = 0,
+ SSP_BT = 1,
+ SSP_FM = 2,
+ SSP_CODEC = 3,
+};
+
+struct sst_cmd_sba_hw_set_ssp {
+ struct sst_dsp_header header;
+ u16 selection; /* 0:SSP0(def), 1:SSP1, 2:SSP2 */
+
+ u16 switch_state;
+
+ u16 nb_bits_per_slots:6; /* 0-32 bits, 24 (def) */
+ u16 nb_slots:4; /* 0-8: slots per frame */
+ u16 mode:3; /* 0:Master, 1: Slave */
+ u16 duplex:3;
+
+ u16 active_tx_slot_map:8; /* Bit map, 0:off, 1:on */
+ u16 reserved1:8;
+
+ u16 active_rx_slot_map:8; /* Bit map 0: Off, 1:On */
+ u16 reserved2:8;
+
+ u16 frame_sync_frequency;
+
+ u16 frame_sync_polarity:8;
+ u16 data_polarity:8;
+
+ u16 frame_sync_width; /* 1 to N clocks */
+ u16 ssp_protocol:8;
+ u16 start_delay:8; /* Start delay in terms of clock ticks */
+} __packed;
+
+#define SST_MAX_TDM_SLOTS 8
+
+struct sst_param_sba_ssp_slot_map {
+ struct sst_dsp_header header;
+
+ u16 param_id;
+ u16 param_len;
+ u16 ssp_index;
+
+ u8 rx_slot_map[SST_MAX_TDM_SLOTS];
+ u8 tx_slot_map[SST_MAX_TDM_SLOTS];
+} __packed;
+
+enum {
+ SST_PROBE_EXTRACTOR = 0,
+ SST_PROBE_INJECTOR = 1,
+};
+
+struct sst_cmd_probe {
+ struct sst_dsp_header header;
+
+ u16 switch_state;
+ struct sst_destination_id probe_dst;
+
+ u16 shared_mem:1;
+ u16 probe_in:1;
+ u16 probe_out:1;
+ u16 rsvd_1:13;
+
+ u16 rsvd_2:5;
+ u16 probe_mode:2;
+ u16 rsvd_3:1;
+ u16 sample_length:2;
+ u16 rate:3;
+ u16 format:3;
+
+ u16 sm_buf_id;
+
+ u16 gain[6];
+ u16 rsvd_4[9];
+} __packed;
+
+struct sst_probe_config {
+ const char *name;
+ u16 loc_id;
+ u16 mod_id;
+ u8 task_id;
+ struct pcm_cfg {
+ u8 s_length:2;
+ u8 rate:3;
+ u8 format:3;
+ } cfg;
+};
+
+int sst_mix_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int sst_mix_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+#endif
--- /dev/null
+/*
+ * controls_v2_dpcm.c - Intel MID Platform driver DPCM ALSA controls for Mrfld
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+#include "controls_v2.h"
+#include "sst_widgets.h"
+
+/*
+ * sst_fill_byte_control - build a snd_sst_bytes_v2 message in @param.
+ *
+ * Writes the header fields then copies @len bytes of @cmd_data after it.
+ * An oversized @len is clamped to fit SST_MAX_BIN_BYTES and triggers a
+ * WARN_ON, since that indicates a driver-side programming error.
+ */
+static inline void sst_fill_byte_control(char *param,
+ u8 ipc_msg, u8 block,
+ u8 task_id, u8 pipe_id,
+ u16 len, void *cmd_data)
+{
+
+ struct snd_sst_bytes_v2 *byte_data = (struct snd_sst_bytes_v2 *)param;
+ byte_data->type = SST_CMD_BYTES_SET;
+ byte_data->ipc_msg = ipc_msg;
+ byte_data->block = block;
+ byte_data->task_id = task_id;
+ byte_data->pipe_id = pipe_id;
+
+ if (len > SST_MAX_BIN_BYTES - sizeof(*byte_data)) {
+ pr_err("%s: command length too big (%u)", __func__, len);
+ len = SST_MAX_BIN_BYTES - sizeof(*byte_data);
+ WARN_ON(1); /* this happens only if code is wrong */
+ }
+ byte_data->len = len;
+ memcpy(byte_data->bytes, cmd_data, len);
+ print_hex_dump_bytes("writing to lpe: ", DUMP_PREFIX_OFFSET,
+ byte_data, len + sizeof(*byte_data));
+}
+
+/*
+ * sst_fill_and_send_cmd - serialize a command into the shared byte-stream
+ * buffer and send it to the DSP.
+ *
+ * sst->lock guards the shared sst->byte_stream for the fill + send pair.
+ * Returns the DSP driver's result (0 on success, negative on error).
+ */
+static int sst_fill_and_send_cmd(struct sst_data *sst,
+ u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
+ void *cmd_data, u16 len)
+{
+ int ret = 0;
+
+ mutex_lock(&sst->lock);
+ sst_fill_byte_control(sst->byte_stream, ipc_msg, block, task_id, pipe_id,
+ len, cmd_data);
+ ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+ sst->byte_stream);
+ mutex_unlock(&sst->lock);
+
+ return ret;
+}
+
+/* sst_probe_get - report the cached probe-enum selection. */
+static int sst_probe_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sst_probe_value *v = (void *)kcontrol->private_value;
+
+ ucontrol->value.enumerated.item[0] = v->val;
+ return 0;
+}
+
+/*
+ * sst_probe_put - cache a probe-enum selection after range-checking it
+ * against the enum's item count (no IPC is sent here).
+ */
+static int sst_probe_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sst_probe_value *v = (void *)kcontrol->private_value;
+ const struct soc_enum *e = v->p_enum;
+
+ if (ucontrol->value.enumerated.item[0] > e->max - 1)
+ return -EINVAL;
+ v->val = ucontrol->value.enumerated.item[0];
+ return 0;
+}
+
+/*
+ * sst_probe_enum_info - .info callback for the probe enum controls.
+ *
+ * Fills in the enumerated type/count and resolves the queried item index
+ * to its text, clamping the index to the last valid entry.
+ */
+int sst_probe_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct sst_probe_value *v = (void *)kcontrol->private_value;
+ const struct soc_enum *e = v->p_enum;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = e->max;
+
+ if (uinfo->value.enumerated.item > e->max - 1)
+ uinfo->value.enumerated.item = e->max - 1;
+ strcpy(uinfo->value.enumerated.name,
+ e->texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/*
+ * slot map value is a bitfield where each bit represents a FW channel
+ *
+ * 3 2 1 0 # 0 = codec0, 1 = codec1
+ * RLRLRLRL # 3, 4 = reserved
+ *
+ * e.g. slot 0 rx map = 00001100b -> data from slot 0 goes into codec_in1 L,R
+ */
+/* mutable driver state: updated by the slot kcontrols below, read when
+ * building the SBA_SET_SSP_SLOT_MAP command */
+static u8 sst_ssp_slot_map[SST_MAX_TDM_SLOTS] = {
+ 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default rx map */
+};
+
+/*
+ * channel map value is a bitfield where each bit represents a slot
+ *
+ * 76543210 # 0 = slot 0, 1 = slot 1
+ *
+ * e.g. codec1_0 tx map = 00000101b -> data from codec_out1_0 goes into slot 0, 2
+ */
+static u8 sst_ssp_channel_map[SST_MAX_TDM_SLOTS] = {
+ 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default tx map */
+};
+
+/*
+ * sst_slot_get - report which slot/channel a given kcontrol's bit maps to.
+ *
+ * e->reg holds the kcontrol number (the bit), e->reg2 selects the tx
+ * (channel) or rx (slot) map.  Enum item 0 is "None"; items 1..e->max-1
+ * correspond to map entries 0..e->max-2.
+ */
+static int sst_slot_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int ctl_no = e->reg;
+	unsigned int is_tx = e->reg2;
+	unsigned int val, mux;
+	u8 *map = is_tx ? sst_ssp_channel_map : sst_ssp_slot_map;
+
+	val = 1 << ctl_no;
+	/*
+	 * Search which slot/channel has this bit set - there should be only
+	 * one.  Start at e->max - 1, not e->max: "None" occupies item 0, so
+	 * map[] holds at most e->max - 1 entries and the old upper bound
+	 * read one element past the end (and indexed texts[e->max] below).
+	 */
+	for (mux = e->max - 1; mux > 0; mux--)
+		if (map[mux - 1] & val)
+			break;
+
+	ucontrol->value.enumerated.item[0] = mux;
+	pr_debug("%s: %s - %s map = %#x\n", __func__, is_tx ? "tx channel" : "rx slot",
+		 e->texts[mux], mux ? map[mux - 1] : -1);
+	return 0;
+}
+
+/*
+ * (de)interleaver controls are defined in opposite sense to be user-friendly
+ *
+ * Instead of the enum value being the value set to the register, it is the
+ * register address; and the kcontrol_no is the value written to the register.
+ *
+ * This means that whenever an enum is set, we need to clear the bit
+ * for that kcontrol_no for all the interleaver OR deinterleaver registers
+ */
+static int sst_slot_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	int i;
+	unsigned int ctl_no = e->reg;
+	unsigned int is_tx = e->reg2;
+	unsigned int slot_channel_no;
+	unsigned int val, mux;
+	u8 *map = is_tx ? sst_ssp_channel_map : sst_ssp_slot_map;
+
+	val = 1 << ctl_no;
+	mux = ucontrol->value.enumerated.item[0];
+	if (mux > e->max - 1)
+		return -EINVAL;
+
+	/*
+	 * First clear this bit in all registers.  Valid map indexes are
+	 * 0..e->max - 2 ("None" is enum item 0 and never stored), so loop
+	 * over e->max - 1 entries; the previous bound of e->max cleared one
+	 * element past the end of map[].
+	 */
+	for (i = 0; i < e->max - 1; i++)
+		map[i] &= ~val;
+
+	if (mux == 0) /* kctl set to 'none' */
+		return 0;
+
+	/* offset by one to take "None" into account */
+	slot_channel_no = mux - 1;
+	map[slot_channel_no] |= val;
+
+	pr_debug("%s: %s %s map = %#x\n", __func__, is_tx ? "tx channel" : "rx slot",
+		 e->texts[mux], map[slot_channel_no]);
+	return 0;
+}
+
+/* assumes a boolean mux: reads one bit of the shadow register */
+static inline bool get_mux_state(struct sst_data *sst, unsigned int reg, unsigned int shift)
+{
+	return sst_reg_read(sst, reg, shift, 1) == 1;
+}
+
+/* DAPM mux get: read the cached selection from the shadow register */
+static int sst_mux_get(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *w = wlist->widgets[0];
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+
+	ucontrol->value.enumerated.item[0] =
+		sst_reg_read(sst, e->reg, e->shift_l, e->max - 1);
+	return 0;
+}
+
+/*
+ * DAPM mux put: store the new selection in the shadow register and let
+ * DAPM re-evaluate the paths behind the mux.
+ *
+ * NOTE: 'update' lives on the stack and is published through
+ * dapm->update only for the duration of snd_soc_dapm_mux_update_power();
+ * it is cleared again before returning, so the ordering of these
+ * statements must not change.
+ */
+static int sst_mux_put(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	struct snd_soc_dapm_update update;
+	unsigned int max = e->max - 1;
+	unsigned int mask = (1 << fls(max)) - 1;
+	unsigned int mux, val;
+
+	if (ucontrol->value.enumerated.item[0] > e->max - 1)
+		return -EINVAL;
+
+	mux = ucontrol->value.enumerated.item[0];
+	/* shadow-register write returns the resulting register value */
+	val = sst_reg_write(sst, e->reg, e->shift_l, max, mux);
+
+	pr_debug("%s: reg[%d] = %#x\n", __func__, e->reg, val);
+
+	widget->value = val;
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+
+	widget->dapm->update = &update;
+	snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
+	widget->dapm->update = NULL;
+	return 0;
+}
+
+/* mode-enum get: report the value cached in the shadow register */
+static int sst_mode_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+
+	ucontrol->value.enumerated.item[0] =
+		sst_reg_read(sst, e->reg, e->shift_l, e->max - 1);
+	return 0;
+}
+
+/* mode-enum put: cache the new value in the shadow register (no IPC) */
+static int sst_mode_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int item = ucontrol->value.enumerated.item[0];
+	unsigned int val;
+
+	/* reject out-of-range enum selections */
+	if (item >= e->max)
+		return -EINVAL;
+
+	val = sst_reg_write(sst, e->reg, e->shift_l, e->max - 1, item);
+	pr_debug("%s: reg[%d] - %#x\n", __func__, e->reg, val);
+	return 0;
+}
+
+/*
+ * sst_send_algo_cmd - push an algo control's cached parameter blob to the
+ * DSP as a SET_PARAMS command.  Does nothing if no params were ever cached.
+ */
+static void sst_send_algo_cmd(struct sst_data *sst,
+			      struct sst_algo_control *bc)
+{
+	int len;
+	struct sst_cmd_set_params *cmd;
+
+	if (bc->params == NULL)
+		return;
+
+	/* bc->max already covers the whole parameter payload */
+	len = sizeof(cmd->dst) + sizeof(cmd->command_id) + bc->max;
+
+	/*
+	 * Allocate exactly len bytes: the old kzalloc(len + bc->max) added
+	 * the payload size a second time and over-allocated every send.
+	 */
+	cmd = kzalloc(len, GFP_KERNEL);
+	if (cmd == NULL) {
+		pr_err("Failed to send cmd, kzalloc failed\n");
+		return;
+	}
+
+	SST_FILL_DESTINATION(2, cmd->dst, bc->pipe_id, bc->module_id);
+	cmd->command_id = bc->cmd_id;
+	memcpy(cmd->params, bc->params, bc->max);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      bc->task_id, 0, cmd, len);
+	kfree(cmd);
+}
+
+/* re-send the cached params of every algo attached to this widget's pipe */
+static void sst_find_and_send_pipe_algo(struct snd_soc_platform *platform,
+					struct snd_soc_dapm_widget *w)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_ids *ids = w->priv;
+	struct module *algo;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+
+	list_for_each_entry(algo, &ids->algo_list, node) {
+		struct sst_algo_control *bc = (void *)algo->kctl->private_value;
+
+		pr_debug("Found algo control name =%s pipe=%s\n", algo->kctl->id.name, w->name);
+		sst_send_algo_cmd(sst, bc);
+	}
+}
+
+/*
+ * kcontrol info for algo byte controls; also lazily allocates the
+ * parameter cache (device-managed, freed with the platform device).
+ */
+int sst_algo_bytes_ctl_info(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = bc->max;
+
+	/* first query allocates the cache used by get/put/send */
+	if (!bc->params) {
+		bc->params = devm_kzalloc(platform->dev, bc->max, GFP_KERNEL);
+		if (!bc->params) {
+			pr_err("kzalloc failed\n");
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/* algo-control get: report either the cached param blob or the bypass flag */
+static int sst_algo_control_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+	pr_debug("in %s\n", __func__);
+	switch (bc->type) {
+	case SST_ALGO_PARAMS:
+		/* params may still be unallocated if info was never called */
+		if (bc->params)
+			memcpy(ucontrol->value.bytes.data, bc->params, bc->max);
+		return 0;
+	case SST_ALGO_BYPASS:
+		ucontrol->value.integer.value[0] = bc->bypass ? 1 : 0;
+		pr_debug("%s: bypass %d\n", __func__, bc->bypass);
+		return 0;
+	default:
+		pr_err("Invalid Input- algo type:%d\n", bc->type);
+		return -EINVAL;
+	}
+}
+
+/*
+ * algo-control put: cache the new params/bypass state and, if the owning
+ * pipe widget is powered, push them to the DSP right away.
+ */
+static int sst_algo_control_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+	pr_debug("in %s control_name=%s\n", __func__, kcontrol->id.name);
+	switch (bc->type) {
+	case SST_ALGO_PARAMS:
+		if (bc->params)
+			memcpy(bc->params, ucontrol->value.bytes.data, bc->max);
+		break;
+	case SST_ALGO_BYPASS:
+		bc->bypass = !!ucontrol->value.integer.value[0];
+		pr_debug("%s: Mute %d\n", __func__, bc->bypass);
+		break;
+	default:
+		/* log the type we switched on (was printing the user value) */
+		pr_err("Invalid Input- algo type:%d\n", bc->type);
+		return -EINVAL;
+	}
+	/* if pipe is enabled, need to send the algo params from here */
+	if (bc->w && bc->w->power)
+		sst_send_algo_cmd(sst, bc);
+
+	return 0;
+}
+
+/* gain-control info: integer control, two channels when stereo */
+static int sst_gain_ctl_info(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+	struct snd_ctl_elem_info *info = uinfo;
+
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = mc->stereo ? 2 : 1;
+	info->value.integer.min = mc->min;
+	info->value.integer.max = mc->max;
+	return 0;
+}
+
+/*
+ * sst_send_gain_cmd - send one MMX_SET_GAIN cell for a gain module.
+ *
+ * When either the caller requests mute or the cached state says muted,
+ * both channels are forced to SST_GAIN_MIN_VALUE instead of the cached
+ * L/R gains.  The command carries exactly one gain cell.
+ */
+static void sst_send_gain_cmd(struct sst_data *sst, struct sst_gain_value *gv,
+			      u16 task_id, u16 loc_id, u16 module_id, int mute)
+{
+	struct sst_cmd_set_gain_dual cmd;
+	pr_debug("%s", __func__);
+
+	cmd.header.command_id = MMX_SET_GAIN;
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.gain_cell_num = 1;
+
+	if (mute || gv->mute) {
+		cmd.cell_gains[0].cell_gain_left = SST_GAIN_MIN_VALUE;
+		cmd.cell_gains[0].cell_gain_right = SST_GAIN_MIN_VALUE;
+	} else {
+		cmd.cell_gains[0].cell_gain_left = gv->l_gain;
+		cmd.cell_gains[0].cell_gain_right = gv->r_gain;
+	}
+	SST_FILL_DESTINATION(2, cmd.cell_gains[0].dest,
+			     loc_id, module_id);
+	cmd.cell_gains[0].gain_time_constant = gv->ramp_duration;
+
+	/* payload length excludes the dsp header itself */
+	cmd.header.length = sizeof(struct sst_cmd_set_gain_dual)
+				- sizeof(struct sst_dsp_header);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+}
+
+/* gain-control get: report cached volume, mute or ramp duration */
+static int sst_gain_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+	struct sst_gain_value *gv = mc->gain_val;
+	long *out = ucontrol->value.integer.value;
+
+	switch (mc->type) {
+	case SST_GAIN_TLV:
+		out[0] = gv->l_gain;
+		out[1] = gv->r_gain;
+		pr_debug("%s: Volume %d, %d\n", __func__, gv->l_gain, gv->r_gain);
+		return 0;
+	case SST_GAIN_MUTE:
+		out[0] = gv->mute ? 1 : 0;
+		pr_debug("%s: Mute %d\n", __func__, gv->mute);
+		return 0;
+	case SST_GAIN_RAMP_DURATION:
+		out[0] = gv->ramp_duration;
+		pr_debug("%s: RampDuration %d\n", __func__, gv->ramp_duration);
+		return 0;
+	default:
+		pr_err("Invalid Input- gain type:%d\n", mc->type);
+		return -EINVAL;
+	}
+}
+
+/*
+ * gain-control put: cache the new volume/mute/ramp value and, when the
+ * owning widget is powered, send it to the DSP immediately.
+ */
+static int sst_gain_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+	struct sst_gain_value *gv = mc->gain_val;
+	const long *in = ucontrol->value.integer.value;
+
+	switch (mc->type) {
+	case SST_GAIN_TLV:
+		gv->l_gain = in[0];
+		gv->r_gain = in[1];
+		pr_debug("%s: Volume %d, %d\n", __func__, gv->l_gain, gv->r_gain);
+		break;
+	case SST_GAIN_MUTE:
+		gv->mute = !!in[0];
+		pr_debug("%s: Mute %d\n", __func__, gv->mute);
+		break;
+	case SST_GAIN_RAMP_DURATION:
+		gv->ramp_duration = in[0];
+		pr_debug("%s: RampDuration %d\n", __func__, gv->ramp_duration);
+		break;
+	default:
+		pr_err("Invalid Input- gain type:%d\n", mc->type);
+		return -EINVAL;
+	}
+
+	if (mc->w && mc->w->power)
+		sst_send_gain_cmd(sst, gv, mc->task_id,
+				  mc->pipe_id | mc->instance_id, mc->module_id, 0);
+	return 0;
+}
+
+/* TLV scale: min = SST_GAIN_MIN_VALUE * 10 in 0.01 dB units, 0.1 dB steps */
+static const DECLARE_TLV_DB_SCALE(sst_gain_tlv_common, SST_GAIN_MIN_VALUE * 10, 10, 0);
+
+/* Look up table to convert MIXER SW bit regs to SWM inputs */
+static const uint swm_mixer_input_ids[SST_SWM_INPUT_COUNT] = {
+	[SST_IP_MODEM]		= SST_SWM_IN_MODEM,
+	[SST_IP_BT]		= SST_SWM_IN_BT,
+	[SST_IP_CODEC0]		= SST_SWM_IN_CODEC0,
+	[SST_IP_CODEC1]		= SST_SWM_IN_CODEC1,
+	[SST_IP_LOOP0]		= SST_SWM_IN_SPROT_LOOP,
+	[SST_IP_LOOP1]		= SST_SWM_IN_MEDIA_LOOP1,
+	[SST_IP_LOOP2]		= SST_SWM_IN_MEDIA_LOOP2,
+	[SST_IP_SIDETONE]	= SST_SWM_IN_SIDETONE,
+	[SST_IP_TXSPEECH]	= SST_SWM_IN_TXSPEECH,
+	[SST_IP_SPEECH]		= SST_SWM_IN_SPEECH,
+	[SST_IP_TONE]		= SST_SWM_IN_TONE,
+	[SST_IP_VOIP]		= SST_SWM_IN_VOIP,
+	[SST_IP_PCM0]		= SST_SWM_IN_PCM0,
+	[SST_IP_PCM1]		= SST_SWM_IN_PCM1,
+	[SST_IP_LOW_PCM0]	= SST_SWM_IN_LOW_PCM0,
+	[SST_IP_FM]		= SST_SWM_IN_FM,
+	[SST_IP_MEDIA0]		= SST_SWM_IN_MEDIA0,
+	[SST_IP_MEDIA1]		= SST_SWM_IN_MEDIA1,
+	[SST_IP_MEDIA2]		= SST_SWM_IN_MEDIA2,
+	[SST_IP_MEDIA3]		= SST_SWM_IN_MEDIA3,
+};
+
+/*
+ * fill_swm_input - expand a mixer-connection bitmask into SWM input
+ * descriptors, one per set bit, capped at SST_CMD_SWM_MAX_INPUTS.
+ * Returns the number of descriptors written.
+ */
+static int fill_swm_input(struct swm_input_ids *swm_input, unsigned int reg)
+{
+	uint i, nb_inputs = 0;
+	u16 input_loc_id;
+
+	pr_debug("%s:reg value:%#x\n", __func__, reg);
+	for (i = 0; i < SST_SWM_INPUT_COUNT; i++) {
+		if (!(reg & BIT(i)))
+			continue;
+
+		input_loc_id = swm_mixer_input_ids[i];
+		SST_FILL_DESTINATION(2, swm_input->input_id,
+				     input_loc_id, SST_DEFAULT_MODULE_ID);
+		nb_inputs++;
+		swm_input++;
+		pr_debug("input id:%#x, nb_inputs:%d\n", input_loc_id, nb_inputs);
+
+		/* the SET_SWM command can only carry so many inputs */
+		if (nb_inputs == SST_CMD_SWM_MAX_INPUTS) {
+			pr_warn("%s: SET_SWM cmd max inputs reached", __func__);
+			break;
+		}
+	}
+	return nb_inputs;
+}
+
+/* re-send the cached gain of every gain cell attached to this pipe */
+static void sst_set_pipe_gain(struct sst_ids *ids, struct sst_data *sst, int mute)
+{
+	struct module *gain;
+
+	list_for_each_entry(gain, &ids->gain_list, node) {
+		struct sst_gain_mixer_control *mc =
+			(void *)gain->kctl->private_value;
+
+		pr_debug("control name=%s", gain->kctl->id.name);
+		sst_send_gain_cmd(sst, mc->gain_val, mc->task_id,
+				  mc->pipe_id | mc->instance_id,
+				  mc->module_id, mute);
+	}
+}
+
+/*
+ * DAPM event handler for SWM (software mixer) widgets: converts the
+ * shadow-register bitmask of connected inputs into a SET_SWM command.
+ * The command is sent on power up/down, and on connection changes
+ * (POST_REG) only while the widget is powered.
+ */
+static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_swm cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	bool set_mixer = false;
+	int val = sst->widget[ids->reg];
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+	pr_debug("%s: reg[%d] = %#x\n", __func__, ids->reg, val);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		set_mixer = true;
+		break;
+	case SND_SOC_DAPM_POST_REG:
+		/* connection change: only re-program an active mixer */
+		if (w->power)
+			set_mixer = true;
+		break;
+	default:
+		set_mixer = false;
+	}
+
+	if (set_mixer == false)
+		return 0;
+
+	if (SND_SOC_DAPM_EVENT_ON(event) ||
+	    event == SND_SOC_DAPM_POST_REG)
+		cmd.switch_state = SST_SWM_ON;
+	else
+		cmd.switch_state = SST_SWM_OFF;
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	/* MMX_SET_SWM == SBA_SET_SWM */
+	cmd.header.command_id = SBA_SET_SWM;
+
+	SST_FILL_DESTINATION(2, cmd.output_id,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+	cmd.nb_inputs =	fill_swm_input(&cmd.input[0], val);
+	/* length covers only the inputs actually filled in */
+	cmd.header.length = offsetof(struct sst_cmd_set_swm, input) - sizeof(struct sst_dsp_header)
+				+ (cmd.nb_inputs * sizeof(cmd.input[0]));
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      ids->task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+	return 0;
+}
+
+/* SBA mixers - 16 inputs */
+/* declares one kcontrol array with a switch per possible SBA mixer input */
+#define SST_SBA_DECLARE_MIX_CONTROLS(kctl_name, mixer_reg)			\
+	static const struct snd_kcontrol_new kctl_name[] = {			\
+		SOC_SINGLE_EXT("modem_in", mixer_reg, SST_IP_MODEM, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("bt_in", mixer_reg, SST_IP_BT, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("codec_in0", mixer_reg, SST_IP_CODEC0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("codec_in1", mixer_reg, SST_IP_CODEC1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("sprot_loop_in", mixer_reg, SST_IP_LOOP0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media_loop1_in", mixer_reg, SST_IP_LOOP1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media_loop2_in", mixer_reg, SST_IP_LOOP2, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("sidetone_in", mixer_reg, SST_IP_SIDETONE, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("txspeech_in", mixer_reg, SST_IP_TXSPEECH, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("speech_in", mixer_reg, SST_IP_SPEECH, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("tone_in", mixer_reg, SST_IP_TONE, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("voip_in", mixer_reg, SST_IP_VOIP, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("pcm0_in", mixer_reg, SST_IP_PCM0, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("pcm1_in", mixer_reg, SST_IP_PCM1, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("low_pcm0_in", mixer_reg, SST_IP_LOW_PCM0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("fm_in", mixer_reg, SST_IP_FM, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+	}
+
+/* DAPM routes from each input widget to the matching mixer switch */
+#define SST_SBA_MIXER_GRAPH_MAP(mix_name)			\
+	{ mix_name, "modem_in", "modem_in" },			\
+	{ mix_name, "bt_in", "bt_in" },				\
+	{ mix_name, "codec_in0", "codec_in0" },			\
+	{ mix_name, "codec_in1", "codec_in1" },			\
+	{ mix_name, "sprot_loop_in", "sprot_loop_in" },		\
+	{ mix_name, "media_loop1_in", "media_loop1_in" },	\
+	{ mix_name, "media_loop2_in", "media_loop2_in" },	\
+	{ mix_name, "sidetone_in", "sidetone_in" },		\
+	{ mix_name, "txspeech_in", "txspeech_in" },		\
+	{ mix_name, "speech_in", "speech_in" },			\
+	{ mix_name, "tone_in", "tone_in" },			\
+	{ mix_name, "voip_in", "voip_in" },			\
+	{ mix_name, "pcm0_in", "pcm0_in" },			\
+	{ mix_name, "pcm1_in", "pcm1_in" },			\
+	{ mix_name, "low_pcm0_in", "low_pcm0_in" },		\
+	{ mix_name, "fm_in", "fm_in" }
+
+/* MMX (media) mixers expose only the four media inputs */
+#define SST_MMX_DECLARE_MIX_CONTROLS(kctl_name, mixer_reg)		\
+	static const struct snd_kcontrol_new kctl_name[] = {		\
+		SOC_SINGLE_EXT("media0_in", mixer_reg, SST_IP_MEDIA0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media1_in", mixer_reg, SST_IP_MEDIA1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media2_in", mixer_reg, SST_IP_MEDIA2, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media3_in", mixer_reg, SST_IP_MEDIA3, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+	}
+
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media0_controls, SST_MIX_MEDIA0);
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media1_controls, SST_MIX_MEDIA1);
+
+/* 18 SBA mixers */
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm0_controls, SST_MIX_PCM0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm1_controls, SST_MIX_PCM1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm2_controls, SST_MIX_PCM2);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_sprot_l0_controls, SST_MIX_LOOP0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l1_controls, SST_MIX_LOOP1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l2_controls, SST_MIX_LOOP2);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_voip_controls, SST_MIX_VOIP);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_aware_controls, SST_MIX_AWARE);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_vad_controls, SST_MIX_VAD);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_hf_sns_controls, SST_MIX_HF_SNS);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_hf_controls, SST_MIX_HF);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_speech_controls, SST_MIX_SPEECH);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_rxspeech_controls, SST_MIX_RXSPEECH);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec0_controls, SST_MIX_CODEC0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec1_controls, SST_MIX_CODEC1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_bt_controls, SST_MIX_BT);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_fm_controls, SST_MIX_FM);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_modem_controls, SST_MIX_MODEM);
+
+/*
+ * Voice-band trigger: starts (SBA_VB_START) or idles (SBA_IDLE) the SBA
+ * task.  The DSP power reference is taken *before* sending VB_START and
+ * dropped *after* sending IDLE, so the ordering below is intentional.
+ */
+static int sst_vb_trigger_event(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_generic cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.header.command_id = SBA_VB_START;
+	else
+		cmd.header.command_id = SBA_IDLE;
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.length = 0;
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		sst_dsp->ops->power(true);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+
+	if (!SND_SOC_DAPM_EVENT_ON(event))
+		sst_dsp->ops->power(false);
+	return 0;
+}
+
+/*
+ * sst_send_slot_map - push the current TDM rx slot map and tx channel map
+ * for the codec SSP to the DSP (SBA_SET_SSP_SLOT_MAP).
+ */
+static void sst_send_slot_map(struct sst_data *sst)
+{
+	struct sst_param_sba_ssp_slot_map cmd;
+
+	pr_debug("Enter: %s", __func__);
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.command_id = SBA_SET_SSP_SLOT_MAP;
+	cmd.header.length = sizeof(struct sst_param_sba_ssp_slot_map)
+				- sizeof(struct sst_dsp_header);
+
+	cmd.param_id = SBA_SET_SSP_SLOT_MAP;
+	cmd.param_len = sizeof(cmd.rx_slot_map) + sizeof(cmd.tx_slot_map) + sizeof(cmd.ssp_index);
+	/* only the codec SSP carries TDM slot mapping */
+	cmd.ssp_index = SSP_CODEC;
+
+	memcpy(cmd.rx_slot_map, &sst_ssp_slot_map[0], sizeof(cmd.rx_slot_map));
+	memcpy(cmd.tx_slot_map, &sst_ssp_channel_map[0], sizeof(cmd.tx_slot_map));
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+}
+
+/*
+ * DAPM event for SSP ports: programs the SSP with the configuration
+ * selected by the current mux (e.g. bt vs fm) and domain (e.g. NB vs WB)
+ * shadow-register state, and switches the port on/off.
+ *
+ * ssp_active[] reference-counts users per SSP so the port is only turned
+ * off when the last widget using it powers down.
+ * NOTE(review): ssp_active is static function state with no explicit
+ * locking visible here - presumably serialized by DAPM; confirm.
+ */
+static int sst_ssp_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_sba_hw_set_ssp cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	static int ssp_active[SST_NUM_SSPS];
+	unsigned int domain, mux;
+	unsigned int ssp_no = ids->ssp->ssp_number;
+	int domain_shift, mux_shift;
+	const struct sst_ssp_config *config;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.command_id = SBA_HW_SET_SSP;
+	cmd.header.length = sizeof(struct sst_cmd_sba_hw_set_ssp)
+				- sizeof(struct sst_dsp_header);
+	/* shift of -1 means a single (fixed) mux/domain for this SSP */
+	mux_shift = *ids->ssp->mux_shift;
+	mux = (mux_shift == -1) ? 0 : get_mux_state(sst, SST_MUX_REG, mux_shift);
+	domain_shift = (*ids->ssp->domain_shift)[mux];
+	domain = (domain_shift == -1) ? 0 : get_mux_state(sst, SST_MUX_REG, domain_shift);
+
+	config = &(*ids->ssp->ssp_config)[mux][domain];
+	pr_debug("%s: ssp_id: %u, mux: %d, domain: %d\n", __func__,
+		 config->ssp_id, mux, domain);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		ssp_active[ssp_no]++;
+	else
+		ssp_active[ssp_no]--;
+
+	pr_debug("%s: ssp_no: %u ssp_active: %d", __func__, ssp_no, ssp_active[ssp_no]);
+	if (ssp_active[ssp_no])
+		cmd.switch_state = SST_SWITCH_ON;
+	else
+		cmd.switch_state = SST_SWITCH_OFF;
+
+	cmd.selection = config->ssp_id;
+	cmd.nb_bits_per_slots = config->bits_per_slot;
+	cmd.nb_slots = config->slots;
+	cmd.mode = config->ssp_mode | (config->pcm_mode << 1);
+	cmd.duplex = config->duplex;
+	cmd.active_tx_slot_map = config->active_slot_map;
+	cmd.active_rx_slot_map = config->active_slot_map;
+	cmd.frame_sync_frequency = config->fs_frequency;
+	cmd.frame_sync_polarity = SSP_FS_ACTIVE_HIGH;
+	cmd.data_polarity = 1;
+	cmd.frame_sync_width = config->fs_width;
+	cmd.ssp_protocol = config->ssp_protocol;
+	cmd.start_delay = config->start_delay;
+	cmd.reserved1 = cmd.reserved2 = 0xFF;
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+
+	/* on power-up also restore algos, slot maps and gains for the pipe */
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_send_slot_map(sst);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+	return 0;
+}
+
+/*
+ * DAPM event for the speech path: reference-counts users and sends
+ * SBA_VB_SET_SPEECH_PATH only on the first user up / last user down.
+ * Sample rate follows the voice-mode mux (narrowband 8 kHz vs
+ * wideband 16 kHz).
+ * NOTE(review): speech_active is static function state with no explicit
+ * locking visible here - presumably serialized by DAPM; confirm.
+ */
+static int sst_set_speech_path(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_speech_path cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	bool is_wideband;
+	static int speech_active;
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		speech_active++;
+		cmd.switch_state = SST_SWITCH_ON;
+	} else {
+		speech_active--;
+		cmd.switch_state = SST_SWITCH_OFF;
+	}
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+
+	cmd.header.command_id = SBA_VB_SET_SPEECH_PATH;
+	cmd.header.length = sizeof(struct sst_cmd_set_speech_path)
+				- sizeof(struct sst_dsp_header);
+	cmd.config.sample_length = 0;
+	cmd.config.rate = 0;		/* 8 khz */
+	cmd.config.format = 0;
+
+	is_wideband = get_mux_state(sst, SST_MUX_REG, SST_VOICE_MODE_SHIFT);
+	if (is_wideband)
+		cmd.config.rate = 1;	/* 16 khz */
+
+	/* only the first ON and the last OFF actually reach the DSP */
+	if ((SND_SOC_DAPM_EVENT_ON(event) && (speech_active == 1)) ||
+	    (SND_SOC_DAPM_EVENT_OFF(event) && (speech_active == 0)))
+		sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				      SST_TASK_SBA, 0, &cmd,
+				      sizeof(cmd.header) + cmd.header.length);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+
+	return 0;
+
+}
+
+/*
+ * DAPM event for media path widgets: switches the FW path on/off with
+ * SET_MEDIA_PATH and, on power-up, restores the pipe's algo params and
+ * gains.
+ */
+static int sst_set_media_path(struct snd_soc_dapm_widget *w,
+			      struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_media_path cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+	pr_debug("%s: task=%u, location=%#x\n", __func__,
+		 ids->task_id, ids->location_id);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.switch_state = SST_PATH_ON;
+	else
+		cmd.switch_state = SST_PATH_OFF;
+
+	SST_FILL_DESTINATION(2, cmd.header.dst,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+
+	/* MMX_SET_MEDIA_PATH == SBA_SET_MEDIA_PATH */
+	cmd.header.command_id = MMX_SET_MEDIA_PATH;
+	cmd.header.length = sizeof(struct sst_cmd_set_media_path)
+				- sizeof(struct sst_dsp_header);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      ids->task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * DAPM event for media loop widgets: configures the loop's rate/format
+ * and algo ordering via SBA_SET_MEDIA_LOOP_MAP and switches it on/off.
+ */
+static int sst_set_media_loop(struct snd_soc_dapm_widget *w,
+			      struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_sba_set_media_loop_map cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.switch_state = SST_SWITCH_ON;
+	else
+		cmd.switch_state = SST_SWITCH_OFF;
+
+	SST_FILL_DESTINATION(2, cmd.header.dst,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+
+	cmd.header.command_id = SBA_SET_MEDIA_LOOP_MAP;
+	cmd.header.length = sizeof(struct sst_cmd_sba_set_media_loop_map)
+				 - sizeof(struct sst_dsp_header);
+	cmd.param.part.rate = 2;	/* 48khz */
+
+	cmd.param.part.format = ids->format;	/* stereo/Mono */
+	cmd.param.part.sample_length = 1;	/* 24bit left justified*/
+	cmd.map = 0;	/* Algo sequence: Gain - DRP - FIR - IIR */
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+	return 0;
+}
+
+/*
+ * sst_send_probe_cmd - attach/detach a probe (extractor or injector) at
+ * the point described by probe_cfg, transported over shared memory.
+ * Returns the IPC result from sst_fill_and_send_cmd().
+ */
+static int sst_send_probe_cmd(struct sst_data *sst, u16 probe_pipe_id,
+			      int mode, int switch_state,
+			      const struct sst_probe_config *probe_cfg)
+{
+	struct sst_cmd_probe cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	SST_FILL_DESTINATION(3, cmd.header.dst, SST_DEFAULT_CELL_NBR,
+			     probe_pipe_id, SST_DEFAULT_MODULE_ID);
+	cmd.header.command_id = SBA_PROBE;
+	cmd.header.length = sizeof(struct sst_cmd_probe)
+				 - sizeof(struct sst_dsp_header);
+	cmd.switch_state = switch_state;
+
+	SST_FILL_DESTINATION(2, cmd.probe_dst,
+			     probe_cfg->loc_id, probe_cfg->mod_id);
+
+	/* probe data goes through the shared-memory buffer, not an SSP */
+	cmd.shared_mem = 1;
+	cmd.probe_in = 0;
+	cmd.probe_out = 0;
+
+	cmd.probe_mode = mode;
+	cmd.sample_length = probe_cfg->cfg.s_length;
+	cmd.rate = probe_cfg->cfg.rate;
+	cmd.format = probe_cfg->cfg.format;
+	cmd.sm_buf_id = 1;
+
+	return sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				     probe_cfg->task_id, 0, &cmd,
+				     sizeof(cmd.header) + cmd.header.length);
+}
+
+static const struct snd_kcontrol_new sst_probe_controls[];
+static const struct sst_probe_config sst_probes[];
+
+#define SST_MAX_PROBE_STREAMS 8
+/*
+ * sst_dpcm_probe_send - connect/disconnect a probe point for a substream.
+ * @substream: probe substream index, 0 .. SST_MAX_PROBE_STREAMS - 1
+ * @direction: SNDRV_PCM_STREAM_CAPTURE selects an extractor, anything
+ *	else an injector
+ * @on: true to attach the probe point, false to detach it
+ *
+ * The probe point itself comes from the matching probe-connection
+ * kcontrol's cached value.  Returns 0 or a negative error code.
+ */
+int sst_dpcm_probe_send(struct snd_soc_platform *platform, u16 probe_pipe_id,
+			int substream, int direction, bool on)
+{
+	int switch_state = on ? SST_SWITCH_ON : SST_SWITCH_OFF;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	const struct sst_probe_config *probe_cfg;
+	struct sst_probe_value *probe_val;
+	char *type;
+	int offset;
+	int mode;
+
+	/* guard the sst_probe_controls[] lookup against bad indexes */
+	if (substream < 0 || substream >= SST_MAX_PROBE_STREAMS)
+		return -EINVAL;
+
+	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+		mode = SST_PROBE_EXTRACTOR;
+		offset = 0;
+		type = "extractor";
+	} else {
+		mode = SST_PROBE_INJECTOR;
+		offset = SST_MAX_PROBE_STREAMS;
+		type = "injector";
+	}
+	/* get the value of the probe connection kcontrol */
+	probe_val = (void *)sst_probe_controls[substream + offset].private_value;
+	probe_cfg = &sst_probes[probe_val->val];
+
+	pr_debug("%s: substream=%d, direction=%d\n", __func__, substream, direction);
+	pr_debug("%s: %s probe point at %s\n", __func__, type, probe_cfg->name);
+
+	return sst_send_probe_cmd(sst, probe_pipe_id, mode, switch_state, probe_cfg);
+}
+
+/* on/off switch feeding the aware mixer output */
+static const struct snd_kcontrol_new sst_mix_sw_aware =
+	SOC_SINGLE_EXT("switch", SST_MIX_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+/* selections for the shared bt/fm SSP (SSP1) */
+static const char * const sst_bt_fm_texts[] = {
+	"fm", "bt",
+};
+
+static const struct snd_kcontrol_new sst_bt_fm_mux =
+	SST_SSP_MUX_CTL("ssp1_out", 0, SST_MUX_REG, SST_BT_FM_MUX_SHIFT, sst_bt_fm_texts,
+			sst_mux_get, sst_mux_put);
+
+/* mux/domain indexes into sst_ssp_configs[][][] for each SSP user */
+#define SST_SSP_CODEC_MUX		0
+#define SST_SSP_CODEC_DOMAIN		0
+#define SST_SSP_MODEM_MUX		0
+#define SST_SSP_MODEM_DOMAIN		0
+#define SST_SSP_FM_MUX			0
+#define SST_SSP_FM_DOMAIN		0
+#define SST_SSP_BT_MUX			1
+#define SST_SSP_BT_NB_DOMAIN		0
+#define SST_SSP_BT_WB_DOMAIN		1
+
+/* shadow-register bit that selects the mux for each SSP; -1 = fixed */
+static const int sst_ssp_mux_shift[SST_NUM_SSPS] = {
+	[SST_SSP0] = -1,			/* no register shift, i.e. single mux value */
+	[SST_SSP1] = SST_BT_FM_MUX_SHIFT,
+	[SST_SSP2] = -1,
+};
+
+/* per-mux shadow-register bit that selects the domain; -1 = fixed */
+static const int sst_ssp_domain_shift[SST_NUM_SSPS][SST_MAX_SSP_MUX] = {
+	[SST_SSP0][0] = -1,			/* no domain shift, i.e. single domain */
+	[SST_SSP1] = {
+		[SST_SSP_FM_MUX] = -1,
+		[SST_SSP_BT_MUX] = SST_BT_MODE_SHIFT,
+	},
+	[SST_SSP2][0] = -1,
+};
+
+/*
+ * Per-SSP, per-mux, per-domain hardware configurations sent with
+ * SBA_HW_SET_SSP by sst_ssp_event().
+ * NOTE(review): slot counts/widths mirror the board wiring - confirm
+ * against the platform data before reusing on another board.
+ */
+static const struct sst_ssp_config
+sst_ssp_configs[SST_NUM_SSPS][SST_MAX_SSP_MUX][SST_MAX_SSP_DOMAINS] = {
+	[SST_SSP0] = {
+		[SST_SSP_MODEM_MUX] = {
+			[SST_SSP_MODEM_DOMAIN] = {
+				.ssp_id = SSP_MODEM,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NETWORK,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+		},
+	},
+	[SST_SSP1] = {
+		[SST_SSP_FM_MUX] = {
+			[SST_SSP_FM_DOMAIN] = {
+				.ssp_id = SSP_FM,
+				.bits_per_slot = 16,
+				.slots = 2,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_I2S,
+				.fs_width = 32,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0x3,
+				.start_delay = 0,
+			},
+		},
+		[SST_SSP_BT_MUX] = {
+			[SST_SSP_BT_NB_DOMAIN] = {
+				.ssp_id = SSP_BT,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_8_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+			[SST_SSP_BT_WB_DOMAIN] = {
+				.ssp_id = SSP_BT,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_16_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+		},
+	},
+	[SST_SSP2] = {
+		[SST_SSP_CODEC_MUX] = {
+			[SST_SSP_CODEC_DOMAIN] = {
+				.ssp_id = SSP_CODEC,
+				.bits_per_slot = 24,
+				.slots = 4,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NETWORK,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0xF,
+				.start_delay = 0,
+			},
+		},
+	},
+};
+
+/* bundles one SSP's config/mux/domain tables for a widget's priv data */
+#define SST_SSP_CFG(wssp_no)						\
+	(const struct sst_ssp_cfg){ .ssp_config = &sst_ssp_configs[wssp_no],	\
+				    .ssp_number = wssp_no,			\
+				    .mux_shift = &sst_ssp_mux_shift[wssp_no],	\
+				    .domain_shift = &sst_ssp_domain_shift[wssp_no], }
+
+static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("tone"),
+ SND_SOC_DAPM_OUTPUT("aware"),
+ SND_SOC_DAPM_OUTPUT("vad"),
+ SST_SSP_INPUT("modem_in", sst_ssp_event, SST_SSP_CFG(SST_SSP0)),
+ SST_SSP_AIF_IN("codec_in0", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+ SST_SSP_AIF_IN("codec_in1", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+ SST_SSP_INPUT("bt_fm_in", sst_ssp_event, SST_SSP_CFG(SST_SSP1)),
+ SST_SSP_OUTPUT("modem_out", sst_ssp_event, SST_SSP_CFG(SST_SSP0)),
+ SST_SSP_AIF_OUT("codec_out0", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+ SST_SSP_AIF_OUT("codec_out1", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+ SST_SSP_OUTPUT("bt_fm_out", sst_ssp_event, SST_SSP_CFG(SST_SSP1)),
+
+ /* Media Paths */
+ /* MediaX IN paths are set via ALLOC, so no SET_MEDIA_PATH command */
+ SST_PATH_INPUT("media0_in", SST_TASK_MMX, SST_SWM_IN_MEDIA0, NULL),
+ SST_PATH_INPUT("media1_in", SST_TASK_MMX, SST_SWM_IN_MEDIA1, NULL),
+ SST_PATH_INPUT("media2_in", SST_TASK_MMX, SST_SWM_IN_MEDIA2, sst_set_media_path),
+ SST_PATH_INPUT("media3_in", SST_TASK_MMX, SST_SWM_IN_MEDIA3, NULL),
+ SST_PATH_OUTPUT("media0_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA0, sst_set_media_path),
+ SST_PATH_OUTPUT("media1_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA1, sst_set_media_path),
+
+ /* SBA PCM Paths */
+ SST_PATH_INPUT("pcm0_in", SST_TASK_SBA, SST_SWM_IN_PCM0, sst_set_media_path),
+ SST_PATH_INPUT("pcm1_in", SST_TASK_SBA, SST_SWM_IN_PCM1, sst_set_media_path),
+ SST_PATH_OUTPUT("pcm0_out", SST_TASK_SBA, SST_SWM_OUT_PCM0, sst_set_media_path),
+ SST_PATH_OUTPUT("pcm1_out", SST_TASK_SBA, SST_SWM_OUT_PCM1, sst_set_media_path),
+ SST_PATH_OUTPUT("pcm2_out", SST_TASK_SBA, SST_SWM_OUT_PCM2, sst_set_media_path),
+ /* TODO: check if this needs SET_MEDIA_PATH command*/
+ SST_PATH_INPUT("low_pcm0_in", SST_TASK_SBA, SST_SWM_IN_LOW_PCM0, NULL),
+
+ SST_PATH_INPUT("voip_in", SST_TASK_SBA, SST_SWM_IN_VOIP, sst_set_media_path),
+ SST_PATH_OUTPUT("voip_out", SST_TASK_SBA, SST_SWM_OUT_VOIP, sst_set_media_path),
+ SST_PATH_OUTPUT("aware_out", SST_TASK_SBA, SST_SWM_OUT_AWARE, sst_set_media_path),
+ SST_PATH_OUTPUT("vad_out", SST_TASK_SBA, SST_SWM_OUT_VAD, sst_set_media_path),
+
+ /* SBA Loops */
+ SST_PATH_INPUT("sprot_loop_in", SST_TASK_SBA, SST_SWM_IN_SPROT_LOOP, NULL),
+ SST_PATH_INPUT("media_loop1_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP1, NULL),
+ SST_PATH_INPUT("media_loop2_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP2, NULL),
+ SST_PATH_MEDIA_LOOP_OUTPUT("sprot_loop_out", SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP, SST_FMT_MONO, sst_set_media_loop),
+ SST_PATH_MEDIA_LOOP_OUTPUT("media_loop1_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1, SST_FMT_MONO, sst_set_media_loop),
+ SST_PATH_MEDIA_LOOP_OUTPUT("media_loop2_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2, SST_FMT_STEREO, sst_set_media_loop),
+
+ /* TODO: need to send command */
+ SST_PATH_INPUT("sidetone_in", SST_TASK_SBA, SST_SWM_IN_SIDETONE, NULL),
+ SST_PATH_INPUT("tone_in", SST_TASK_SBA, SST_SWM_IN_TONE, NULL),
+ SST_PATH_INPUT("bt_in", SST_TASK_SBA, SST_SWM_IN_BT, NULL),
+ SST_PATH_INPUT("fm_in", SST_TASK_SBA, SST_SWM_IN_FM, NULL),
+ SST_PATH_OUTPUT("bt_out", SST_TASK_SBA, SST_SWM_OUT_BT, NULL),
+ SST_PATH_OUTPUT("fm_out", SST_TASK_SBA, SST_SWM_OUT_FM, NULL),
+
+ /* SBA Voice Paths */
+ SST_PATH_INPUT("speech_in", SST_TASK_SBA, SST_SWM_IN_SPEECH, sst_set_speech_path),
+ SST_PATH_INPUT("txspeech_in", SST_TASK_SBA, SST_SWM_IN_TXSPEECH, sst_set_speech_path),
+ SST_PATH_OUTPUT("hf_sns_out", SST_TASK_SBA, SST_SWM_OUT_HF_SNS, sst_set_speech_path),
+ SST_PATH_OUTPUT("hf_out", SST_TASK_SBA, SST_SWM_OUT_HF, sst_set_speech_path),
+ SST_PATH_OUTPUT("speech_out", SST_TASK_SBA, SST_SWM_OUT_SPEECH, sst_set_speech_path),
+ SST_PATH_OUTPUT("rxspeech_out", SST_TASK_SBA, SST_SWM_OUT_RXSPEECH, sst_set_speech_path),
+
+ /* Media Mixers */
+ SST_SWM_MIXER("media0_out mix 0", SST_MIX_MEDIA0, SST_TASK_MMX, SST_SWM_OUT_MEDIA0,
+ sst_mix_media0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("media1_out mix 0", SST_MIX_MEDIA1, SST_TASK_MMX, SST_SWM_OUT_MEDIA1,
+ sst_mix_media1_controls, sst_swm_mixer_event),
+
+ /* SBA PCM mixers */
+ SST_SWM_MIXER("pcm0_out mix 0", SST_MIX_PCM0, SST_TASK_SBA, SST_SWM_OUT_PCM0,
+ sst_mix_pcm0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("pcm1_out mix 0", SST_MIX_PCM1, SST_TASK_SBA, SST_SWM_OUT_PCM1,
+ sst_mix_pcm1_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("pcm2_out mix 0", SST_MIX_PCM2, SST_TASK_SBA, SST_SWM_OUT_PCM2,
+ sst_mix_pcm2_controls, sst_swm_mixer_event),
+
+ /* SBA Loop mixers */
+ SST_SWM_MIXER("sprot_loop_out mix 0", SST_MIX_LOOP0, SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP,
+ sst_mix_sprot_l0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("media_loop1_out mix 0", SST_MIX_LOOP1, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1,
+ sst_mix_media_l1_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("media_loop2_out mix 0", SST_MIX_LOOP2, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2,
+ sst_mix_media_l2_controls, sst_swm_mixer_event),
+
+ SST_SWM_MIXER("voip_out mix 0", SST_MIX_VOIP, SST_TASK_SBA, SST_SWM_OUT_VOIP,
+ sst_mix_voip_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("aware_out mix 0", SST_MIX_AWARE, SST_TASK_SBA, SST_SWM_OUT_AWARE,
+ sst_mix_aware_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("vad_out mix 0", SST_MIX_VAD, SST_TASK_SBA, SST_SWM_OUT_VAD,
+ sst_mix_vad_controls, sst_swm_mixer_event),
+
+ /* SBA Voice mixers */
+ SST_SWM_MIXER("hf_sns_out mix 0", SST_MIX_HF_SNS, SST_TASK_SBA, SST_SWM_OUT_HF_SNS,
+ sst_mix_hf_sns_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("hf_out mix 0", SST_MIX_HF, SST_TASK_SBA, SST_SWM_OUT_HF,
+ sst_mix_hf_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("speech_out mix 0", SST_MIX_SPEECH, SST_TASK_SBA, SST_SWM_OUT_SPEECH,
+ sst_mix_speech_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("rxspeech_out mix 0", SST_MIX_RXSPEECH, SST_TASK_SBA, SST_SWM_OUT_RXSPEECH,
+ sst_mix_rxspeech_controls, sst_swm_mixer_event),
+
+ /* SBA Backend mixers */
+ SST_SWM_MIXER("codec_out0 mix 0", SST_MIX_CODEC0, SST_TASK_SBA, SST_SWM_OUT_CODEC0,
+ sst_mix_codec0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("codec_out1 mix 0", SST_MIX_CODEC1, SST_TASK_SBA, SST_SWM_OUT_CODEC1,
+ sst_mix_codec1_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("bt_out mix 0", SST_MIX_BT, SST_TASK_SBA, SST_SWM_OUT_BT,
+ sst_mix_bt_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("fm_out mix 0", SST_MIX_FM, SST_TASK_SBA, SST_SWM_OUT_FM,
+ sst_mix_fm_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("modem_out mix 0", SST_MIX_MODEM, SST_TASK_SBA, SST_SWM_OUT_MODEM,
+ sst_mix_modem_controls, sst_swm_mixer_event),
+
+ SND_SOC_DAPM_SWITCH("aware_out aware 0", SND_SOC_NOPM, 0, 0, &sst_mix_sw_aware),
+ SND_SOC_DAPM_MUX("ssp1_out mux 0", SND_SOC_NOPM, 0, 0, &sst_bt_fm_mux),
+
+ SND_SOC_DAPM_SUPPLY("VBTimer", SND_SOC_NOPM, 0, 0,
+ sst_vb_trigger_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+ {"media0_in", NULL, "Compress Playback"},
+ {"media1_in", NULL, "Headset Playback"},
+ {"media2_in", NULL, "pcm0_out"},
+ {"media3_in", NULL, "Deepbuffer Playback"},
+
+ {"media0_out mix 0", "media0_in", "media0_in"},
+ {"media0_out mix 0", "media1_in", "media1_in"},
+ {"media0_out mix 0", "media2_in", "media2_in"},
+ {"media0_out mix 0", "media3_in", "media3_in"},
+ {"media1_out mix 0", "media0_in", "media0_in"},
+ {"media1_out mix 0", "media1_in", "media1_in"},
+ {"media1_out mix 0", "media2_in", "media2_in"},
+ {"media1_out mix 0", "media3_in", "media3_in"},
+
+ {"media0_out", NULL, "media0_out mix 0"},
+ {"media1_out", NULL, "media1_out mix 0"},
+ {"pcm0_in", NULL, "media0_out"},
+ {"pcm1_in", NULL, "media1_out"},
+
+ {"Headset Capture", NULL, "pcm1_out"},
+ {"Headset Capture", NULL, "pcm2_out"},
+ {"pcm0_out", NULL, "pcm0_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("pcm0_out mix 0"),
+ {"pcm1_out", NULL, "pcm1_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("pcm1_out mix 0"),
+ {"pcm2_out", NULL, "pcm2_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("pcm2_out mix 0"),
+
+ {"media_loop1_in", NULL, "media_loop1_out"},
+ {"media_loop1_out", NULL, "media_loop1_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("media_loop1_out mix 0"),
+ {"media_loop2_in", NULL, "media_loop2_out"},
+ {"media_loop2_out", NULL, "media_loop2_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("media_loop2_out mix 0"),
+ {"sprot_loop_in", NULL, "sprot_loop_out"},
+ {"sprot_loop_out", NULL, "sprot_loop_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("sprot_loop_out mix 0"),
+
+ {"voip_in", NULL, "VOIP Playback"},
+ {"VOIP Capture", NULL, "voip_out"},
+ {"voip_out", NULL, "voip_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("voip_out mix 0"),
+
+ {"aware", NULL, "aware_out"},
+ {"aware_out", NULL, "aware_out aware 0"},
+ {"aware_out aware 0", "switch", "aware_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("aware_out mix 0"),
+ {"vad", NULL, "vad_out"},
+ {"vad_out", NULL, "vad_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("vad_out mix 0"),
+
+ {"codec_out0", NULL, "codec_out0 mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("codec_out0 mix 0"),
+ {"codec_out1", NULL, "codec_out1 mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("codec_out1 mix 0"),
+ {"modem_out", NULL, "modem_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("modem_out mix 0"),
+
+ {"bt_fm_out", NULL, "ssp1_out mux 0"},
+ {"ssp1_out mux 0", "bt", "bt_out"},
+ {"ssp1_out mux 0", "fm", "fm_out"},
+ {"bt_out", NULL, "bt_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("bt_out mix 0"),
+ {"fm_out", NULL, "fm_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("fm_out mix 0"),
+ {"bt_in", NULL, "bt_fm_in"},
+ {"fm_in", NULL, "bt_fm_in"},
+
+ /* Uplink processing */
+ {"txspeech_in", NULL, "hf_sns_out"},
+ {"txspeech_in", NULL, "hf_out"},
+ {"txspeech_in", NULL, "speech_out"},
+
+ {"hf_sns_out", NULL, "hf_sns_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("hf_sns_out mix 0"),
+ {"hf_out", NULL, "hf_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("hf_out mix 0"),
+ {"speech_out", NULL, "speech_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("speech_out mix 0"),
+
+ /* Downlink processing */
+ {"speech_in", NULL, "rxspeech_out"},
+ {"rxspeech_out", NULL, "rxspeech_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("rxspeech_out mix 0"),
+
+ /* TODO: add Tone inputs */
+ /* TODO: add Low Latency stream support */
+
+ {"Headset Capture", NULL, "VBTimer"},
+ {"Headset Playback", NULL, "VBTimer"},
+ {"Deepbuffer Playback", NULL, "VBTimer"},
+ {"Compress Playback", NULL, "VBTimer"},
+ {"VOIP Playback", NULL, "VBTimer"},
+ {"aware", NULL, "VBTimer"},
+ {"modem_in", NULL, "VBTimer"},
+ {"modem_out", NULL, "VBTimer"},
+ {"bt_fm_in", NULL, "VBTimer"},
+ {"bt_fm_out", NULL, "VBTimer"},
+};
+
+static const char * const sst_nb_wb_texts[] = {
+ "narrowband", "wideband",
+};
+
+static const struct snd_kcontrol_new sst_mux_controls[] = {
+ SST_SSP_MUX_CTL("domain voice mode", 0, SST_MUX_REG, SST_VOICE_MODE_SHIFT, sst_nb_wb_texts,
+ sst_mode_get, sst_mode_put),
+ SST_SSP_MUX_CTL("domain bt mode", 0, SST_MUX_REG, SST_BT_MODE_SHIFT, sst_nb_wb_texts,
+ sst_mode_get, sst_mode_put),
+};
+
+static const char * const slot_names[] = {
+ "none",
+ "slot 0", "slot 1", "slot 2", "slot 3",
+ "slot 4", "slot 5", "slot 6", "slot 7", /* not supported by FW */
+};
+
+static const char * const channel_names[] = {
+ "none",
+ "codec_out0_0", "codec_out0_1", "codec_out1_0", "codec_out1_1",
+ "codec_out2_0", "codec_out2_1", "codec_out3_0", "codec_out3_1", /* not supported by FW */
+};
+
+#define SST_INTERLEAVER(xpname, slot_name, slotno) \
+ SST_SSP_SLOT_CTL(xpname, "interleaver", slot_name, slotno, 1, \
+ channel_names, sst_slot_get, sst_slot_put)
+
+#define SST_DEINTERLEAVER(xpname, channel_name, channel_no) \
+ SST_SSP_SLOT_CTL(xpname, "deinterleaver", channel_name, channel_no, 0, \
+ slot_names, sst_slot_get, sst_slot_put)
+
+static const struct snd_kcontrol_new sst_slot_controls[] = {
+ SST_INTERLEAVER("codec_out", "slot 0", 0),
+ SST_INTERLEAVER("codec_out", "slot 1", 1),
+ SST_INTERLEAVER("codec_out", "slot 2", 2),
+ SST_INTERLEAVER("codec_out", "slot 3", 3),
+ SST_DEINTERLEAVER("codec_in", "codec_in0_0", 0),
+ SST_DEINTERLEAVER("codec_in", "codec_in0_1", 1),
+ SST_DEINTERLEAVER("codec_in", "codec_in1_0", 2),
+ SST_DEINTERLEAVER("codec_in", "codec_in1_1", 3),
+};
+
+#define SST_NUM_PROBE_CONNECTION_PTS 31
+static const struct sst_probe_config sst_probes[SST_NUM_PROBE_CONNECTION_PTS] = {
+ /* TODO: get this struct from FW config data */
+ /* TODO: only gain outputs supported currently */
+ { "media0_in gain", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+ { "media1_in gain", SST_PATH_INDEX_MEDIA1_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+ { "media2_in gain", SST_PATH_INDEX_MEDIA2_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+ { "media3_in gain", SST_PATH_INDEX_MEDIA3_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+ { "pcm0_in gain", SST_PATH_INDEX_PCM0_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "pcm1_in gain", SST_PATH_INDEX_PCM1_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "pcm1_out gain", SST_PATH_INDEX_PCM1_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "pcm2_out gain", SST_PATH_INDEX_PCM2_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "voip_in gain", SST_PATH_INDEX_VOIP_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "voip_out gain", SST_PATH_INDEX_VOIP_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "aware_out gain", SST_PATH_INDEX_AWARE_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "vad_out gain", SST_PATH_INDEX_VAD_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "hf_sns_out gain", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "hf_out gain", SST_PATH_INDEX_HF_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "speech_out gain", SST_PATH_INDEX_SPEECH_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "txspeech_in gain", SST_PATH_INDEX_TX_SPEECH_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "rxspeech_out gain", SST_PATH_INDEX_RX_SPEECH_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "speech_in gain", SST_PATH_INDEX_SPEECH_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop1_out gain", SST_PATH_INDEX_MEDIA_LOOP1_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop2_out gain", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "tone_in gain", SST_PATH_INDEX_TONE_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "codec_out0 gain", SST_PATH_INDEX_CODEC_OUT0, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "codec_out1 gain", SST_PATH_INDEX_CODEC_OUT1, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "bt_out gain", SST_PATH_INDEX_BT_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "fm_out gain", SST_PATH_INDEX_FM_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "modem_out gain", SST_PATH_INDEX_MODEM_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "codec_in0 gain", SST_PATH_INDEX_CODEC_IN0, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "codec_in1 gain", SST_PATH_INDEX_CODEC_IN1, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "bt_in gain", SST_PATH_INDEX_BT_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "fm_in gain", SST_PATH_INDEX_FM_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+ { "modem_in gain", SST_PATH_INDEX_MODEM_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+};
+
+/* initialized based on names in sst_probes array */
+static const char *sst_probe_enum_texts[SST_NUM_PROBE_CONNECTION_PTS];
+static const SOC_ENUM_SINGLE_EXT_DECL(sst_probe_enum, sst_probe_enum_texts);
+
+#define SST_PROBE_CTL(name, num) \
+ SST_PROBE_ENUM(SST_PROBE_CTL_NAME(name, num, "connection"), \
+ sst_probe_enum, sst_probe_get, sst_probe_put)
+ /* TODO: implement probe gains
+ SOC_SINGLE_EXT_TLV(SST_PROBE_CTL_NAME(name, num, "gains"), xreg, xshift,
+ xmax, xinv, xget, xput, sst_gain_tlv_common)
+ */
+
+static const struct snd_kcontrol_new sst_probe_controls[] = {
+ SST_PROBE_CTL("probe out", 0),
+ SST_PROBE_CTL("probe out", 1),
+ SST_PROBE_CTL("probe out", 2),
+ SST_PROBE_CTL("probe out", 3),
+ SST_PROBE_CTL("probe out", 4),
+ SST_PROBE_CTL("probe out", 5),
+ SST_PROBE_CTL("probe out", 6),
+ SST_PROBE_CTL("probe out", 7),
+ SST_PROBE_CTL("probe in", 0),
+ SST_PROBE_CTL("probe in", 1),
+ SST_PROBE_CTL("probe in", 2),
+ SST_PROBE_CTL("probe in", 3),
+ SST_PROBE_CTL("probe in", 4),
+ SST_PROBE_CTL("probe in", 5),
+ SST_PROBE_CTL("probe in", 6),
+ SST_PROBE_CTL("probe in", 7),
+};
+
+/* Gain helper with min/max set */
+#define SST_GAIN(name, path_id, task_id, instance, gain_var) \
+ SST_GAIN_KCONTROLS(name, "gain", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE, \
+ SST_GAIN_TC_MIN, SST_GAIN_TC_MAX, \
+ sst_gain_get, sst_gain_put, \
+ SST_MODULE_ID_GAIN_CELL, path_id, instance, task_id, \
+ sst_gain_tlv_common, gain_var)
+
+#define SST_VOLUME(name, path_id, task_id, instance, gain_var) \
+ SST_GAIN_KCONTROLS(name, "volume", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE, \
+ SST_GAIN_TC_MIN, SST_GAIN_TC_MAX, \
+ sst_gain_get, sst_gain_put, \
+ SST_MODULE_ID_VOLUME, path_id, instance, task_id, \
+ sst_gain_tlv_common, gain_var)
+
+#define SST_NUM_GAINS 36
+static struct sst_gain_value sst_gains[SST_NUM_GAINS];
+
+static const struct snd_kcontrol_new sst_gain_controls[] = {
+ SST_GAIN("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[0]),
+ SST_GAIN("media1_in", SST_PATH_INDEX_MEDIA1_IN, SST_TASK_MMX, 0, &sst_gains[1]),
+ SST_GAIN("media2_in", SST_PATH_INDEX_MEDIA2_IN, SST_TASK_MMX, 0, &sst_gains[2]),
+ SST_GAIN("media3_in", SST_PATH_INDEX_MEDIA3_IN, SST_TASK_MMX, 0, &sst_gains[3]),
+
+ SST_GAIN("pcm0_in", SST_PATH_INDEX_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[4]),
+ SST_GAIN("pcm1_in", SST_PATH_INDEX_PCM1_IN, SST_TASK_SBA, 0, &sst_gains[5]),
+ SST_GAIN("low_pcm0_in", SST_PATH_INDEX_LOW_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[6]),
+ SST_GAIN("pcm1_out", SST_PATH_INDEX_PCM1_OUT, SST_TASK_SBA, 0, &sst_gains[7]),
+	SST_GAIN("pcm2_out", SST_PATH_INDEX_PCM2_OUT, SST_TASK_SBA, 0, &sst_gains[8]),
+
+ SST_GAIN("voip_in", SST_PATH_INDEX_VOIP_IN, SST_TASK_SBA, 0, &sst_gains[9]),
+ SST_GAIN("voip_out", SST_PATH_INDEX_VOIP_OUT, SST_TASK_SBA, 0, &sst_gains[10]),
+ SST_GAIN("tone_in", SST_PATH_INDEX_TONE_IN, SST_TASK_SBA, 0, &sst_gains[11]),
+
+ SST_GAIN("aware_out", SST_PATH_INDEX_AWARE_OUT, SST_TASK_SBA, 0, &sst_gains[12]),
+ SST_GAIN("vad_out", SST_PATH_INDEX_VAD_OUT, SST_TASK_SBA, 0, &sst_gains[13]),
+
+ SST_GAIN("hf_sns_out", SST_PATH_INDEX_HF_SNS_OUT, SST_TASK_SBA, 0, &sst_gains[14]),
+ SST_GAIN("hf_out", SST_PATH_INDEX_HF_OUT, SST_TASK_SBA, 0, &sst_gains[15]),
+ SST_GAIN("speech_out", SST_PATH_INDEX_SPEECH_OUT, SST_TASK_SBA, 0, &sst_gains[16]),
+ SST_GAIN("txspeech_in", SST_PATH_INDEX_TX_SPEECH_IN, SST_TASK_SBA, 0, &sst_gains[17]),
+ SST_GAIN("rxspeech_out", SST_PATH_INDEX_RX_SPEECH_OUT, SST_TASK_SBA, 0, &sst_gains[18]),
+ SST_GAIN("speech_in", SST_PATH_INDEX_SPEECH_IN, SST_TASK_SBA, 0, &sst_gains[19]),
+
+ SST_GAIN("codec_in0", SST_PATH_INDEX_CODEC_IN0, SST_TASK_SBA, 0, &sst_gains[20]),
+ SST_GAIN("codec_in1", SST_PATH_INDEX_CODEC_IN1, SST_TASK_SBA, 0, &sst_gains[21]),
+ SST_GAIN("codec_out0", SST_PATH_INDEX_CODEC_OUT0, SST_TASK_SBA, 0, &sst_gains[22]),
+ SST_GAIN("codec_out1", SST_PATH_INDEX_CODEC_OUT1, SST_TASK_SBA, 0, &sst_gains[23]),
+ SST_GAIN("bt_out", SST_PATH_INDEX_BT_OUT, SST_TASK_SBA, 0, &sst_gains[24]),
+ SST_GAIN("fm_out", SST_PATH_INDEX_FM_OUT, SST_TASK_SBA, 0, &sst_gains[25]),
+ SST_GAIN("bt_in", SST_PATH_INDEX_BT_IN, SST_TASK_SBA, 0, &sst_gains[26]),
+ SST_GAIN("fm_in", SST_PATH_INDEX_FM_IN, SST_TASK_SBA, 0, &sst_gains[27]),
+ SST_GAIN("modem_in", SST_PATH_INDEX_MODEM_IN, SST_TASK_SBA, 0, &sst_gains[28]),
+ SST_GAIN("modem_out", SST_PATH_INDEX_MODEM_OUT, SST_TASK_SBA, 0, &sst_gains[29]),
+ SST_GAIN("media_loop1_out", SST_PATH_INDEX_MEDIA_LOOP1_OUT, SST_TASK_SBA, 0, &sst_gains[30]),
+ SST_GAIN("media_loop2_out", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_TASK_SBA, 0, &sst_gains[31]),
+ SST_GAIN("sprot_loop_out", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_TASK_SBA, 0, &sst_gains[32]),
+ SST_VOLUME("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[33]),
+ SST_GAIN("sidetone_in", SST_PATH_INDEX_SIDETONE_IN, SST_TASK_SBA, 0, &sst_gains[34]),
+ SST_GAIN("speech_out", SST_PATH_INDEX_SPEECH_OUT, SST_TASK_FBA_UL, 1, &sst_gains[35]),
+};
+
+static const struct snd_kcontrol_new sst_algo_controls[] = {
+ SST_ALGO_KCONTROL_BYTES("media_loop1_out", "fir", 138, SST_MODULE_ID_FIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop1_out", "iir", 300, SST_MODULE_ID_IIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop1_out", "mdrp", 76, SST_MODULE_ID_MDRP,
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+ SST_ALGO_KCONTROL_BYTES("media_loop2_out", "fir", 272, SST_MODULE_ID_FIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop2_out", "iir", 300, SST_MODULE_ID_IIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop2_out", "mdrp", 76, SST_MODULE_ID_MDRP,
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+ SST_ALGO_KCONTROL_BYTES("aware_out", "fir", 272, SST_MODULE_ID_FIR_24,
+ SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+ SST_ALGO_KCONTROL_BYTES("aware_out", "iir", 300, SST_MODULE_ID_IIR_24,
+ SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("aware_out", "aware", 48, SST_MODULE_ID_CONTEXT_ALGO_AWARE,
+ SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_AWARE, AWARE_ENV_CLASS_PARAMS),
+ SST_ALGO_KCONTROL_BYTES("vad_out", "fir", 272, SST_MODULE_ID_FIR_24,
+ SST_PATH_INDEX_VAD_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+ SST_ALGO_KCONTROL_BYTES("vad_out", "iir", 300, SST_MODULE_ID_IIR_24,
+ SST_PATH_INDEX_VAD_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("sprot_loop_out", "lpro", 192, SST_MODULE_ID_SPROT,
+ SST_PATH_INDEX_SPROT_LOOP_OUT, 0, SST_TASK_SBA, SBA_VB_LPRO),
+ SST_ALGO_KCONTROL_BYTES("codec_in0", "dcr", 300, SST_MODULE_ID_FILT_DCR,
+ SST_PATH_INDEX_CODEC_IN0, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("codec_in1", "dcr", 300, SST_MODULE_ID_FILT_DCR,
+ SST_PATH_INDEX_CODEC_IN1, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ /* Uplink */
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "fir_speech", 136, SST_MODULE_ID_FIR_16,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_FIR),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "fir_hf_sns", 136, SST_MODULE_ID_FIR_16,
+ SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_FIR),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "iir_speech", 48, SST_MODULE_ID_IIR_16,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_IIR),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "iir_hf_sns", 48, SST_MODULE_ID_IIR_16,
+ SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_IIR),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "aec", 640, SST_MODULE_ID_AEC,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_AEC),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "nr", 38, SST_MODULE_ID_NR,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_NR_UL),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "agc", 58, SST_MODULE_ID_AGC,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_AGC),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "biquad", 22, SST_MODULE_ID_DRP,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_BIQUAD_D_C),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "compr", 36, SST_MODULE_ID_DRP,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_DUAL_BAND_COMP),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "sns", 324, SST_MODULE_ID_NR_SNS,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SNS),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "ser", 42, SST_MODULE_ID_SER,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SER),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "cni", 48, SST_MODULE_ID_CNI_TX,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_TX_CNI),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "ref", 24, SST_MODULE_ID_REF_LINE,
+ SST_PATH_INDEX_HF_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_REF_LINE),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "delay", 6, SST_MODULE_ID_EDL,
+ SST_PATH_INDEX_HF_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_DELAY_LINE),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "bmf", 264, SST_MODULE_ID_BMF,
+ SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_BMF),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "dnr", 18, SST_MODULE_ID_DNR,
+ SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_DNR),
+ /* Downlink */
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "ana", 52, SST_MODULE_ID_ANA,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_ANA),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "fir", 136, SST_MODULE_ID_FIR_16,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_FIR),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "iir", 48, SST_MODULE_ID_IIR_16,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_IIR),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "nr", 38, SST_MODULE_ID_NR,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_NR_DL),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "biquad", 22, SST_MODULE_ID_DRP,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_BIQUAD_D_C),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "compr", 36, SST_MODULE_ID_DRP,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_DUAL_BAND_COMP),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "cni", 48, SST_MODULE_ID_CNI,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_RX_CNI),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "bwx", 54, SST_MODULE_ID_BWX,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_BWX),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "gmm", 586, SST_MODULE_ID_BWX,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_GMM),
+ SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "glc", 18, SST_MODULE_ID_GLC,
+ SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_GLC),
+};
+
+static const struct snd_kcontrol_new sst_debug_controls[] = {
+ SND_SOC_BYTES_EXT("sst debug byte control", SST_MAX_BIN_BYTES,
+ sst_byte_control_get, sst_byte_control_set),
+};
+
+static inline bool is_sst_dapm_widget(struct snd_soc_dapm_widget *w)
+{
+ if ((w->id == snd_soc_dapm_pga) ||
+ (w->id == snd_soc_dapm_aif_in) ||
+ (w->id == snd_soc_dapm_aif_out) ||
+ (w->id == snd_soc_dapm_input) ||
+ (w->id == snd_soc_dapm_output) ||
+ (w->id == snd_soc_dapm_mixer))
+ return true;
+ else
+ return false;
+}
+
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
+{
+ struct snd_soc_platform *platform = dai->platform;
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_dapm_path *p = NULL;
+
+ pr_debug("%s: enter, dai-name=%s dir=%d\n", __func__, dai->name, stream);
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ pr_debug("Stream name=%s\n", dai->playback_widget->name);
+ w = dai->playback_widget;
+ list_for_each_entry(p, &w->sinks, list_source) {
+ if (p->connected && !p->connected(w, p->sink))
+ continue;
+
+ if (p->connect && p->sink->power && is_sst_dapm_widget(p->sink)) {
+ struct sst_ids *ids = p->sink->priv;
+
+ pr_debug("send gains for widget=%s\n", p->sink->name);
+ sst_set_pipe_gain(ids, sst, mute);
+ }
+ }
+ } else {
+ pr_debug("Stream name=%s\n", dai->capture_widget->name);
+ w = dai->capture_widget;
+ list_for_each_entry(p, &w->sources, list_sink) {
+ if (p->connected && !p->connected(w, p->sink))
+ continue;
+
+ if (p->connect && p->source->power && is_sst_dapm_widget(p->source)) {
+ struct sst_ids *ids = p->source->priv;
+
+ pr_debug("send gain for widget=%s\n", p->source->name);
+ sst_set_pipe_gain(ids, sst, mute);
+ }
+ }
+ }
+ return 0;
+}
+
+static int sst_fill_module_list(struct snd_kcontrol *kctl,
+ struct snd_soc_dapm_widget *w, int type)
+{
+ struct module *module = NULL;
+ struct sst_ids *ids = w->priv;
+
+ module = devm_kzalloc(w->platform->dev, sizeof(*module), GFP_KERNEL);
+ if (!module) {
+ pr_err("kzalloc block failed\n");
+ return -ENOMEM;
+ }
+
+ if (type == SST_MODULE_GAIN) {
+ struct sst_gain_mixer_control *mc = (void *)kctl->private_value;
+
+ mc->w = w;
+ module->kctl = kctl;
+ list_add_tail(&module->node, &ids->gain_list);
+ } else if (type == SST_MODULE_ALGO) {
+ struct sst_algo_control *bc = (void *)kctl->private_value;
+
+ bc->w = w;
+ module->kctl = kctl;
+ list_add_tail(&module->node, &ids->algo_list);
+ }
+
+ return 0;
+}
+
+static int sst_fill_widget_module_info(struct snd_soc_dapm_widget *w,
+ struct snd_soc_platform *platform)
+{
+ struct snd_kcontrol *kctl;
+ int index, ret = 0;
+ struct snd_card *card = platform->card->snd_card;
+ char *idx;
+
+ down_read(&card->controls_rwsem);
+
+ list_for_each_entry(kctl, &card->controls, list) {
+ idx = strstr(kctl->id.name, " ");
+ if (idx == NULL)
+ continue;
+ index = strlen(kctl->id.name) - strlen(idx);
+ if (strstr(kctl->id.name, "volume") &&
+ !strncmp(kctl->id.name, w->name, index))
+ ret = sst_fill_module_list(kctl, w, SST_MODULE_GAIN);
+ else if (strstr(kctl->id.name, "params") &&
+ !strncmp(kctl->id.name, w->name, index))
+ ret = sst_fill_module_list(kctl, w, SST_MODULE_ALGO);
+ else if (strstr(kctl->id.name, "mute") &&
+ !strncmp(kctl->id.name, w->name, index)) {
+ struct sst_gain_mixer_control *mc = (void *)kctl->private_value;
+ mc->w = w;
+ }
+ if (ret < 0) {
+ up_read(&card->controls_rwsem);
+ return ret;
+ }
+ }
+ up_read(&card->controls_rwsem);
+ return 0;
+}
+
+static int sst_map_modules_to_pipe(struct snd_soc_platform *platform)
+{
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_dapm_context *dapm = &platform->dapm;
+ int ret = 0;
+
+ list_for_each_entry(w, &dapm->card->widgets, list) {
+ if (w->platform && is_sst_dapm_widget(w) && (w->priv)) {
+ struct sst_ids *ids = w->priv;
+
+ pr_debug("widget type=%d name=%s", w->id, w->name);
+ INIT_LIST_HEAD(&ids->algo_list);
+ INIT_LIST_HEAD(&ids->gain_list);
+ ret = sst_fill_widget_module_info(w, platform);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform)
+{
+ int i, ret = 0;
+ struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+ sst->byte_stream = devm_kzalloc(platform->dev,
+ SST_MAX_BIN_BYTES, GFP_KERNEL);
+ if (!sst->byte_stream) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ sst->widget = devm_kzalloc(platform->dev,
+ SST_NUM_WIDGETS * sizeof(*sst->widget),
+ GFP_KERNEL);
+ if (!sst->widget) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ snd_soc_dapm_new_controls(&platform->dapm, sst_dapm_widgets,
+ ARRAY_SIZE(sst_dapm_widgets));
+ snd_soc_dapm_add_routes(&platform->dapm, intercon,
+ ARRAY_SIZE(intercon));
+ snd_soc_dapm_new_widgets(&platform->dapm);
+
+ for (i = 0; i < SST_NUM_GAINS; i++) {
+ sst_gains[i].mute = SST_GAIN_MUTE_DEFAULT;
+ sst_gains[i].l_gain = SST_GAIN_VOLUME_DEFAULT;
+ sst_gains[i].r_gain = SST_GAIN_VOLUME_DEFAULT;
+ sst_gains[i].ramp_duration = SST_GAIN_RAMP_DURATION_DEFAULT;
+ }
+
+ snd_soc_add_platform_controls(platform, sst_gain_controls,
+ ARRAY_SIZE(sst_gain_controls));
+
+ snd_soc_add_platform_controls(platform, sst_algo_controls,
+ ARRAY_SIZE(sst_algo_controls));
+ snd_soc_add_platform_controls(platform, sst_slot_controls,
+ ARRAY_SIZE(sst_slot_controls));
+ snd_soc_add_platform_controls(platform, sst_mux_controls,
+ ARRAY_SIZE(sst_mux_controls));
+
+ /* initialize the names of the probe points */
+ for (i = 0; i < SST_NUM_PROBE_CONNECTION_PTS; i++)
+ sst_probe_enum_texts[i] = sst_probes[i].name;
+
+ snd_soc_add_platform_controls(platform, sst_probe_controls,
+ ARRAY_SIZE(sst_probe_controls));
+
+ ret = sst_map_modules_to_pipe(platform);
+
+ return ret;
+}
--- /dev/null
+/*
+ * ipc_lib.h - Intel MID Platform driver header file
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __PLATFORMDRV_IPC_LIB_H__
+#define __PLATFORMDRV_IPC_LIB_H__
+
+struct sst_algo_int_control_v2;
+
+void sst_create_compr_vol_ipc(char *bytes, unsigned int type,
+ struct sst_algo_int_control_v2 *kdata);
+#endif
--- /dev/null
+/*
+ * ipc_lib_v2.c - Intel MID Platform Driver IPC wrappers for mrfld
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#include <sound/soc.h>
+#include <asm/platform_sst_audio.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+
+
+static inline void sst_fill_dsp_hdr(struct ipc_dsp_hdr *hdr, u8 index, u8 pipe,
+ u16 module, u16 cmd, u16 len)
+{
+ hdr->mod_index_id = index;
+ hdr->pipe_id = pipe;
+ hdr->mod_id = module;
+ hdr->cmd_id = cmd;
+ hdr->length = len;
+
+}
+
+static inline void sst_fill_byte_control_hdr(struct snd_sst_bytes_v2 *hdr,
+ u8 type, u8 msg, u8 block, u8 task, u8 pipe, u16 len)
+{
+ hdr->type = type;
+ hdr->ipc_msg = msg;
+ hdr->block = block;
+ hdr->task_id = task;
+ hdr->pipe_id = pipe;
+ hdr->rsvd = 0;
+ hdr->len = len;
+}
+
+#define SST_GAIN_V2_TIME_CONST 50
+
+void sst_create_compr_vol_ipc(char *bytes, unsigned int type,
+ struct sst_algo_int_control_v2 *kdata)
+{
+ struct snd_sst_gain_v2 gain1;
+ struct snd_sst_bytes_v2 byte_hdr;
+ struct ipc_dsp_hdr dsp_hdr;
+ char *tmp;
+ u16 len;
+ u8 ipc_msg;
+
+ /* Fill gain params */
+ gain1.gain_cell_num = 1; /* num of gain cells to modify*/
+ gain1.cell_nbr_idx = kdata->instance_id; /* instance index */
+ gain1.cell_path_idx = kdata->pipe_id; /* pipe id */
+ gain1.module_id = kdata->module_id; /*module id */
+ gain1.left_cell_gain = kdata->value; /* left gain value in dB*/
+ gain1.right_cell_gain = kdata->value; /* same value as left in dB*/
+ /* set to default recommended value*/
+ gain1.gain_time_const = SST_GAIN_V2_TIME_CONST;
+
+ /* fill dsp header */
+ /* Get params format for vol ctrl lib, size 6 bytes :
+ * u16 left_gain, u16 right_gain, u16 ramp
+ */
+ memset(&dsp_hdr, 0, sizeof(dsp_hdr));
+ if (type == SND_SST_BYTES_GET) {
+ len = 6;
+ ipc_msg = IPC_GET_PARAMS;
+ } else {
+ len = sizeof(gain1);
+ ipc_msg = IPC_SET_PARAMS;
+ }
+
+ sst_fill_dsp_hdr(&dsp_hdr, 0, kdata->pipe_id, kdata->module_id,
+ IPC_IA_SET_GAIN_MRFLD, len);
+
+ /* fill byte control header */
+ memset(&byte_hdr, 0, sizeof(byte_hdr));
+ len = sizeof(dsp_hdr) + dsp_hdr.length;
+ sst_fill_byte_control_hdr(&byte_hdr, type, ipc_msg, 1,
+ SST_TASK_ID_MEDIA, kdata->pipe_id, len);
+
+ /* fill complete byte stream as ipc payload */
+ tmp = bytes;
+ memcpy(tmp, &byte_hdr, sizeof(byte_hdr));
+ memcpy((tmp + sizeof(byte_hdr)), &dsp_hdr, sizeof(dsp_hdr));
+ if (type != SND_SST_BYTES_GET)
+ memcpy((tmp + sizeof(byte_hdr) + sizeof(dsp_hdr)), &gain1,
+ sizeof(gain1));
+#ifdef DEBUG_HEX_DUMP_BYTES
+ print_hex_dump_bytes(__func__, DUMP_PREFIX_NONE, bytes, 32);
+#endif
+}
--- /dev/null
+/*
+ * sst_widgets.h - Intel helpers to generate FW widgets
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __SST_WIDGETS_H__
+#define __SST_WIDGETS_H__
+
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define SST_MODULE_GAIN 1
+#define SST_MODULE_ALGO 2
+
+#define SST_FMT_MONO 0
+#define SST_FMT_STEREO 3
+
+/* physical SSP numbers */
+enum {
+ SST_SSP0 = 0,
+ SST_SSP1,
+ SST_SSP2,
+ SST_SSP_LAST = SST_SSP2,
+};
+
+#define SST_NUM_SSPS (SST_SSP_LAST + 1) /* physical SSPs */
+#define SST_MAX_SSP_MUX 2 /* single SSP muxed between pipes */
+#define SST_MAX_SSP_DOMAINS 2 /* domains present in each pipe */
+
+struct module {
+ struct snd_kcontrol *kctl;
+ struct list_head node;
+};
+
+struct sst_ssp_config {
+ u8 ssp_id;
+ u8 bits_per_slot;
+ u8 slots;
+ u8 ssp_mode;
+ u8 pcm_mode;
+ u8 duplex;
+ u8 ssp_protocol;
+ u8 fs_frequency;
+ u8 active_slot_map;
+ u8 start_delay;
+ u16 fs_width;
+};
+
+struct sst_ssp_cfg {
+ const u8 ssp_number;
+ const int *mux_shift;
+ const int (*domain_shift)[SST_MAX_SSP_MUX];
+ const struct sst_ssp_config (*ssp_config)[SST_MAX_SSP_MUX][SST_MAX_SSP_DOMAINS];
+};
+
+struct sst_ids {
+ u16 location_id;
+ u16 module_id;
+ u8 task_id;
+ u8 format;
+ u8 reg;
+ struct list_head algo_list;
+ struct list_head gain_list;
+ const struct sst_ssp_cfg *ssp;
+};
+
+#define SST_SSP_AIF_IN(wname, wevent, wssp_cfg) \
+{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, .invert = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, } \
+}
+
+#define SST_SSP_AIF_OUT(wname, wevent, wssp_cfg) \
+{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, .invert = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, } \
+}
+
+#define SST_SSP_INPUT(wname, wevent, wssp_cfg) \
+{ .id = snd_soc_dapm_input, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, .invert = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, } \
+}
+
+#define SST_SSP_OUTPUT(wname, wevent, wssp_cfg) \
+{ .id = snd_soc_dapm_output, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, .invert = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, } \
+}
+
+#define SST_PATH(wname, wtask, wloc_id, wevent, wflags) \
+{ .id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0, \
+ .invert = 0, .kcontrol_news = NULL, .num_kcontrols = 0, \
+ .event = wevent, .event_flags = wflags, \
+ .priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, } \
+}
+
+#define SST_PATH_MEDIA_LOOP(wname, wtask, wloc_id, wformat, wevent, wflags) \
+{ .id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0, \
+ .invert = 0, .kcontrol_news = NULL, .num_kcontrols = 0, \
+ .event = wevent, .event_flags = wflags, \
+ .priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, \
+ .format = wformat,} \
+}
+
+/* output is triggered before input */
+#define SST_PATH_INPUT(name, task_id, loc_id, event) \
+ SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
+
+#define SST_PATH_OUTPUT(name, task_id, loc_id, event) \
+ SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+#define SST_PATH_MEDIA_LOOP_OUTPUT(name, task_id, loc_id, format, event) \
+ SST_PATH_MEDIA_LOOP(name, task_id, loc_id, format, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+
+#define SST_SWM_MIXER(wname, wreg, wtask, wloc_id, wcontrols, wevent) \
+{ .id = snd_soc_dapm_mixer, .name = wname, .reg = SND_SOC_NOPM, .shift = 0, \
+ .invert = 0, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols),\
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD | \
+ SND_SOC_DAPM_POST_REG, \
+ .priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, \
+ .reg = wreg } \
+}
+
+enum sst_gain_kcontrol_type {
+ SST_GAIN_TLV,
+ SST_GAIN_MUTE,
+ SST_GAIN_RAMP_DURATION,
+};
+
+struct sst_gain_mixer_control {
+ bool stereo;
+ enum sst_gain_kcontrol_type type;
+ struct sst_gain_value *gain_val;
+ int max;
+ int min;
+ u16 instance_id;
+ u16 module_id;
+ u16 pipe_id;
+ u16 task_id;
+ char pname[44];
+ struct snd_soc_dapm_widget *w;
+};
+
+struct sst_gain_value {
+ u16 ramp_duration;
+ s16 l_gain;
+ s16 r_gain;
+ bool mute;
+};
+
+#define SST_GAIN_VOLUME_DEFAULT (-1440)
+#define SST_GAIN_RAMP_DURATION_DEFAULT 5 /* timeconstant */
+#define SST_GAIN_MUTE_DEFAULT true
+
+#define SST_GAIN_KCONTROL_TLV(xname, xhandler_get, xhandler_put, \
+ xmod, xpipe, xinstance, xtask, tlv_array, xgain_val, \
+ xmin, xmax, xpname) \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = sst_gain_ctl_info,\
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+ { .stereo = true, .max = xmax, .min = xmin, .type = SST_GAIN_TLV, \
+ .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+ .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_GAIN_KCONTROL_INT(xname, xhandler_get, xhandler_put, \
+ xmod, xpipe, xinstance, xtask, xtype, xgain_val, \
+ xmin, xmax, xpname) \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = sst_gain_ctl_info, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+ { .stereo = false, .max = xmax, .min = xmin, .type = xtype, \
+ .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+ .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_GAIN_KCONTROL_BOOL(xname, xhandler_get, xhandler_put,\
+ xmod, xpipe, xinstance, xtask, xgain_val, xpname) \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_bool_ext, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+ { .stereo = false, .type = SST_GAIN_MUTE, \
+ .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+ .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_CONTROL_NAME(xpname, xmname, xinstance, xtype) \
+ xpname " " xmname " " #xinstance " " xtype
+
+#define SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, xtype, xsubmodule) \
+ xpname " " xmname " " #xinstance " " xtype " " xsubmodule
+
+/*
+ * 3 Controls for each Gain module
+ * e.g. - pcm0_in gain 0 volume
+ * - pcm0_in gain 0 rampduration
+ * - pcm0_in gain 0 mute
+ */
+#define SST_GAIN_KCONTROLS(xpname, xmname, xmin_gain, xmax_gain, xmin_tc, xmax_tc, \
+ xhandler_get, xhandler_put, \
+ xmod, xpipe, xinstance, xtask, tlv_array, xgain_val) \
+ { SST_GAIN_KCONTROL_INT(SST_CONTROL_NAME(xpname, xmname, xinstance, "rampduration"), \
+ xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, SST_GAIN_RAMP_DURATION, \
+ xgain_val, xmin_tc, xmax_tc, xpname) }, \
+ { SST_GAIN_KCONTROL_BOOL(SST_CONTROL_NAME(xpname, xmname, xinstance, "mute"), \
+ xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, \
+ xgain_val, xpname) } ,\
+ { SST_GAIN_KCONTROL_TLV(SST_CONTROL_NAME(xpname, xmname, xinstance, "volume"), \
+ xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, tlv_array, \
+ xgain_val, xmin_gain, xmax_gain, xpname) }
+
+#define SST_GAIN_TC_MIN 5
+#define SST_GAIN_TC_MAX 5000
+#define SST_GAIN_MIN_VALUE -1440 /* in 0.1 DB units */
+#define SST_GAIN_MAX_VALUE 360
+
+enum sst_algo_kcontrol_type {
+ SST_ALGO_PARAMS,
+ SST_ALGO_BYPASS,
+};
+
+struct sst_algo_control {
+ enum sst_algo_kcontrol_type type;
+ int max;
+ u16 module_id;
+ u16 pipe_id;
+ u16 instance_id;
+ u16 task_id;
+ u16 cmd_id;
+ bool bypass;
+ unsigned char *params;
+ char pname[44];
+ struct snd_soc_dapm_widget *w;
+};
+
+#define SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod, \
+ xpipe, xinstance, xtask, xcmd) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,\
+ .name = SST_CONTROL_NAME(xpname, xmname, xinstance, "params"), \
+ .info = sst_algo_bytes_ctl_info, \
+ .get = sst_algo_control_get, .put = sst_algo_control_set, \
+ .private_value = (unsigned long)&(struct sst_algo_control) \
+ {.max = xcount, .type = SST_ALGO_PARAMS, .module_id = xmod, .pname = xpname, \
+ .pipe_id = xpipe, .instance_id = xinstance, .task_id = xtask, .cmd_id = xcmd} }
+
+#define SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = SST_CONTROL_NAME(xpname, xmname, xinstance, "bypass"), \
+ .info = snd_soc_info_bool_ext, \
+ .get = sst_algo_control_get, .put = sst_algo_control_set, \
+ .private_value = (unsigned long)&(struct sst_algo_control) \
+ {.type = SST_ALGO_BYPASS, .module_id = xmod, .pipe_id = xpipe, .pname = xpname, \
+ .task_id = xtask, .instance_id = xinstance, .bypass = 0 } }
+
+#define SST_ALGO_BYPASS_PARAMS(xpname, xmname, xcount, xmod, xpipe, \
+ xinstance, xtask, xcmd) \
+ SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask), \
+ SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod, xpipe, xinstance, xtask, xcmd)
+
+#define SST_COMBO_ALGO_KCONTROL_BYTES(xpname, xmname, xsubmod, xcount, xmod, \
+ xpipe, xinstance, xtask, xcmd) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,\
+ .name = SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, "params", xsubmod), \
+ .info = sst_algo_bytes_ctl_info, \
+ .get = sst_algo_control_get, .put = sst_algo_control_set, \
+ .private_value = (unsigned long)&(struct sst_algo_control) \
+ {.max = xcount, .type = SST_ALGO_PARAMS, .module_id = xmod, .pname = xpname, \
+ .pipe_id = xpipe, .instance_id = xinstance, .task_id = xtask, .cmd_id = xcmd} }
+
+
+/* only 4 slots/channels supported atm */
+#define SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts) \
+ (struct soc_enum){ .reg = s_ch_no, .reg2 = is_tx, .max = 4+1, .texts = xtexts, }
+
+#define SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name) \
+ xpname " " xmname " " s_ch_name
+
+#define SST_SSP_SLOT_CTL(xpname, xmname, s_ch_name, s_ch_no, is_tx, xtexts, xget, xput) \
+ SOC_DAPM_ENUM_EXT(SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name), \
+ SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts), \
+ xget, xput)
+
+#define SST_MUX_CTL_NAME(xpname, xinstance) \
+ xpname " " #xinstance
+
+#define SST_SSP_MUX_ENUM(xreg, xshift, xtexts) \
+ (struct soc_enum){ .reg = xreg, .texts = xtexts, .shift_l = xshift, \
+ .shift_r = xshift, .max = ARRAY_SIZE(xtexts), }
+
+#define SST_SSP_MUX_CTL(xpname, xinstance, xreg, xshift, xtexts, xget, xput) \
+ SOC_DAPM_ENUM_EXT(SST_MUX_CTL_NAME(xpname, xinstance), \
+ SST_SSP_MUX_ENUM(xreg, xshift, xtexts), \
+ xget, xput)
+
+struct sst_probe_value {
+ unsigned int val;
+ const struct soc_enum *p_enum;
+};
+
+#define SST_PROBE_CTL_NAME(dir, num, type) \
+ dir #num " " type
+
+#define SST_PROBE_ENUM(xname, xenum, xhandler_get, xhandler_put) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = sst_probe_enum_info, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_probe_value) \
+ { .val = 0, .p_enum = &xenum } }
+
+#endif
--- /dev/null
+/*
+* platform_ipc_v2.h - Intel MID Platform driver FW IPC definitions
+*
+* Copyright (C) 2008-10 Intel Corporation
+* Author: Vinod Koul <vinod.koul@intel.com>
+* Harsha Priya <priya.harsha@intel.com>
+* Dharageswari R <dharageswari.r@intel.com>
+* KP Jeeja <jeeja.kp@intel.com>
+* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+*
+* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*
+* This driver exposes the audio engine functionalities to the ALSA
+* and middleware.
+* This file has definitions shared between the firmware and driver
+*/
+#ifndef __PLATFORM_IPC_V2_H__
+#define __PLATFORM_IPC_V2_H__
+
+#define MAX_DBG_RW_BYTES 80
+#define MAX_NUM_SCATTER_BUFFERS 8
+#define MAX_LOOP_BACK_DWORDS 8
+/* IPC base address and mailbox, timestamp offsets */
+#define SST_MAILBOX_SIZE 0x0400
+#define SST_MAILBOX_SEND 0x0000
+#define SST_TIME_STAMP 0x1800
+#define SST_TIME_STAMP_MRFLD 0x680
+#define SST_TIME_STAMP_BYT 0x800
+#define SST_RESERVED_OFFSET 0x1A00
+#define SST_SCU_LPE_MAILBOX 0x1000
+#define SST_LPE_SCU_MAILBOX 0x1400
+#define SST_SCU_LPE_LOG_BUF (SST_SCU_LPE_MAILBOX+16)
+#define PROCESS_MSG 0x80
+
+/* Message ID's for IPC messages */
+/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
+
+/* I2L Firmware/Codec Download msgs */
+#define IPC_IA_PREP_LIB_DNLD 0x01
+#define IPC_IA_LIB_DNLD_CMPLT 0x02
+#define IPC_IA_GET_FW_VERSION 0x04
+#define IPC_IA_GET_FW_BUILD_INF 0x05
+#define IPC_IA_GET_FW_INFO 0x06
+#define IPC_IA_GET_FW_CTXT 0x07
+#define IPC_IA_SET_FW_CTXT 0x08
+#define IPC_IA_PREPARE_SHUTDOWN 0x31
+/* I2L Codec Config/control msgs */
+#define IPC_PREP_D3 0x10
+#define IPC_IA_SET_CODEC_PARAMS 0x10
+#define IPC_IA_GET_CODEC_PARAMS 0x11
+#define IPC_IA_SET_PPP_PARAMS 0x12
+#define IPC_IA_GET_PPP_PARAMS 0x13
+#define IPC_SST_PERIOD_ELAPSED_MRFLD 0xA
+#define IPC_IA_ALG_PARAMS 0x1A
+#define IPC_IA_TUNING_PARAMS 0x1B
+#define IPC_IA_SET_RUNTIME_PARAMS 0x1C
+#define IPC_IA_SET_PARAMS 0x1
+#define IPC_IA_GET_PARAMS 0x2
+
+#define IPC_EFFECTS_CREATE 0xE
+#define IPC_EFFECTS_DESTROY 0xF
+
+/* I2L Stream config/control msgs */
+#define IPC_IA_ALLOC_STREAM_MRFLD 0x2
+#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
+#define IPC_IA_FREE_STREAM_MRFLD 0x03
+#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
+#define IPC_IA_SET_STREAM_PARAMS 0x22
+#define IPC_IA_SET_STREAM_PARAMS_MRFLD 0x12
+#define IPC_IA_GET_STREAM_PARAMS 0x23
+#define IPC_IA_PAUSE_STREAM 0x24
+#define IPC_IA_PAUSE_STREAM_MRFLD 0x4
+#define IPC_IA_RESUME_STREAM 0x25
+#define IPC_IA_RESUME_STREAM_MRFLD 0x5
+#define IPC_IA_DROP_STREAM 0x26
+#define IPC_IA_DROP_STREAM_MRFLD 0x07
+#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
+#define IPC_IA_DRAIN_STREAM_MRFLD 0x8
+#define IPC_IA_CONTROL_ROUTING 0x29
+#define IPC_IA_VTSV_UPDATE_MODULES 0x20
+#define IPC_IA_VTSV_DETECTED 0x21
+
+#define IPC_IA_START_STREAM_MRFLD 0X06
+#define IPC_IA_START_STREAM 0x30 /* Short msg with str_id */
+
+#define IPC_IA_SET_GAIN_MRFLD 0x21
+/* Debug msgs */
+#define IPC_IA_DBG_MEM_READ 0x40
+#define IPC_IA_DBG_MEM_WRITE 0x41
+#define IPC_IA_DBG_LOOP_BACK 0x42
+#define IPC_IA_DBG_LOG_ENABLE 0x45
+#define IPC_IA_DBG_SET_PROBE_PARAMS 0x47
+
+/* L2I Firmware/Codec Download msgs */
+#define IPC_IA_FW_INIT_CMPLT 0x81
+#define IPC_IA_FW_INIT_CMPLT_MRFLD 0x01
+#define IPC_IA_FW_ASYNC_ERR_MRFLD 0x11
+
+/* L2I Codec Config/control msgs */
+#define IPC_SST_FRAGMENT_ELPASED 0x90 /* Request IA more data */
+
+#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
+#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP Over run and stopped */
+#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
+#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
+#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
+#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
+
+#define IPC_SST_ERROR_EVENT 0x99 /* Buffer over run occurred */
+/* L2S messages */
+#define IPC_SC_DDR_LINK_UP 0xC0
+#define IPC_SC_DDR_LINK_DOWN 0xC1
+#define IPC_SC_SET_LPECLK_REQ 0xC2
+#define IPC_SC_SSP_BIT_BANG 0xC3
+
+/* L2I Error reporting msgs */
+#define IPC_IA_MEM_ALLOC_FAIL 0xE0
+#define IPC_IA_PROC_ERR 0xE1 /* error in processing a
+ stream can be used by playback and
+ capture modules */
+
+/* L2I Debug msgs */
+#define IPC_IA_PRINT_STRING 0xF0
+
+/* Buffer under-run */
+#define IPC_IA_BUF_UNDER_RUN_MRFLD 0x0B
+
+/* Mrfld specific defines:
+ * For asynchronous messages(INIT_CMPLT, PERIOD_ELAPSED, ASYNC_ERROR)
+ * received from FW, the format is:
+ * - IPC High: pvt_id is set to zero. Always short message.
+ * - msg_id is in lower 16-bits of IPC low payload.
+ * - pipe_id is in higher 16-bits of IPC low payload for period_elapsed.
+ * - error id is in higher 16-bits of IPC low payload for async errors.
+ */
+#define SST_ASYNC_DRV_ID 0
+
+/* Command Response or Acknowledge message to any IPC message will have
+ * same message ID and stream ID information which is sent.
+ * There is no specific Ack message ID. The data field is used as response
+ * meaning.
+ */
+
+/* SCU IPC for resetting & power gating the LPE through SCU */
+#define IPC_SCU_LPE_RESET 0xA3
+
+enum ackData {
+ IPC_ACK_SUCCESS = 0,
+ IPC_ACK_FAILURE,
+};
+
+enum ipc_ia_msg_id {
+ IPC_CMD = 1, /*!< Task Control message ID */
+ IPC_SET_PARAMS = 2,/*!< Task Set param message ID */
+ IPC_GET_PARAMS = 3, /*!< Task Get param message ID */
+ IPC_INVALID = 0xFF, /*!<Task Get param message ID */
+};
+
+enum sst_codec_types {
+ /* AUDIO/MUSIC CODEC Type Definitions */
+ SST_CODEC_TYPE_UNKNOWN = 0,
+ SST_CODEC_TYPE_PCM, /* Pass through Audio codec */
+ SST_CODEC_TYPE_MP3,
+ SST_CODEC_TYPE_MP24,
+ SST_CODEC_TYPE_AAC,
+ SST_CODEC_TYPE_AACP,
+ SST_CODEC_TYPE_eAACP,
+ SST_CODEC_TYPE_WMA9,
+ SST_CODEC_TYPE_WMA10,
+ SST_CODEC_TYPE_WMA10P,
+ SST_CODEC_TYPE_RA,
+ SST_CODEC_TYPE_DDAC3,
+ SST_CODEC_TYPE_STEREO_TRUE_HD,
+ SST_CODEC_TYPE_STEREO_HD_PLUS,
+
+ /* VOICE CODEC Type Definitions */
+ SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
+};
+
+enum sst_algo_types {
+ SST_ALGO_SRC = 0x64,
+ SST_ALGO_MIXER = 0x65,
+ SST_ALGO_DOWN_MIXER = 0x66,
+ SST_ALGO_VTSV = 0x73,
+ SST_ALGO_AUDCLASSIFIER = 0x80,
+ SST_ALGO_VOLUME_CONTROL = 0x92,
+ SST_ALGO_GEQ = 0x99,
+};
+
+enum stream_type {
+ SST_STREAM_TYPE_NONE = 0,
+ SST_STREAM_TYPE_MUSIC = 1,
+ SST_STREAM_TYPE_NORMAL = 2,
+ SST_STREAM_TYPE_PROBE = 3,
+ SST_STREAM_TYPE_LONG_PB = 4,
+ SST_STREAM_TYPE_LOW_LATENCY = 5,
+};
+
+enum sst_error_codes {
+ /* Error code,response to msgId: Description */
+ /* Common error codes */
+ SST_SUCCESS = 0, /* Success */
+ SST_ERR_INVALID_STREAM_ID = 1,
+ SST_ERR_INVALID_MSG_ID = 2,
+ SST_ERR_INVALID_STREAM_OP = 3,
+ SST_ERR_INVALID_PARAMS = 4,
+ SST_ERR_INVALID_CODEC = 5,
+ SST_ERR_INVALID_MEDIA_TYPE = 6,
+ SST_ERR_STREAM_ERR = 7,
+
+ /* IPC specific error codes */
+ SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
+ SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
+ SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
+ SST_IPC_ERR_GET_STREAM_FAILED = 11,
+ SST_ERR_MOD_NOT_AVAIL = 12,
+ SST_ERR_MOD_DNLD_RQD = 13,
+ SST_ERR_STREAM_STOPPED = 14,
+ SST_ERR_STREAM_IN_USE = 15,
+
+ /* Capture specific error codes */
+ SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
+ SST_CAP_ERR_CAPTURE_FAIL = 17,
+ SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
+ SST_CAP_ERR_UNDER_RUN = 19,
+ SST_CAP_ERR_OVERFLOW = 20,
+
+ /* Playback specific error codes*/
+ SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
+ SST_PB_ERR_PLAY_FAIL = 22,
+ SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
+
+ /* Codec manager specific error codes */
+ SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
+ SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
+
+ /* Library manager specific error codes */
+ SST_SCC_ERR_PREP_DNLD_FAILED = 26,
+ SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
+ /* Scheduler specific error codes */
+ SST_SCH_ERR_FAIL = 28,
+
+ /* DMA specific error codes */
+ SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
+ SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
+ SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
+ SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
+ SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
+ SST_DMA_ERR_TRANSFER_FAILED = 34,
+
+ SST_SSP_ERR_ALREADY_ENABLED = 35,
+ SST_SSP_ERR_ALREADY_DISABLED = 36,
+ SST_SSP_ERR_NOT_INITIALIZED = 37,
+ SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
+
+ /* Other error codes */
+ SST_ERR_MOD_INIT_FAIL = 39,
+
+ /* FW init error codes */
+ SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
+ SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
+ SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
+ SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
+
+ /* Memory debug error codes */
+ SST_ERR_DBG_MEM_READ_FAIL = 44,
+ SST_ERR_DBG_MEM_WRITE_FAIL = 45,
+ SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
+ SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
+
+ SST_ERR_BUFFER_NOT_AVAILABLE = 48,
+ SST_ERR_BUFFER_NOT_ALLOCATED = 49,
+ SST_ERR_INVALID_REGION_TYPE = 50,
+ SST_ERR_NULL_PTR = 51,
+ SST_ERR_INVALID_BUFFER_SIZE = 52,
+ SST_ERR_INVALID_BUFFER_INDEX = 53,
+
+ /*IIPC specific error codes */
+ SST_IIPC_QUEUE_FULL = 54,
+ SST_IIPC_ERR_MSG_SND_FAILED = 55,
+ SST_PB_ERR_UNDERRUN_OCCURED = 56,
+ SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
+ SST_INVALID_TIME_SLOTS = 58,
+};
+
+enum dbg_mem_data_type {
+ /* Data type of debug read/write */
+ DATA_TYPE_U32,
+ DATA_TYPE_U16,
+ DATA_TYPE_U8,
+};
+
+enum dbg_type {
+ NO_DEBUG = 0,
+ SRAM_DEBUG,
+ PTI_DEBUG,
+};
+
+struct ipc_dsp_hdr {
+	u16 mod_index_id:8;		/*!< instance index of the module in the pipe */
+	u16 pipe_id:8;	/*!< pipe ID the module belongs to */
+	u16 mod_id;		/*!< module ID (lpe_algo_types_t, e.g. volume ctrl) */
+	u16 cmd_id;		/*!< DSP command ID specific to tasks */
+	u16 length;		/*!< length of the payload only, in bytes */
+} __packed;
+
+struct ipc_dsp_effects_info {
+ u16 cmd_id;
+ u16 length;
+ u16 sel_pos;
+ u16 sel_algo_id;
+ u16 cpu_load; /* CPU load indication */
+ u16 memory_usage; /* Data Memory usage */
+ u32 flags; /* effect engine caps/requirements flags */
+} __packed;
+
+struct ipc_effect_dsp_hdr {
+	u16 mod_index_id:8;		/*!< instance index of the module in the pipe */
+	u16 pipe_id:8;	/*!< pipe ID the module belongs to */
+	u16 mod_id;		/*!< module ID (lpe_algo_types_t) */
+} __packed;
+
+struct ipc_effect_payload {
+ struct ipc_effect_dsp_hdr dsp_hdr;
+ char *data;
+};
+
+union ipc_header_high {
+ struct {
+ u32 msg_id:8; /* Message ID - Max 256 Message Types */
+		u32 task_id:4; /* Task ID associated with this command */
+ u32 drv_id:4; /* Identifier for the driver to track*/
+ u32 rsvd1:8; /* Reserved */
+ u32 result:4; /* Reserved */
+ u32 res_rqd:1; /* Response rqd */
+ u32 large:1; /* Large Message if large = 1 */
+ u32 done:1; /* bit 30 - Done bit */
+ u32 busy:1; /* bit 31 - busy bit*/
+ } part;
+ u32 full;
+} __packed;
+
+/* IPC header */
+union ipc_header_mrfld {
+ struct {
+ u32 header_low_payload;
+ union ipc_header_high header_high;
+ } p;
+ u64 full;
+} __packed;
+
+/* CAUTION NOTE: All IPC message body must be multiple of 32 bits.*/
+
+/* IPC Header */
+union ipc_header {
+ struct {
+ u32 msg_id:8; /* Message ID - Max 256 Message Types */
+ u32 str_id:5;
+ u32 large:1; /* Large Message if large = 1 */
+ u32 reserved:2; /* Reserved for future use */
+ u32 data:14; /* Ack/Info for msg, size of msg in Mailbox */
+ u32 done:1; /* bit 30 */
+ u32 busy:1; /* bit 31 */
+ } part;
+ u32 full;
+} __packed;
+
+/* Firmware build info */
+struct sst_fw_build_info {
+ unsigned char date[16]; /* Firmware build date */
+ unsigned char time[16]; /* Firmware build time */
+} __packed;
+
+/* Firmware Version info */
+struct snd_sst_fw_version {
+ u8 build; /* build number*/
+ u8 minor; /* minor number*/
+ u8 major; /* major number*/
+ u8 type; /* build type */
+};
+
+struct ipc_header_fw_init {
+ struct snd_sst_fw_version fw_version;/* Firmware version details */
+ struct sst_fw_build_info build_info;
+ u16 result; /* Fw init result */
+ u8 module_id; /* Module ID in case of error */
+ u8 debug_info; /* Debug info from Module ID in case of fail */
+} __packed;
+
+struct snd_sst_tstamp {
+ u64 ring_buffer_counter; /* PB/CP: Bytes copied from/to DDR. */
+ u64 hardware_counter; /* PB/CP: Bytes DMAed to/from SSP. */
+ u64 frames_decoded;
+ u64 bytes_decoded;
+ u64 bytes_copied;
+ u32 sampling_frequency;
+ u32 channel_peak[8];
+} __packed;
+
+/* SST to IA memory read debug message */
+struct ipc_sst_ia_dbg_mem_rw {
+ u16 num_bytes;/* Maximum of MAX_DBG_RW_BYTES */
+ u16 data_type;/* enum: dbg_mem_data_type */
+ u32 address; /* Memory address of data memory of data_type */
+ u8 rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
+} __packed;
+
+struct ipc_sst_ia_dbg_loop_back {
+ u16 num_dwords; /* Maximum of MAX_DBG_RW_BYTES */
+ u16 increment_val;/* Increments dwords by this value, 0- no increment */
+ u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
+} __packed;
+
+/* Stream type params structure for Alloc stream */
+struct snd_sst_str_type {
+ u8 codec_type; /* Codec type */
+ u8 str_type; /* 1 = voice 2 = music */
+ u8 operation; /* Playback or Capture */
+ u8 protected_str; /* 0=Non DRM, 1=DRM */
+ u8 time_slots;
+ u8 reserved; /* Reserved */
+ u16 result; /* Result used for acknowledgment */
+} __packed;
+
+/* Library info structure */
+struct module_info {
+ u32 lib_version;
+ u32 lib_type;/*TBD- KLOCKWORK u8 lib_type;*/
+ u32 media_type;
+ u8 lib_name[12];
+ u32 lib_caps;
+ unsigned char b_date[16]; /* Lib build date */
+ unsigned char b_time[16]; /* Lib build time */
+} __packed;
+
+/* Library slot info */
+struct lib_slot_info {
+ u8 slot_num; /* 1 or 2 */
+ u8 reserved1;
+ u16 reserved2;
+ u32 iram_size; /* slot size in IRAM */
+ u32 dram_size; /* slot size in DRAM */
+ u32 iram_offset; /* starting offset of slot in IRAM */
+ u32 dram_offset; /* starting offset of slot in DRAM */
+} __packed;
+
+struct snd_ppp_mixer_params {
+ __u32 type; /*Type of the parameter */
+ __u32 size;
+ __u32 input_stream_bitmap; /*Input stream Bit Map*/
+} __packed;
+
+struct snd_sst_lib_download {
+ struct module_info lib_info; /* library info type, capabilities etc */
+ struct lib_slot_info slot_info; /* slot info to be downloaded */
+ u32 mod_entry_pt;
+};
+
+struct snd_sst_lib_download_info {
+ struct snd_sst_lib_download dload_lib;
+ u16 result; /* Result used for acknowledgment */
+ u8 pvt_id; /* Private ID */
+ u8 reserved; /* for alignment */
+};
+
+struct snd_pcm_params {
+ u8 num_chan; /* 1=Mono, 2=Stereo */
+ u8 pcm_wd_sz; /* 16/24 - bit*/
+ u8 use_offload_path; /* 0-PCM using period elpased & ALSA interfaces
+ 1-PCM stream via compressed interface */
+ u8 reserved2;
+ u32 sfreq; /* Sampling rate in Hz */
+ u8 channel_map[8];
+} __packed;
+
+/* MP3 Music Parameters Message */
+struct snd_mp3_params {
+ u8 num_chan; /* 1=Mono, 2=Stereo */
+ u8 pcm_wd_sz; /* 16/24 - bit*/
+ u8 crc_check; /* crc_check - disable (0) or enable (1) */
+ u8 reserved1; /* unused*/
+};
+
+/* AAC input bit-stream container formats (bs_format values below) */
+#define AAC_BIT_STREAM_ADTS 0
+#define AAC_BIT_STREAM_ADIF 1
+#define AAC_BIT_STREAM_RAW 2
+
+/* AAC Music Parameters Message */
+struct snd_aac_params {
+	u8 num_chan; /* 1=Mono, 2=Stereo*/
+	u8 pcm_wd_sz; /* 16/24 - bit*/
+	u8 bdownsample; /*SBR downsampling 0 - disable 1 -enabled AAC+ only */
+	u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
+	u32 externalsr; /*sampling rate of basic AAC raw bit stream*/
+	u8 sbr_signalling;/*disable/enable/set automode the SBR tool.AAC+*/
+	u8 reser1;
+	u16 reser2;
+};
+
+/* WMA Music Parameters Message */
+struct snd_wma_params {
+	u8 num_chan; /* 1=Mono, 2=Stereo */
+	u8 pcm_wd_sz; /* 16/24 - bit*/
+	u16 reserved1;
+	u32 brate; /* Use the hard coded value. */
+	u32 sfreq; /* Sampling freq eg. 8000, 44100, 48000 */
+	u32 channel_mask; /* Channel Mask */
+	u16 format_tag; /* Format Tag */
+	u16 block_align; /* packet size */
+	u16 wma_encode_opt;/* Encoder option */
+	u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB */
+	u8 reserved; /* reserved */
+};
+
+/* Codec params structure */
+union snd_sst_codec_params {
+	struct snd_pcm_params pcm_params;
+	struct snd_mp3_params mp3_params;
+	struct snd_aac_params aac_params;
+	struct snd_wma_params wma_params;
+};
+
+/* Address and size info of a frame buffer in DDR */
+struct sst_address_info {
+	__u32 addr; /* Address at IA */
+	__u32 size; /* Size of the buffer */
+} __packed;
+
+/* Additional params for Alloc struct*/
+struct snd_sst_alloc_params_ext {
+	__u16 sg_count;
+	__u16 reserved;
+	__u32 frag_size; /*Number of samples after which period elapsed
+				message is sent valid only if path = 0*/
+	struct sst_address_info ring_buf_info[8];
+};
+
+/* Per-stream codec parameter container */
+struct snd_sst_stream_params {
+	union snd_sst_codec_params uc;
+} __packed;
+
+/* Top-level stream descriptor passed with allocation requests */
+struct snd_sst_params {
+	u32 result;
+	u32 stream_id;
+	u8 codec;
+	u8 ops;
+	u8 stream_type;
+	u8 device_type;
+	u8 task;
+	struct snd_sst_stream_params sparams;
+	struct snd_sst_alloc_params_ext aparams;
+};
+
+/* Merrifield-specific allocation message */
+struct snd_sst_alloc_mrfld {
+	u16 codec_type;
+	u8 operation;
+	u8 sg_count;
+	struct sst_address_info ring_buf_info[8];
+	u32 frag_size;
+	u32 ts;
+	struct snd_sst_stream_params codec_params;
+} __packed;
+
+/* Alloc stream params structure */
+struct snd_sst_alloc_params {
+	struct snd_sst_str_type str_type;
+	struct snd_sst_stream_params stream_params;
+	struct snd_sst_alloc_params_ext alloc_params;
+} __packed;
+
+/* Alloc stream response message */
+struct snd_sst_alloc_response {
+	struct snd_sst_str_type str_type; /* Stream type for allocation */
+	struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
+};
+
+/* Drop response */
+struct snd_sst_drop_response {
+	u32 result;
+	u32 bytes;
+};
+
+/* Asynchronous message from firmware; payload length is implied by the
+ * transport, so a C99 flexible array member is used instead of the
+ * deprecated GNU zero-length array ([0]).  sizeof() is unchanged. */
+struct snd_sst_async_msg {
+	u32 msg_id; /* Async msg id */
+	u32 payload[];
+};
+
+/* Asynchronous error report from firmware */
+struct snd_sst_async_err_msg {
+	u32 fw_resp; /* Firmware Result */
+	u32 lib_resp; /*Library result */
+} __packed;
+
+/* Per-stream volume control message */
+struct snd_sst_vol {
+	u32 stream_id;
+	s32 volume;
+	u32 ramp_duration;
+	u32 ramp_type; /* Ramp type, default=0 */
+};
+
+/* Gain library parameters for mrfld
+ * based on DSP command spec v0.82
+ */
+struct snd_sst_gain_v2 {
+	u16 gain_cell_num; /* num of gain cells to modify*/
+	u8 cell_nbr_idx; /* instance index*/
+	u8 cell_path_idx; /* pipe-id */
+	u16 module_id; /*module id */
+	u16 left_cell_gain; /* left gain value in dB*/
+	u16 right_cell_gain; /* right gain value in dB*/
+	u16 gain_time_const; /* gain time constant*/
+} __packed;
+
+/* Per-stream mute control message */
+struct snd_sst_mute {
+	u32 stream_id;
+	u32 mute;
+};
+
+/* Runtime parameter blob; `type` is an enum stream_param_type value */
+struct snd_sst_runtime_params {
+	u8 type;
+	u8 str_id;
+	u8 size;
+	u8 rsvd;
+	void *addr;
+} __packed;
+
+/* Discriminator for snd_sst_runtime_params.type */
+enum stream_param_type {
+	SST_SET_TIME_SLOT = 0,
+	SST_SET_CHANNEL_INFO = 1,
+	OTHERS = 2, /*reserved for future params*/
+};
+
+/* CSV Voice call routing structure */
+struct snd_sst_control_routing {
+	u8 control; /* 0=start, 1=Stop */
+	u8 reserved[3]; /* Reserved- for 32 bit alignment */
+};
+
+/* One queued IPC message (list node + header + optional mailbox blob) */
+struct ipc_post {
+	struct list_head node;
+	union ipc_header header; /* driver specific */
+	bool is_large;
+	bool is_process_reply;
+	union ipc_header_mrfld mrfld_header;
+	char *mailbox_data;
+};
+
+/* Firmware context save/restore location */
+struct snd_sst_ctxt_params {
+	u32 address; /* Physical Address in DDR where the context is stored */
+	u32 size; /* size of the context */
+};
+
+/* LPE debug-log configuration */
+struct snd_sst_lpe_log_params {
+	u8 dbg_type;
+	u8 module_id;
+	u8 log_level;
+	u8 reserved;
+} __packed;
+
+/* Direction of a byte-blob transfer (snd_sst_bytes_v2.type) */
+enum snd_sst_bytes_type {
+	SND_SST_BYTES_SET = 0x1,
+	SND_SST_BYTES_GET = 0x2,
+};
+
+/* Generic byte-blob message; `len` gives the number of trailing bytes.
+ * Use a C99 flexible array member rather than the deprecated GNU
+ * zero-length array ([0]); sizeof() is unchanged. */
+struct snd_sst_bytes_v2 {
+	u8 type; /* enum snd_sst_bytes_type: SET or GET */
+	u8 ipc_msg;
+	u8 block;
+	u8 task_id;
+	u8 pipe_id;
+	u8 rsvd;
+	u16 len;
+	char bytes[];
+};
+
+#define MAX_VTSV_FILES 2
+/* Addresses/sizes of the VTSV firmware files in DDR */
+struct snd_sst_vtsv_info {
+	struct sst_address_info vfiles[MAX_VTSV_FILES];
+} __packed;
+
+#endif /* __PLATFORMDRV_IPC_V2_H__ */
--- /dev/null
+# Extra compiler flags: treat all warnings as errors
+ccflags-y += -Werror
+
+# Audio Comms
+obj-$(CONFIG_SND_SOC_COMMS_SSP) += mid_ssp.o
--- /dev/null
+/*
+ * mid_ssp.c - ASoC CPU DAI driver for the Intel MID SSP (I2S) interface
+ *
+ * Copyright (C) 2011-12 Intel Corp
+ * Author: Selma Bensaid <selma.bensaid@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#define FORMAT(fmt) "%s: " fmt, __func__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
+
+#include <linux/module.h>
+#include "mid_ssp.h"
+
+
+/*
+ * Default I2S configuration
+ */
+/*
+ * TO BE DONE: use mixer to make it more flexible
+ */
+/* Baseline I2S settings applied at DAI probe; clock/format fields are
+ * refined later by the set_fmt/set_sysclk/set_tdm_slot callbacks. */
+const struct intel_mid_i2s_settings ssp_platform_i2s_config = {
+	.master_mode_clk_selection = SSP_MASTER_CLOCK_UNDEFINED,
+	.master_mode_standard_freq = 0xFFFF,
+	.tx_tristate_phase = TXD_TRISTATE_LAST_PHASE_OFF,
+	.slave_clk_free_running_status =
+			SLAVE_SSPCLK_ON_DURING_TRANSFER_ONLY,
+	.ssp_duplex_mode = RX_AND_TX_MODE,
+	.ssp_trailing_byte_mode = SSP_TRAILING_BYTE_HDL_BY_IA,
+	.ssp_tx_dma = SSP_TX_DMA_ENABLE,
+	.ssp_rx_dma = SSP_RX_DMA_ENABLE,
+	.rx_fifo_interrupt = SSP_RX_FIFO_OVER_INT_ENABLE,
+	.tx_fifo_interrupt = SSP_TX_FIFO_UNDER_INT_ENABLE,
+	.ssp_rx_timeout_interrupt_status = SSP_RX_TIMEOUT_INT_DISABLE,
+	.ssp_trailing_byte_interrupt_status =
+			SSP_TRAILING_BYTE_INT_ENABLE,
+	.ssp_loopback_mode_status = SSP_LOOPBACK_OFF,
+	.ssp_rx_fifo_threshold = MID_SSP_RX_FIFO_THRESHOLD,
+	.ssp_tx_fifo_threshold = MID_SSP_TX_FIFO_THRESHOLD,
+	.ssp_frmsync_pol_bit = SSP_FRMS_ACTIVE_HIGH,
+	.ssp_end_transfer_state =
+			SSP_END_DATA_TRANSFER_STATE_LOW,
+	/* PSP timing fields; overwritten per sample rate in set_sysclk */
+	.ssp_psp_T1 = 0,
+	.ssp_psp_T2 = 0,
+	.ssp_psp_T4 = 0,
+	.ssp_psp_T5 = 0,
+	.ssp_psp_T6 = 1,
+};
+
+/*
+ * SSP DAI Internal functions
+ */
+
+/**
+ * ssp_dma_req - This function programs a write or read request
+ * to the Intel I2S driver
+ *
+ * @param substream Pointer to stream structure
+ * return ret_val Status
+ */
+static int ssp_dma_req(struct snd_pcm_substream *substream)
+{
+
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct intel_ssp_config *ssp_config;
+	struct snd_pcm_runtime *pl_runtime;
+	int ret;
+#ifdef _LLI_ENABLED_
+	struct intel_mid_i2s_lli *sg_table = NULL;
+	int i;
+#else
+	u32 *dma_addr;
+#endif /* _LLI_ENABLED_ */
+
+	/* Defensive NULL checks: WARN for the trace, then bail out */
+	WARN(!substream, "SSP DAI: "
+			"ERROR NULL substream\n");
+	if (!substream)
+		return -EINVAL;
+
+
+	pl_runtime = substream->runtime;
+
+	str_info = pl_runtime->private_data;
+
+	WARN(!str_info, "SSP DAI: "
+			"ERROR NULL str_info\n");
+	if (!str_info)
+		return -EINVAL;
+
+	ssp_config = str_info->ssp_config;
+
+	WARN(!ssp_config, "SSP DAI: "
+			"ERROR NULL ssp_config\n");
+	if (!ssp_config)
+		return -EINVAL;
+
+
+	WARN(!ssp_config->i2s_handle, "SSP DAI: "
+			"ERROR, trying to play a stream however "
+			"ssp_config->i2s_handle is NULL\n");
+
+	if (!ssp_config->i2s_handle)
+		return -EINVAL;
+
+#ifdef _LLI_ENABLED_
+	/* Scatter-gather (LLI) build: the whole ring buffer is programmed
+	 * once in circular mode; later triggers only toggle DMA interrupts. */
+	if (!test_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+			&str_info->stream_status)) {
+		pr_err("%s: Stream has been stopped before SSP DMA request has been taken into account",
+				__func__);
+		return 0;
+	}
+
+	/* Will be executed once until next DAI shutdown */
+	if (!test_bit(INTEL_ALSA_SSP_STREAM_INIT,
+			&str_info->stream_status)) {
+
+		str_info->length = frames_to_bytes(pl_runtime,
+				pl_runtime->period_size);
+
+		str_info->addr = substream->runtime->dma_area;
+
+		/* One LLI descriptor per ALSA period, all pointing into
+		 * the preallocated DMA area */
+		sg_table = kzalloc(sizeof(struct intel_mid_i2s_lli) *
+				pl_runtime->periods,
+				GFP_KERNEL);
+		if (sg_table == NULL) {
+			pr_err("sg_table allocation failed!");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < pl_runtime->periods; i++) {
+			sg_table[i].addr = (u32 *) (str_info->addr +
+					str_info->length * i);
+			sg_table[i].leng = (u32) str_info->length;
+		}
+
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			ret = intel_mid_i2s_lli_wr_req(ssp_config->i2s_handle,
+					sg_table,
+					pl_runtime->periods,
+					I2S_CIRCULAR_MODE,
+					substream);
+		else
+			ret = intel_mid_i2s_lli_rd_req(ssp_config->i2s_handle,
+					sg_table,
+					pl_runtime->periods,
+					I2S_CIRCULAR_MODE,
+					substream);
+		/* Table presumably copied by the I2S driver during the
+		 * request; freed immediately here - confirm against
+		 * intel_mid_i2s_lli_wr_req ownership contract */
+		kfree(sg_table);
+
+		if (ret != 0) {
+			pr_err("SSP DAI: %s request error",
+					(substream->stream ==
+					SNDRV_PCM_STREAM_PLAYBACK) ?
+					"write" : "read");
+		}
+
+		set_bit(INTEL_ALSA_SSP_STREAM_INIT, &str_info->stream_status);
+
+		intel_mid_i2s_command(ssp_config->i2s_handle,
+				SSP_CMD_ENABLE_SSP, NULL);
+	}
+
+	/* Executed at each TRIGGER_START */
+	intel_mid_i2s_command(ssp_config->i2s_handle,
+			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+			SSP_CMD_ENABLE_DMA_TX_INTR :
+			SSP_CMD_ENABLE_DMA_RX_INTR, NULL);
+
+	return 0;
+#else
+	/* Non-LLI build: one read/write request per period, re-issued from
+	 * the DMA-complete callback (ssp_dma_complete) */
+	str_info->length = frames_to_bytes(pl_runtime, pl_runtime->period_size);
+
+	str_info->addr = substream->runtime->dma_area;
+	pr_debug("SSP DAI: FCT %s substream->runtime->dma_area = %p",
+			__func__, substream->runtime->dma_area);
+
+	dma_addr = (u32 *)(str_info->addr + str_info->length
+			* str_info->period_req_index);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = intel_mid_i2s_wr_req(ssp_config->i2s_handle, dma_addr,
+				str_info->length, substream);
+	else
+		ret = intel_mid_i2s_rd_req(ssp_config->i2s_handle, dma_addr,
+				str_info->length, substream);
+
+	if (ret == 0) {
+		intel_mid_i2s_command(ssp_config->i2s_handle,
+				SSP_CMD_ENABLE_SSP, NULL);
+
+		/* RUNNING marks an outstanding request; it must be clear
+		 * here, otherwise a previous request was never completed */
+		if (test_and_set_bit(INTEL_ALSA_SSP_STREAM_RUNNING,
+				&str_info->stream_status)) {
+			pr_err("SSP DAI: ERROR previous request not handled\n");
+			return -EBUSY;
+		}
+
+		/* Advance the request index around the ring */
+		if (++(str_info->period_req_index) >= pl_runtime->periods)
+			str_info->period_req_index = 0;
+		return 0;
+	} else {
+		pr_err("SSP DAI: FCT %s read/write req ERROR\n", __func__);
+		return -EINVAL;
+	}
+#endif /* _LLI_ENABLED_ */
+} /* ssp_dma_req */
+
+/**
+ * ssp_dma_complete - End of capture or playback callback
+ * called in DMA Complete Tasklet context
+ * This Callback has in charge of re-programming a new read or write
+ * request to Intel MID I2S Driver if the stream has not been Closed.
+ * It calls also the snd_pcm_period_elapsed if the stream is not
+ * PAUSED or SUSPENDED to inform ALSA Kernel that the Ring Buffer
+ * period has been sent or received properly
+ *
+ * @param param Pointer to a user data
+ * return status
+ */
+static int ssp_dma_complete(void *param)
+{
+	struct snd_pcm_substream *substream;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_runtime *pl_runtime;
+#ifndef _LLI_ENABLED_
+	bool call_back = false;
+	bool reset_index = false;
+#endif /* _LLI_ENABLED_ */
+
+	/* param is the substream passed to intel_mid_i2s_*_req() */
+	substream = (struct snd_pcm_substream *)param;
+	pl_runtime = substream->runtime;
+	str_info = substream->runtime->private_data;
+
+	WARN(!str_info, "SSP DAI: ERROR NULL str_info\n");
+	if (str_info == NULL)
+		return -EINVAL;
+
+#ifdef _LLI_ENABLED_
+	/* LLI build: DMA runs circularly; just advance the completed-period
+	 * index and notify ALSA if the stream is still started */
+	if (!test_bit(INTEL_ALSA_SSP_STREAM_INIT,
+			&str_info->stream_status)) {
+		pr_err("Stream already not initialized");
+		return 0;
+	}
+
+	if (++(str_info->period_cb_index) >= pl_runtime->periods)
+		str_info->period_cb_index = 0;
+
+	if (test_bit(INTEL_ALSA_SSP_STREAM_STARTED, &str_info->stream_status))
+		snd_pcm_period_elapsed(substream);
+	else
+		pr_debug("No call to snd_period_elapsed, stream is not started");
+#else
+	/* Non-LLI build: decide whether to re-arm the next request and
+	 * whether the ring indexes must be reset after a drop/restart */
+	if (test_and_clear_bit(INTEL_ALSA_SSP_STREAM_RUNNING,
+			&str_info->stream_status)) {
+		bool dropped = test_and_clear_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
+				&str_info->stream_status);
+		bool started = test_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+				&str_info->stream_status);
+
+		if (started) {
+			/*
+			 * Whatever dropped or not,
+			 * the stream is on going
+			 */
+			call_back = true;
+		}
+		if (started && dropped) {
+			/*
+			 * the stream has been dropped and restarted
+			 * before the callback occurs
+			 * in this case the we have to reprogram the
+			 * requests to SSP driver
+			 * and reset the stream's indexes
+			 */
+			reset_index = true;
+		}
+		if (!started && !dropped) {
+			pr_err("SSP DAI: FCT %s neither started nor dropped",
+					__func__);
+			return -EBUSY;
+		}
+	} else {
+		pr_err("SSP DAI: FCT %s called while not running ", __func__);
+		return -EBUSY;
+	}
+
+	if (call_back == true) {
+		pr_debug("SSP DAI: playback/capture (REQ=%d,CB=%d): DMA_REQ_COMPLETE\n",
+				str_info->period_req_index,
+				str_info->period_cb_index);
+
+		if (reset_index) {
+			str_info->period_cb_index = 0;
+			str_info->period_req_index = 0;
+		} else if (++(str_info->period_cb_index) >= pl_runtime->periods)
+			str_info->period_cb_index = 0;
+
+		/*
+		 * Launch the next Capture/Playback request if
+		 * no CLOSE has been requested
+		 */
+		ssp_dma_req(substream);
+
+		/*
+		 * Call the snd_pcm_period_elapsed to inform ALSA kernel
+		 * that a ringbuffer period has been played
+		 */
+		snd_pcm_period_elapsed(substream);
+	}
+#endif /* _LLI_ENABLED_ */
+
+	return 0;
+} /* ssp_dma_complete */
+
+/**
+ * intel_mid_ssp_transfer_data - send data buffers
+ *
+ * @param work Pointer to stream structure
+ * return void
+ */
+/* Workqueue handler: recover the stream info from the work item and
+ * issue the next DMA request outside atomic (trigger) context. */
+void intel_mid_ssp_transfer_data(struct work_struct *work)
+{
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_substream *substream;
+
+	BUG_ON(!work);
+
+	str_info = container_of(work, struct intel_alsa_ssp_stream_info,
+			ssp_ws);
+
+	BUG_ON(!str_info);
+
+	substream = str_info->substream;
+
+	BUG_ON(!substream);
+
+	/* Return value intentionally ignored: errors are logged inside */
+	ssp_dma_req(substream);
+
+} /* intel_mid_ssp_transfer_data */
+
+/*
+ * SSP PLATFORM
+ */
+
+/*
+ * SSP Platform functions
+ */
+/* Platform .pcm_new: preallocate contiguous DMA buffers for every
+ * substream of the PCM, bounded by SSP_MIN_BUFFER/SSP_MAX_BUFFER. */
+static int ssp_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
+{
+	int retval = 0;
+	struct snd_soc_dai *dai;
+	struct snd_pcm *pcm;
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+			__func__);
+	/*
+	 * Do pre-allocation to all substreams of the given pcm for the
+	 * specified DMA type.
+	 *
+	 */
+	dai = soc_runtime->cpu_dai;
+	pcm = soc_runtime->pcm;
+
+	/* Only allocate when the DAI actually declares a direction */
+	if (dai->driver->playback.channels_min ||
+			dai->driver->capture.channels_min) {
+		retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+			SNDRV_DMA_TYPE_CONTINUOUS,
+			snd_dma_continuous_data(GFP_KERNEL),
+			SSP_MIN_BUFFER, SSP_MAX_BUFFER);
+
+		if (retval) {
+			pr_err("DMA buffer allocation fail\n");
+			return retval;
+		}
+	}
+	return retval;
+} /* ssp_platform_pcm_new */
+
+/* Platform .pcm_free: release the buffers preallocated in pcm_new */
+static void ssp_platform_pcm_free(struct snd_pcm *pcm)
+{
+	pr_debug("SSP DAI: FCT %s enter\n",
+			__func__);
+	/*
+	 * release all pre-allocated buffers on the pcm
+	 *
+	 */
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+
+} /* ssp_platform_pcm_free */
+
+/**
+ * ssp_platform_hw_params - Allocate memory for Ring Buffer according
+ * to hw_params.
+ * It's called in a non-atomic context
+ *
+ * @param substream Substream for which the stream function is called
+ * @param hw_params Stream command thats requested from upper layer
+ * return status 0 ==> OK
+ *
+ */
+static int ssp_platform_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *hw_params)
+{
+	int ret_val;
+
+	/*
+	 * Allocates the DMA buffer for the substream
+	 * This callback could be called several time
+	 * snd_pcm_lib_malloc_pages allows to avoid memory leak
+	 * as it release already allocated memory when already allocated
+	 */
+	ret_val = snd_pcm_lib_malloc_pages(substream,
+			params_buffer_bytes(hw_params));
+
+	if (ret_val < 0)
+		return ret_val;
+
+	/* Zero the ring buffer so stale data is never played out */
+	memset(substream->runtime->dma_area, 0, params_buffer_bytes(hw_params));
+
+	return 0;
+} /* ssp_platform_hw_params */
+
+/*
+ * ssp_platform_pointer- to send the current buffer pointer
+ * processed by HW
+ * This function is called by ALSA framework to get the current HW buffer ptr
+ * to check the Ring Buffer Status
+ *
+ * @param substream Pointer to the substream for which the function
+ * is called
+ *
+ * return pcm_pointer Indicates the number of samples played
+ *
+ */
+static
+snd_pcm_uframes_t ssp_platform_pointer(struct snd_pcm_substream *substream)
+{
+	struct intel_alsa_ssp_stream_info *str_info;
+	unsigned long pcm_pointer;
+
+	str_info = substream->runtime->private_data;
+
+	WARN(!str_info, "SSP DAI: ERROR NULL str_info\n");
+	if (!str_info)
+		/* NOTE(review): -EINVAL wraps in the unsigned return type;
+		 * kept for bug-for-bug compatibility with callers */
+		return -EINVAL;
+
+	/* Playback and capture used two byte-identical branches here;
+	 * both directions report the same thing: the number of whole
+	 * periods completed by the DMA callback, expressed in frames. */
+	pcm_pointer = (unsigned long) (str_info->period_cb_index
+			* substream->runtime->period_size);
+
+	pr_debug("SSP DAI: FCT %s Frame bits = %d, period_size = %d, periods = %d\n",
+			__func__,
+			(int) substream->runtime->frame_bits,
+			(int) substream->runtime->period_size,
+			(int) substream->runtime->periods);
+
+	pr_debug("SSP DAI: FCT %s returns %ld\n",
+			__func__, pcm_pointer);
+
+	return pcm_pointer;
+} /* ssp_platform_pointer */
+
+/* PCM ops: only hw_params and pointer are implemented here; open/close/
+ * trigger handling lives in the DAI ops (startup/shutdown/trigger). */
+static struct snd_pcm_ops ssp_platform_ops = {
+	.open = NULL,
+	.close = NULL,
+	.ioctl = snd_pcm_lib_ioctl,
+	.hw_params = ssp_platform_hw_params,
+	.hw_free = NULL,
+	.prepare = NULL,
+	.trigger = NULL,
+	.pointer = ssp_platform_pointer,
+};
+
+/* ASoC platform driver glue for the SSP */
+struct snd_soc_platform_driver soc_ssp_platform_drv = {
+	.ops		= &ssp_platform_ops,
+	.probe		= NULL,
+	.pcm_new	= ssp_platform_pcm_new,
+	.pcm_free	= ssp_platform_pcm_free,
+};
+
+/*
+ * SND SOC DAI OPs
+ */
+/* DAI .probe: allocate the per-DAI SSP configuration and attach it as
+ * both playback and capture DMA data (shared by both directions). */
+static int ssp_probe(struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+
+	pr_info("SSP DAI: FCT %s enters for CPU_DAI %d\n",
+			__func__, cpu_dai->id);
+
+	ssp_config = kzalloc(sizeof(struct intel_ssp_config), GFP_KERNEL);
+
+
+	if (ssp_config == NULL) {
+		pr_err("Unable to allocate ssp_config\n");
+		return -ENOMEM;
+	}
+
+#ifndef _LLI_ENABLED_
+	ssp_config->intel_mid_dma_alloc = false;
+#endif /* _LLI_ENABLED_ */
+	ssp_config->ssp_dai_tx_allocated = false;
+	ssp_config->ssp_dai_rx_allocated = false;
+
+	/* Start from the default I2S settings (copied by value) */
+	ssp_config->i2s_settings = ssp_platform_i2s_config;
+	pr_info("SSP DAI: FCT %s ssp_config %p\n",
+			__func__, ssp_config);
+
+	cpu_dai->playback_dma_data = cpu_dai->capture_dma_data = ssp_config;
+
+	return 0;
+
+} /* ssp_probe */
+
+/* DAI .remove: free the configuration allocated in ssp_probe.
+ * playback_dma_data and capture_dma_data point to the same object,
+ * so a single kfree() is correct. */
+static int ssp_remove(struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+
+	WARN(!cpu_dai, "SSP DAI: "
+			"ERROR NULL cpu_dai\n");
+	if (!cpu_dai)
+		return -EINVAL;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	kfree(ssp_config);
+
+	return 0;
+} /* ssp_remove */
+
+/*
+ * DAI .startup: allocate per-substream bookkeeping and, on the first
+ * active use of the DAI, open the Intel MID I2S device and register the
+ * DMA-complete callbacks.
+ *
+ * Fixes over the previous version:
+ *  - str_info was leaked (and runtime->private_data left dangling) on
+ *    every error path after its allocation;
+ *  - the I2S handle was leaked when callback registration failed
+ *    (ASoC does not call shutdown when startup fails);
+ *  - allocation failure now returns -ENOMEM instead of -EINVAL;
+ *  - removed the unused local `device`.
+ */
+static int ssp_dai_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+	struct snd_pcm_runtime *pl_runtime;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct intel_ssp_info *ssp_info;
+	struct snd_soc_dai_driver *cpudai_drv = cpu_dai->driver;
+	int ret = 0;
+
+	WARN(!cpu_dai->driver, "SSP DAI: "
+			"FCT %s ERROR NULL cpu_dai->driver\n",
+			__func__);
+	if (!cpu_dai->driver)
+		return -EINVAL;
+
+	pr_info("SSP DAI: FCT %s enters for DAI Id = %d\n",
+			__func__, cpu_dai->driver->id);
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+
+	WARN(!ssp_info, "SSP DAI: ERROR NULL ssp_info\n");
+	if (!ssp_info)
+		return -EINVAL;
+
+	pl_runtime = substream->runtime;
+
+	ssp_config = snd_soc_dai_get_dma_data(cpu_dai, substream);
+
+	WARN(!ssp_config, "SSP DAI: "
+			"FCT %s ERROR NULL ssp_config\n",
+			__func__);
+	if (!ssp_config)
+		return -EINVAL;
+
+	/*
+	 * setup the internal data structure stream pointers based on it being
+	 * playback or capture stream
+	 */
+	str_info = kzalloc(sizeof(*str_info), GFP_KERNEL);
+	if (!str_info) {
+		pr_err("SSP DAI: str_info alloc failure\n");
+		return -ENOMEM;
+	}
+	str_info->substream = substream;
+	str_info->ssp_config = ssp_config;
+	str_info->stream_status = 0;
+
+	INIT_WORK(&str_info->ssp_ws, intel_mid_ssp_transfer_data);
+
+	/*
+	 * Initialize SSPx [x=0,1] driver
+	 * Store the Stream information
+	 */
+	pl_runtime->private_data = str_info;
+
+	pr_debug("SSP DAI: FCT %s enters cpu_dai->card->name = %s\n",
+			__func__, cpu_dai->card->name);
+
+	if (!cpu_dai->active) {
+		/* First user of this DAI: open the I2S device according
+		 * to which virtual DAI this is */
+		if (!strcmp(cpudai_drv->name, SSP_BT_DAI_NAME)) {
+			ssp_config->i2s_handle =
+				intel_mid_i2s_open(SSP_USAGE_BLUETOOTH_FM);
+			pr_debug("opening the CPU_DAI for "\
+				"SSP_USAGE_BLUETOOTH_FM, i2s_handle = %p\n",
+				ssp_config->i2s_handle);
+
+		} else if (!strcmp(cpudai_drv->name, SSP_MODEM_DAI_NAME)) {
+			ssp_config->i2s_handle =
+				intel_mid_i2s_open(SSP_USAGE_MODEM);
+			pr_debug("opening the CPU_DAI for "\
+				"SSP_USAGE_MODEM, i2s_handle = %p\n",
+				ssp_config->i2s_handle);
+
+		} else {
+			pr_err("non Valid SOC CARD\n");
+			ret = -EINVAL;
+			goto free_str_info;
+		}
+
+		/* Set the Write Callback */
+		ret = intel_mid_i2s_set_wr_cb(ssp_config->i2s_handle,
+				ssp_dma_complete);
+		if (ret)
+			goto close_i2s;
+
+		/* Set the Default Read Callback */
+		ret = intel_mid_i2s_set_rd_cb(ssp_config->i2s_handle,
+				ssp_dma_complete);
+		if (ret)
+			goto close_i2s;
+
+	} else {
+		/*
+		 * do nothing because already Open by sibling substream
+		 */
+		pr_debug("SSP DAI: FCT %s Open DO NOTHING\n",
+				__func__);
+	}
+	return 0;
+
+close_i2s:
+	intel_mid_i2s_close(ssp_config->i2s_handle);
+	ssp_config->i2s_handle = NULL;
+free_str_info:
+	pl_runtime->private_data = NULL;
+	kfree(str_info);
+	return ret;
+} /* ssp_dai_startup */
+
+/* DAI .shutdown: cancel pending work, release the direction's DMA
+ * channel when its last substream closes, free the stream info, and
+ * close the I2S device once the DAI is fully inactive. */
+static void ssp_dai_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_runtime *runtime;
+	struct intel_ssp_info *ssp_info;
+
+	ssp_config = snd_soc_dai_get_dma_data(cpu_dai, substream);
+
+	BUG_ON(!ssp_config);
+
+	runtime = substream->runtime;
+	BUG_ON(!runtime->private_data);
+
+	str_info = runtime->private_data;
+	BUG_ON(!str_info);
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+	BUG_ON(!ssp_info);
+
+	/* Cancel pending work */
+	cancel_work_sync(&str_info->ssp_ws);
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		/*
+		 * Only Free Tx channel if no playback streams are active
+		 * Shutdown can be called right after a startup if something
+		 * failed (as a concurrency issue
+		 * so this case can happen
+		 */
+		if ((!cpu_dai->playback_active) &&
+			(ssp_config->i2s_settings.ssp_active_tx_slots_map)) {
+			intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_FREE_TX, NULL);
+			pr_debug("SSP DAI: FCT %s TX DMA Channel released\n",
+					__func__);
+		}
+		ssp_config->ssp_dai_tx_allocated = false;
+		break;
+
+	case SNDRV_PCM_STREAM_CAPTURE:
+		/*
+		 * Only Free Rx channel if no capture streams are active
+		 */
+		if ((!cpu_dai->capture_active) &&
+			(ssp_config->i2s_settings.ssp_active_rx_slots_map)) {
+			intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_FREE_RX, NULL);
+			pr_debug("SSP DAI: FCT %s RX DMA Channel released\n",
+					__func__);
+		}
+		ssp_config->ssp_dai_rx_allocated = false;
+		break;
+
+	default:
+		pr_err("SSP DAI: FCT %s Bad stream_dir: %d\n",
+				__func__, substream->stream);
+		break;
+	}
+
+#ifdef _LLI_ENABLED_
+	clear_bit(INTEL_ALSA_SSP_STREAM_INIT, &str_info->stream_status);
+#endif /* _LLI_ENABLED_ */
+
+	kfree(str_info);
+
+	if (!cpu_dai->active) {
+		pr_info("SSP DAI: FCT %s closing I2S\n",
+				__func__);
+		/*
+		 * Close the Intel MID I2S connection
+		 */
+		intel_mid_i2s_close(ssp_config->i2s_handle);
+
+		ssp_config->i2s_handle = NULL;
+#ifndef _LLI_ENABLED_
+		ssp_config->intel_mid_dma_alloc = false;
+#endif /* _LLI_ENABLED_ */
+	}
+
+} /* ssp_dai_shutdown */
+
+/*
+ * DAI .set_fmt: translate the ASoC format word into intel_mid_i2s
+ * settings (clock/frame master mode, serial clock mode, frame-sync
+ * polarity, frame format).
+ *
+ * Fix: the serial-clock-mode `default:` error print reported the value
+ * masked with SND_SOC_DAIFMT_INV_MASK although the switch tests
+ * SND_SOC_DAIFMT_CLOCK_MASK; it now prints the clock-mask bits.
+ */
+static int ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+		unsigned int fmt)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	WARN(!ssp_config, "SSP DAI: FCT %s ssp_config=NULL\n",
+			__func__);
+	if (!ssp_config)
+		return -EINVAL;
+
+	pr_debug("SSP DAI: FCT %s fmt = %d\n",
+			__func__, fmt);
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	/*
+	 * SSP CLK Direction
+	 * SSP FRMSYNC Direction
+	 */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		i2s_config->sspslclk_direction = SSPSCLK_MASTER_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_MASTER_MODE;
+		/*
+		 * Mandatory to be able to perform only RX without TX
+		 * in SSP CLK Master Mode
+		 *
+		 */
+		i2s_config->ssp_duplex_mode = RX_WITHOUT_TX_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBM_CFS:
+		i2s_config->sspslclk_direction = SSPSCLK_MASTER_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_SLAVE_MODE;
+		/*
+		 * Mandatory to be able to perform only RX without TX
+		 * in SSP CLK Master Mode
+		 *
+		 */
+		i2s_config->ssp_duplex_mode = RX_WITHOUT_TX_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		i2s_config->sspslclk_direction = SSPSCLK_SLAVE_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_SLAVE_MODE;
+		i2s_config->ssp_duplex_mode = RX_AND_TX_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFM:
+		i2s_config->sspslclk_direction = SSPSCLK_SLAVE_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_MASTER_MODE;
+		i2s_config->ssp_duplex_mode = RX_AND_TX_MODE;
+		break;
+	default:
+		pr_err("SSP DAI: %s Bad DAI CLK/FS Mode=%d\n",
+				__func__,
+				(fmt & SND_SOC_DAIFMT_MASTER_MASK));
+		return -EINVAL;
+	}
+	/*
+	 * SSP Signal Inversion Mode
+	 * Use clock gating bitfield for
+	 * Serial bit-rate Clock Mode
+	 */
+	switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
+	case SSP_DAI_SCMODE_0:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_0;
+		break;
+	case SSP_DAI_SCMODE_1:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_1;
+		break;
+	case SSP_DAI_SCMODE_2:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_2;
+		break;
+	case SSP_DAI_SCMODE_3:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_3;
+		break;
+	default:
+		pr_err("SSP DAI: %s Bad DAI Serial Clock Mode=%d\n",
+				__func__,
+				(fmt & SND_SOC_DAIFMT_CLOCK_MASK));
+		return -EINVAL;
+	}
+
+	/*
+	 * SSP FS Inversion Mode
+	 */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+	case SND_SOC_DAIFMT_IB_NF:
+		i2s_config->ssp_frmsync_pol_bit = SSP_FRMS_ACTIVE_HIGH;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+	case SND_SOC_DAIFMT_IB_IF:
+		i2s_config->ssp_frmsync_pol_bit = SSP_FRMS_ACTIVE_LOW;
+		break;
+	default:
+		pr_err("SSP DAI: %s Bad DAI FS Inversion Mode=%d\n",
+				__func__,
+				(fmt & SND_SOC_DAIFMT_INV_MASK));
+		return -EINVAL;
+	}
+
+	/*
+	 * SSP Format Mode
+	 */
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		i2s_config->frame_format = PSP_FORMAT;
+		break;
+
+	default:
+		pr_err("SSP DAI: %s Bad DAI format Mode=%d\n",
+				__func__,
+				(fmt & SND_SOC_DAIFMT_FORMAT_MASK));
+		return -EINVAL;
+	}
+	return 0;
+} /* ssp_set_dai_fmt */
+
+/* DAI .set_tdm_slot: program network-mode slot layout — slot count,
+ * slot width in bits, and the active TX/RX slot bitmaps. */
+static int ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
+	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	WARN(!ssp_config, "SSP DAI: FCT %s ssp_config=NULL\n",
+			__func__);
+	if (!ssp_config)
+		return -EINVAL;
+
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	i2s_config->frame_rate_divider_control = slots;
+	i2s_config->data_size = slot_width;
+	i2s_config->mode = SSP_IN_NETWORK_MODE;
+	i2s_config->ssp_active_tx_slots_map = tx_mask;
+	i2s_config->ssp_active_rx_slots_map = rx_mask;
+
+	pr_debug("i2s_config->frame_rate_divider_control = %d\n",
+			i2s_config->frame_rate_divider_control);
+	pr_debug("i2s_config->data_size = %d\n",
+			i2s_config->data_size);
+	pr_debug("i2s_config->mode = %d\n",
+			i2s_config->mode);
+	pr_debug("i2s_config->ssp_active_tx_slots_map = %d\n",
+			i2s_config->ssp_active_tx_slots_map);
+	pr_debug("i2s_config->ssp_active_rx_slots_map = %d\n",
+			i2s_config->ssp_active_rx_slots_map);
+
+	return 0;
+}
+
+/*
+ * DAI .set_sysclk: select the master-mode clock source from clk_id and
+ * the frame frequency (plus PSP T1..T6 timings) from freq.  11025,
+ * 22050 and 44100 Hz are recognized but deliberately rejected.
+ *
+ * Fix: on an unknown clk_id the undefined marker was written into
+ * master_mode_standard_freq; SSP_MASTER_CLOCK_UNDEFINED is a clock
+ * selection constant (see ssp_platform_i2s_config), so the correct
+ * field to reset is master_mode_clk_selection.
+ */
+static int ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	BUG_ON(!ssp_config);
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	pr_debug("SSP DAI: FCT %s clk_id = %d\n",
+			__func__, clk_id);
+
+	switch (clk_id) {
+	case SSP_CLK_ONCHIP:
+		i2s_config->master_mode_clk_selection = SSP_ONCHIP_CLOCK;
+		break;
+	case SSP_CLK_NET:
+		i2s_config->master_mode_clk_selection = SSP_NETWORK_CLOCK;
+		break;
+	case SSP_CLK_EXT:
+		i2s_config->master_mode_clk_selection = SSP_EXTERNAL_CLOCK;
+		break;
+	case SSP_CLK_AUDIO:
+		i2s_config->master_mode_clk_selection = SSP_ONCHIP_AUDIO_CLOCK;
+		break;
+	default:
+		i2s_config->master_mode_clk_selection =
+						SSP_MASTER_CLOCK_UNDEFINED;
+		pr_err("SSP DAI: %s Bad clk_id=%d\n",
+				__func__,
+				clk_id);
+		return -EINVAL;
+	}
+
+	pr_debug("SSP DAI:FCT %s freq = %d\n",
+			__func__, freq);
+
+	switch (freq) {
+	case 8000:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_8_000;
+		i2s_config->ssp_psp_T1 = 0;
+		i2s_config->ssp_psp_T2 = 1;
+		i2s_config->ssp_psp_T4 = 0;
+		i2s_config->ssp_psp_T5 = 0;
+		i2s_config->ssp_psp_T6 = 1;
+		break;
+
+	case 11025:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_11_025;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+				__func__,
+				freq);
+		return -EINVAL;
+
+	case 16000:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_16_000;
+		i2s_config->ssp_psp_T1 = 6;
+		i2s_config->ssp_psp_T2 = 2;
+		i2s_config->ssp_psp_T4 = 0;
+		i2s_config->ssp_psp_T5 = 14;
+		i2s_config->ssp_psp_T6 = 16;
+		break;
+
+	case 22050:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_22_050;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+				__func__,
+				freq);
+		return -EINVAL;
+
+	case 44100:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_44_100;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+				__func__,
+				freq);
+		return -EINVAL;
+
+	case 48000:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_48_000;
+		i2s_config->ssp_psp_T1 = 6;
+		i2s_config->ssp_psp_T2 = 2;
+		i2s_config->ssp_psp_T4 = 0;
+		i2s_config->ssp_psp_T5 = 14;
+		i2s_config->ssp_psp_T6 = 16;
+		break;
+
+	default:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_UNDEFINED;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+				__func__,
+				freq);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* DAI .set_tristate: `tristate` is a driver-specific bitfield decoded
+ * by the IS_* macros — it selects TXD tristate on/off and the
+ * frame-sync assertion timing. */
+static int ssp_set_dai_tristate(struct snd_soc_dai *cpu_dai,
+		int tristate)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	BUG_ON(!ssp_config);
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	if (IS_TRISTATE_ENABLED(tristate))
+		i2s_config->tx_tristate_enable = TXD_TRISTATE_ON;
+	else
+		i2s_config->tx_tristate_enable = TXD_TRISTATE_OFF;
+
+	if (IS_NEXT_FRMS_ASSERTED_WITH_LSB_PREVIOUS_FRM(tristate))
+		i2s_config->ssp_frmsync_timing_bit =
+				NEXT_FRMS_ASS_WITH_LSB_PREVIOUS_FRM;
+	else
+		i2s_config->ssp_frmsync_timing_bit =
+				NEXT_FRMS_ASS_AFTER_END_OF_T4;
+
+	pr_debug("FCT %s tristate %x\n", __func__, tristate);
+
+	return 0;
+}
+
+/**
+ * ssp_dai_trigger - handle stream start/stop/pause events
+ * @substream: substream for which the trigger was invoked
+ * @cmd: SNDRV_PCM_TRIGGER_* command from the upper layer
+ * @cpu_dai: CPU DAI whose workqueue services the transfer
+ *
+ * Called in atomic context, so the actual data transfer is deferred to
+ * the ssp_dai workqueue; only stream-status bookkeeping happens here.
+ * Returns 0 on success, -EINVAL on bad arguments, -EBUSY on mismatched
+ * START/STOP sequences.
+ */
+static int ssp_dai_trigger(struct snd_pcm_substream *substream,
+		int cmd, struct snd_soc_dai *cpu_dai)
+{
+	int ret_val = 0;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_runtime *pl_runtime;
+	struct intel_ssp_info *ssp_info;
+#ifdef _LLI_ENABLED_
+	struct intel_ssp_config *ssp_config;
+#endif /* _LLI_ENABLED_ */
+
+	bool trigger_start = true;
+	int stream = 0;
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+			__func__);
+
+	stream = substream->stream;
+
+
+	pl_runtime = substream->runtime;
+
+	WARN(!pl_runtime->private_data, "SSP DAI: ERROR "
+			"NULL pl_runtime->private_data\n");
+	if (!pl_runtime->private_data)
+		return -EINVAL;
+
+	WARN(!cpu_dai, "SSP DAI: ERROR NULL cpu_dai\n");
+	if (!cpu_dai)
+		return -EINVAL;
+
+	WARN(!cpu_dai->dev, "SSP DAI: ERROR NULL cpu_dai->dev\n");
+	if (!cpu_dai->dev)
+		return -EINVAL;
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+
+	/* NOTE(review): ssp_info itself is not NULL-checked before the
+	 * dereference below — confirm drvdata is always set by probe. */
+
+	WARN(!ssp_info->ssp_dai_wq, "SSP DAI: ERROR NULL ssp_dai_wq\n");
+	if (!ssp_info->ssp_dai_wq)
+		return -EINVAL;
+
+	str_info = pl_runtime->private_data;
+
+#ifdef _LLI_ENABLED_
+	ssp_config = str_info->ssp_config;
+
+	WARN(!ssp_config, "SSP DAI: ERROR NULL ssp_config\n");
+	if (!ssp_config)
+		return -EINVAL;
+#endif /* _LLI_ENABLED_ */
+
+	pr_debug("SSP DAI: FCT %s CMD = 0x%04X\n",
+			__func__, cmd);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		/* STARTED is set atomically: a second START without a STOP
+		 * in between is rejected with -EBUSY below */
+		if (!test_and_set_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+				&str_info->stream_status)) {
+#ifndef _LLI_ENABLED_
+			/* a previously DROPPED stream still has its work
+			 * item running; do not queue it a second time */
+			if (test_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
+					&str_info->stream_status)) {
+				pr_debug("SSP DAI: FCT %s do not restart the trigger stream running already\n",
+						__func__);
+				trigger_start = false;
+			} else
+#endif /* _LLI_ENABLED_ */
+				trigger_start = true;
+		} else {
+			pr_err("SSP DAI: ERROR 2 consecutive TRIGGER_START\n");
+			return -EBUSY;
+		}
+
+		/* Store the substream locally */
+		if (trigger_start) {
+			if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+
+				pr_debug("SSP DAI: queue Playback Work\n");
+				queue_work(ssp_info->ssp_dai_wq,
+						&str_info->ssp_ws);
+			} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
+
+				pr_debug("SSP DAI: queue Capture Work\n");
+				queue_work(ssp_info->ssp_dai_wq,
+						&str_info->ssp_ws);
+			} else {
+				pr_err("SSP DAI: SNDRV_PCM_TRIGGER_START Bad Stream: %d\n",
+						substream->stream);
+				return -EINVAL;
+			}
+		}
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (test_and_clear_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+				&str_info->stream_status)) {
+#ifdef _LLI_ENABLED_
+			/* stop DMA interrupts for the stopped direction */
+			intel_mid_i2s_command(ssp_config->i2s_handle,
+					(stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+					SSP_CMD_DISABLE_DMA_TX_INTR :
+					SSP_CMD_DISABLE_DMA_RX_INTR,
+					NULL);
+#else
+			/* remember the drop so a future START does not queue
+			 * the work item while it may still be running */
+			set_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
+					&str_info->stream_status);
+#endif /* _LLI_ENABLED_ */
+		} else {
+			pr_err("SSP DAI: trigger START/STOP mismatch\n");
+			return -EBUSY;
+		}
+		break;
+
+	default:
+		pr_err("SSP DAI: snd_i2s_alsa_pcm_trigger Bad Command\n");
+		return -EINVAL;
+	}
+	return ret_val;
+} /* ssp_dai_trigger */
+
+/**
+ * ssp_dai_hw_params - hw_params DAI callback (intentionally a no-op)
+ * @substream: substream being configured
+ * @hw_params: requested hardware parameters
+ * @cpu_dai: CPU DAI
+ *
+ * All SSP hardware setup and DMA channel allocation for this driver
+ * happens in ssp_dai_prepare(), so there is nothing to do at the
+ * hw_params stage; always returns 0.
+ */
+static int ssp_dai_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *hw_params,
+		struct snd_soc_dai *cpu_dai)
+{
+
+	return 0;
+}
+
+/*
+ * ssp_dai_prepare - push the SSP HW config and allocate DMA channels
+ * @substream: stream being prepared (direction selects TX vs RX channel)
+ * @cpu_dai: CPU DAI carrying the intel_ssp_config as DMA data
+ *
+ * The SSP HW configuration is applied only while neither direction holds
+ * a DMA channel; each direction then allocates its channel exactly once.
+ * Returns 0 on success, -EINVAL on bad arguments or stream direction,
+ * -EBUSY when a DMA channel cannot be allocated.
+ */
+static int ssp_dai_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_ssp_info *ssp_info;
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+			__func__);
+
+	WARN(!cpu_dai, "SSP DAI: ERROR NULL cpu_dai\n");
+	if (!cpu_dai)
+		return -EINVAL;
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+	WARN(!ssp_info, "SSP DAI: ERROR NULL ssp_info\n");
+	if (!ssp_info)
+		return -EINVAL;
+
+	ssp_config = snd_soc_dai_get_dma_data(cpu_dai, substream);
+	/*
+	 * Fix: ssp_config was dereferenced below with no NULL check,
+	 * unlike every other pointer validated in this file.
+	 */
+	WARN(!ssp_config, "SSP DAI: ERROR NULL ssp_config\n");
+	if (!ssp_config)
+		return -EINVAL;
+
+	pr_debug("SSP DAI: FCT %s ssp_dai_tx_allocated %d "\
+			"ssp_dai_rx_allocated %d\n",
+			__func__,
+			ssp_config->ssp_dai_tx_allocated,
+			ssp_config->ssp_dai_rx_allocated);
+
+	/*
+	 * The set HW Config is only done once per CPU DAI, before either
+	 * direction has claimed its DMA channel.
+	 */
+	if (!ssp_config->ssp_dai_tx_allocated &&
+			!ssp_config->ssp_dai_rx_allocated) {
+		intel_mid_i2s_command(ssp_config->i2s_handle,
+				SSP_CMD_SET_HW_CONFIG,
+				&(ssp_config->i2s_settings));
+	}
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		if (!ssp_config->ssp_dai_tx_allocated) {
+			if (intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_ALLOC_TX, NULL)) {
+				pr_err("can not alloc TX DMA Channel\n");
+				return -EBUSY;
+			}
+			ssp_config->ssp_dai_tx_allocated = true;
+		}
+		break;
+
+	case SNDRV_PCM_STREAM_CAPTURE:
+		if (!ssp_config->ssp_dai_rx_allocated) {
+			if (intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_ALLOC_RX, NULL)) {
+				pr_err("can not alloc RX DMA Channel\n");
+				return -EBUSY;
+			}
+			ssp_config->ssp_dai_rx_allocated = true;
+		}
+		break;
+
+	default:
+		pr_err("SSP DAI: FCT %s Bad stream_dir: %d\n",
+				__func__, substream->stream);
+		return -EINVAL;
+	}
+
+#ifndef _LLI_ENABLED_
+	ssp_config->intel_mid_dma_alloc = true;
+#endif /* _LLI_ENABLED_ */
+
+	pr_debug("SSP DAI: FCT %s leaves\n",
+			__func__);
+
+	return 0;
+}
+
+
+
+/* BT/FM */
+/* DAI callback table shared by the modem and BT CPU DAIs below.
+ * set_pll is intentionally left unimplemented for this SSP. */
+static struct snd_soc_dai_ops ssp_dai_ops = {
+	.startup	= ssp_dai_startup,
+	.shutdown	= ssp_dai_shutdown,
+	.trigger	= ssp_dai_trigger,
+	.hw_params	= ssp_dai_hw_params,
+	.prepare	= ssp_dai_prepare,
+	.set_sysclk	= ssp_set_dai_sysclk,
+	.set_pll	= NULL,
+	.set_fmt	= ssp_set_dai_fmt,
+	.set_tdm_slot	= ssp_set_dai_tdm_slot,
+	.set_tristate	= ssp_set_dai_tristate,
+};
+
+/*
+ * NOTE(review): 11.025/22.05/44.1 kHz are advertised here, but the
+ * set_sysclk handler above currently rejects those rates with -EINVAL
+ * (no PSP timing values defined for them) — confirm this list is
+ * intentional.
+ */
+#define SSP_SUPPORTED_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | \
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
+			SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+/* 8/16-bit, signed and unsigned, little-endian PCM only */
+#define SSP_SUPPORTED_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+			SNDRV_PCM_FMTBIT_U16_LE | \
+			SNDRV_PCM_FMTBIT_S8 | \
+			SNDRV_PCM_FMTBIT_U8)
+
+/*
+ * CPU DAI definitions: one DAI for the modem SSP and one for the BT/FM
+ * SSP. Both advertise identical capabilities (1-8 channels, the rate
+ * and format masks above) and share ssp_dai_ops.
+ */
+struct snd_soc_dai_driver intel_ssp_platform_dai[] = {
+{
+	.name = SSP_MODEM_DAI_NAME,
+	.id = 0,
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.capture = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.ops = &ssp_dai_ops,
+	.probe = ssp_probe,
+	.remove = ssp_remove,
+},
+{
+	.name = SSP_BT_DAI_NAME,
+	.id = 1,
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.capture = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.ops = &ssp_dai_ops,
+	.probe = ssp_probe,
+	.remove = ssp_remove,
+},
+};
+
+/* ASoC component wrapper under which the CPU DAIs are registered */
+static const struct snd_soc_component_driver ssp_component = {
+	.name = "ssp",
+};
+
+/*
+ * ssp_dai_probe - platform probe: register platform, component and the
+ * transfer workqueue
+ * @pdev: the "mid-ssp-dai" platform device
+ *
+ * Fix: the error paths used to unwind the wrong things — a failing
+ * snd_soc_register_platform() unregistered a component that was never
+ * registered, and later failures left the platform registered. Unwind
+ * now mirrors registration order exactly (goto cleanup).
+ */
+static int ssp_dai_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct intel_ssp_info *ssp_info;
+
+	pr_info("SSP DAI: FCT %s enters\n",
+			__func__);
+
+	ssp_info = kzalloc(sizeof(struct intel_ssp_info), GFP_KERNEL);
+	if (ssp_info == NULL) {
+		pr_err("Unable to allocate ssp_info\n");
+		return -ENOMEM;
+	}
+	pr_info("ssp_info address %p", ssp_info);
+
+	ret = snd_soc_register_platform(&pdev->dev,
+			&soc_ssp_platform_drv);
+	if (ret) {
+		pr_err("registering SSP PLATFORM failed\n");
+		ret = -EBUSY;
+		goto free_ssp_info;
+	}
+
+	ret = snd_soc_register_component(&pdev->dev, &ssp_component,
+			intel_ssp_platform_dai,
+			ARRAY_SIZE(intel_ssp_platform_dai));
+	if (ret) {
+		pr_err("registering cpu DAIs failed\n");
+		ret = -EBUSY;
+		goto unregister_platform;
+	}
+
+	ssp_info->ssp_dai_wq = create_workqueue("ssp_transfer_data");
+	if (!ssp_info->ssp_dai_wq) {
+		pr_err("work queue failed\n");
+		ret = -ENOMEM;
+		goto unregister_component;
+	}
+
+	platform_set_drvdata(pdev, ssp_info);
+
+	pr_info("SSP DAI: FCT %s leaves %d\n",
+			__func__, ret);
+
+	return ret;
+
+unregister_component:
+	snd_soc_unregister_component(&pdev->dev);
+unregister_platform:
+	snd_soc_unregister_platform(&pdev->dev);
+free_ssp_info:
+	kfree(ssp_info);
+	return ret;
+}
+
+/*
+ * ssp_dai_remove - platform remove: tear down workqueue and unregister
+ * @pdev: the "mid-ssp-dai" platform device
+ *
+ * Fixes: the NULL-drvdata message was a copy/paste from probe ("Unable
+ * to allocate ssp_info") with a misleading -ENOMEM; ssp_info (kzalloc'd
+ * in probe) was also leaked on every remove.
+ */
+static int ssp_dai_remove(struct platform_device *pdev)
+{
+	struct intel_ssp_info *ssp_info = platform_get_drvdata(pdev);
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+			__func__);
+
+	if (ssp_info == NULL) {
+		pr_err("SSP DAI: NULL ssp_info, nothing to remove\n");
+		return -EINVAL;
+	}
+	pr_info("ssp_info address %p", ssp_info);
+
+	/* drain any queued transfer work before destroying the queue */
+	flush_workqueue(ssp_info->ssp_dai_wq);
+
+	destroy_workqueue(ssp_info->ssp_dai_wq);
+
+	platform_set_drvdata(pdev, NULL);
+
+	snd_soc_unregister_component(&pdev->dev);
+
+	snd_soc_unregister_platform(&pdev->dev);
+
+	/* Fix: ssp_info was leaked here */
+	kfree(ssp_info);
+
+	pr_debug("SSP DAI: FCT %s leaves\n",
+			__func__);
+
+	return 0;
+}
+
+/* Platform driver glue: binds against the "mid-ssp-dai" platform device */
+static struct platform_driver intel_ssp_dai_driver = {
+	.driver = {
+		.name = "mid-ssp-dai",
+		.owner = THIS_MODULE,
+	},
+	.probe = ssp_dai_probe,
+	.remove = ssp_dai_remove,
+};
+
+
+/* Module entry point: register the SSP DAI platform driver */
+static int __init ssp_soc_dai_init(void)
+{
+	pr_info("SSP DAI: FCT %s called\n",
+			__func__);
+
+	return platform_driver_register(&intel_ssp_dai_driver);
+}
+module_init(ssp_soc_dai_init);
+
+/* Module exit point: unregister the driver */
+static void __exit ssp_soc_dai_exit(void)
+{
+	pr_debug("SSP DAI: FCT %s called\n",
+			__func__);
+
+	platform_driver_unregister(&intel_ssp_dai_driver);
+
+}
+module_exit(ssp_soc_dai_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
+MODULE_AUTHOR("Selma Bensaid");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ssp-cpu-dai");
--- /dev/null
+/*
+ * mfld_ssp.h - ASoC CPU DAI driver for the Intel MID SSP port
+ *
+ * Copyright (C) 2011-12 Intel Corp
+ * Authors: Selma Bensaid <selma.bensaid@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef MID_SSP_H_
+#define MID_SSP_H_
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/info.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include <linux/intel_mid_i2s_if.h>
+
+/* DAI names exposed to machine drivers */
+#define SSP_MODEM_DAI_NAME "ssp-modem-cpu-dai"
+#define SSP_BT_DAI_NAME "ssp-bt-cpu-dai"
+
+/* ALSA buffer bounds; min == max means a fixed 640 KiB buffer */
+#define SSP_MAX_BUFFER (640*1024)
+#define SSP_MIN_BUFFER (640*1024)
+
+/* Layout of the flags word passed to the set_tristate DAI op */
+#define TRISTATE_BIT 0
+#define FRAME_SYNC_RELATIVE_TIMING_BIT 1
+#define DUMMY_START_ONE_PERIOD_OFFSET 2
+#define DUMMY_START_ONE_PERIOD_MASK 0x3
+
+/* Accessors for the set_tristate flags word above */
+#define IS_TRISTATE_ENABLED(x) (x & BIT(TRISTATE_BIT))
+#define IS_NEXT_FRMS_ASSERTED_WITH_LSB_PREVIOUS_FRM(x) \
+	((x & BIT(FRAME_SYNC_RELATIVE_TIMING_BIT)) \
+	>> FRAME_SYNC_RELATIVE_TIMING_BIT)
+#define IS_DUMMY_START_ONE_PERIOD_OFFSET(x) \
+	((x >> DUMMY_START_ONE_PERIOD_OFFSET) \
+	& DUMMY_START_ONE_PERIOD_MASK)
+
+/* SSP FIFO interrupt thresholds, in FIFO entries */
+#define MID_SSP_RX_FIFO_THRESHOLD 8
+#define MID_SSP_TX_FIFO_THRESHOLD 7
+
+
+/* data driven FALLING, data sampled RISING, idle LOW */
+#define SSP_DAI_SCMODE_0 (1 << 4)
+/* data driven RISING, data sampled FALLING, idle LOW */
+#define SSP_DAI_SCMODE_1 (2 << 4)
+/* data driven RISING, data sampled FALLING, idle HIGH */
+#define SSP_DAI_SCMODE_2 (3 << 4)
+/* data driven FALLING, data sampled RISING, idle HIGH */
+#define SSP_DAI_SCMODE_3 (4 << 4)
+
+
+/*
+ * Structures Definition
+ */
+
+
+/* Per-SSP-port configuration, attached to the CPU DAI as DMA data */
+struct intel_ssp_config {
+	/* handle onto the intel_mid_i2s driver instance */
+	struct intel_mid_i2s_hdl *i2s_handle;
+	/* HW settings pushed via SSP_CMD_SET_HW_CONFIG at prepare time */
+	struct intel_mid_i2s_settings i2s_settings;
+#ifndef _LLI_ENABLED_
+	bool intel_mid_dma_alloc;
+#endif /* _LLI_ENABLED_ */
+	/* each direction allocates its DMA channel exactly once */
+	bool ssp_dai_tx_allocated;
+	bool ssp_dai_rx_allocated;
+};
+
+/* Driver-level state: the workqueue that performs deferred transfers */
+struct intel_ssp_info {
+	struct workqueue_struct *ssp_dai_wq;
+};
+
+/* Per-stream state, stored in runtime->private_data */
+struct intel_alsa_ssp_stream_info {
+	struct snd_pcm_substream *substream;
+	/* work item queued by the trigger callback */
+	struct work_struct ssp_ws;
+	struct intel_ssp_config *ssp_config;
+	/* bitmap of intel_alsa_ssp_stream_status bits */
+	unsigned long stream_status;
+	u32 period_req_index;
+	s32 period_cb_index;
+	u8 *addr;
+	int length;
+};
+
+
+/*
+ * Enum Definition
+ */
+
+/* Bit positions used in intel_alsa_ssp_stream_info.stream_status */
+enum intel_alsa_ssp_stream_status {
+	INTEL_ALSA_SSP_STREAM_INIT = 0,
+	INTEL_ALSA_SSP_STREAM_STARTED,
+	INTEL_ALSA_SSP_STREAM_RUNNING,
+	INTEL_ALSA_SSP_STREAM_PAUSED,
+	INTEL_ALSA_SSP_STREAM_DROPPED,
+};
+/* SSP clock source selection */
+enum ssp_clk_def {
+	SSP_CLK_ONCHIP = 0x0,
+	SSP_CLK_NET,
+	SSP_CLK_EXT,
+	SSP_CLK_AUDIO
+};
+
+
+#endif /* MID_SSP_H_ */
--- /dev/null
+# Makefile for SST Audio driver
+snd-intel-sst-objs := sst.o sst_ipc.o sst_stream.o sst_drv_interface.o sst_dsp.o sst_pvt.o sst_app_interface.o sst_acpi.o
+
+# debugfs support is only compiled in when the kernel has DEBUG_FS
+ifdef CONFIG_DEBUG_FS
+	snd-intel-sst-objs += sst_debug.o
+endif
+
+obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
+
+
+# -I$(src): trace-point headers (sst_trace.h) must be found from $(src)
+CFLAGS_snd-intel-sst.o = -I$(src)
+
+# MRFLD_WORD_WA: Merrifield word workaround flag — presumably guards
+# word-size handling in the sources; confirm before removing.
+# -Werror keeps the driver warning-clean.
+ccflags-y += -DMRFLD_WORD_WA -Werror
--- /dev/null
+/*
+ * sst.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This driver enumerates the SST audio engine as a PCI or ACPI device and
+ * provides interface to the platform driver to interact with the SST audio
+ * Firmware.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/async.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <asm/intel-mid.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/platform_sst.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define CREATE_TRACE_POINTS
+#include "sst_trace.h"
+
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
+MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(SST_DRIVER_VERSION);
+
+/* single global driver context, guarded by drv_ctx_lock */
+struct intel_sst_drv *sst_drv_ctx;
+/*
+ * Fix: the mutex was declared uninitialized ("static struct mutex
+ * drv_ctx_lock;") yet is taken in sst_alloc_drv_context() with no
+ * visible mutex_init(). Static DEFINE_MUTEX initialization removes any
+ * init-ordering hazard.
+ */
+static DEFINE_MUTEX(drv_ctx_lock);
+
+/*
+ * * ioctl32 compat
+ * */
+#ifdef CONFIG_COMPAT
+#include "sst_app_compat_interface.c"
+#else
+#define intel_sst_ioctl_compat NULL
+#endif
+
+/* Character-device ops for the /dev/intel_sst_ctrl control node */
+static const struct file_operations intel_sst_fops_cntrl = {
+	.owner = THIS_MODULE,
+	.open = intel_sst_open_cntrl,
+	.release = intel_sst_release_cntrl,
+	.unlocked_ioctl = intel_sst_ioctl,
+	.compat_ioctl = intel_sst_ioctl_compat,
+};
+
+/* misc device exposing the SST control interface to user space */
+struct miscdevice lpe_ctrl = {
+	.minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
+	.name = "intel_sst_ctrl",/* /dev/intel_sst_ctrl */
+	.fops = &intel_sst_fops_cntrl
+};
+
+/*
+ * set_imr_interrupts - mask or unmask the SST done/busy interrupts
+ * @ctx: driver context (shim + ipc spinlock)
+ * @enable: true to unmask both interrupts, false to mask them
+ *
+ * Read-modify-write of SST_IMRX under ipc_spin_lock; an IMR bit of 0
+ * means the interrupt is delivered, 1 means it is masked.
+ */
+static inline void set_imr_interrupts(struct intel_sst_drv *ctx, bool enable)
+{
+	union interrupt_reg imr;
+	const unsigned int masked = enable ? 0 : 1;
+
+	spin_lock(&ctx->ipc_spin_lock);
+	imr.full = sst_shim_read(ctx->shim, SST_IMRX);
+	imr.part.done_interrupt = masked;
+	imr.part.busy_interrupt = masked;
+	sst_shim_write(ctx->shim, SST_IMRX, imr.full);
+	spin_unlock(&ctx->ipc_spin_lock);
+}
+
+/* Fix: parenthesize macro parameters so expression arguments expand safely */
+#define SST_IS_PROCESS_REPLY(header) (((header) & PROCESS_MSG) ? true : false)
+#define SST_VALIDATE_MAILBOX_SIZE(size) (((size) <= SST_MAILBOX_SIZE) ? true : false)
+
+/*
+ * intel_sst_interrupt_mrfld - top-half ISR for Merrifield (64-bit shim)
+ * @irq: interrupt number
+ * @context: intel_sst_drv device context
+ *
+ * "done" acks clear IPCX and are punted to the post-message workqueue;
+ * "busy" messages are copied off the mailbox, queued on rx_list and
+ * handed to the threaded handler via IRQ_WAKE_THREAD.
+ * Only change vs. original: log-message typo "siz" -> "size".
+ */
+static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
+{
+	union interrupt_reg_mrfld isr;
+	union ipc_header_mrfld header;
+	union sst_imr_reg_mrfld imr;
+	struct ipc_post *msg = NULL;
+	unsigned int size = 0;
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+	irqreturn_t retval = IRQ_HANDLED;
+
+	/* Interrupt arrived, check src */
+	isr.full = sst_shim_read64(drv->shim, SST_ISRX);
+	if (isr.part.done_interrupt) {
+		/* Clear done bit */
+		spin_lock(&drv->ipc_spin_lock);
+		header.full = sst_shim_read64(drv->shim,
+					drv->ipc_reg.ipcx);
+		header.p.header_high.part.done = 0;
+		sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);
+		/* write 1 to clear status register */;
+		isr.part.done_interrupt = 1;
+		sst_shim_write64(drv->shim, SST_ISRX, isr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		trace_sst_ipc("ACK <-", header.p.header_high.full,
+					header.p.header_low_payload,
+					header.p.header_high.part.drv_id);
+		queue_work(drv->post_msg_wq, &drv->ipc_post_msg.wq);
+		retval = IRQ_HANDLED;
+	}
+	if (isr.part.busy_interrupt) {
+		/* mask the busy interrupt until the message is consumed */
+		spin_lock(&drv->ipc_spin_lock);
+		imr.full = sst_shim_read64(drv->shim, SST_IMRX);
+		imr.part.busy_interrupt = 1;
+		sst_shim_write64(drv->shim, SST_IMRX, imr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		header.full = sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);
+		if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
+			pr_err("No memory available\n");
+			drv->ops->clear_interrupt();
+			return IRQ_HANDLED;
+		}
+		if (header.p.header_high.part.large) {
+			size = header.p.header_low_payload;
+			if (SST_VALIDATE_MAILBOX_SIZE(size)) {
+				memcpy_fromio(msg->mailbox_data,
+					drv->mailbox + drv->mailbox_recv_offset, size);
+			} else {
+				pr_err("Mailbox not copied, payload size is: %u\n", size);
+				header.p.header_low_payload = 0;
+			}
+		}
+		msg->mrfld_header = header;
+		msg->is_process_reply =
+			SST_IS_PROCESS_REPLY(header.p.header_high.part.msg_id);
+		trace_sst_ipc("REPLY <-", msg->mrfld_header.p.header_high.full,
+					msg->mrfld_header.p.header_low_payload,
+					msg->mrfld_header.p.header_high.part.drv_id);
+		spin_lock(&drv->rx_msg_lock);
+		list_add_tail(&msg->node, &drv->rx_list);
+		spin_unlock(&drv->rx_msg_lock);
+		drv->ops->clear_interrupt();
+		retval = IRQ_WAKE_THREAD;
+	}
+	return retval;
+}
+
+/*
+ * intel_sst_irq_thread_mfld - threaded half of the SST interrupt
+ *
+ * Drains drv->rx_list and dispatches each message to process_message or
+ * process_reply. The lock is dropped around each handler call, so new
+ * messages may be appended concurrently; list_del before unlocking
+ * keeps every node owned by exactly one path.
+ */
+static irqreturn_t intel_sst_irq_thread_mfld(int irq, void *context)
+{
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+	struct ipc_post *__msg, *msg = NULL;
+	unsigned long irq_flags;
+
+	/* unlocked fast-path check; the locked loop below re-validates */
+	if (list_empty(&drv->rx_list))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+	list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
+
+		list_del(&msg->node);
+		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+		if (msg->is_process_reply)
+			drv->ops->process_message(msg);
+		else
+			drv->ops->process_reply(msg);
+
+		/* large messages carry a separately allocated mailbox copy */
+		if (msg->is_large)
+			kfree(msg->mailbox_data);
+		kfree(msg);
+		spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+	}
+	spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+	return IRQ_HANDLED;
+}
+/**
+* intel_sst_intr_mfld - Interrupt service routine for SST (32-bit shim)
+*
+* @irq: irq number of interrupt
+* @context: pointer to the intel_sst_drv device structure
+*
+* This function is called by OS when SST device raises
+* an interrupt. This will be result of write in IPC register
+* Source can be busy or done interrupt
+*/
+static irqreturn_t intel_sst_intr_mfld(int irq, void *context)
+{
+	union interrupt_reg isr;
+	union ipc_header header;
+	irqreturn_t retval = IRQ_HANDLED;
+	struct ipc_post *msg = NULL;
+	unsigned int size = 0;
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+
+	/* Interrupt arrived, check src */
+	isr.full = sst_shim_read(drv->shim, SST_ISRX);
+	if (isr.part.done_interrupt) {
+		/* Mask all interrupts till this one is processed */
+		set_imr_interrupts(drv, false);
+		/* Clear done bit */
+		spin_lock(&drv->ipc_spin_lock);
+		header.full = sst_shim_read(drv->shim, drv->ipc_reg.ipcx);
+		header.part.done = 0;
+		sst_shim_write(drv->shim, drv->ipc_reg.ipcx, header.full);
+		/* write 1 to clear status register */;
+		isr.part.done_interrupt = 1;
+		sst_shim_write(drv->shim, SST_ISRX, isr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		/* Fix: use the passed-in context consistently instead of the
+		 * global sst_drv_ctx (same single-instance object) */
+		queue_work(drv->post_msg_wq, &drv->ipc_post_msg.wq);
+
+		/* Un mask done and busy intr */
+		set_imr_interrupts(drv, true);
+		retval = IRQ_HANDLED;
+	}
+	if (isr.part.busy_interrupt) {
+		/* Mask all interrupts till we process it in bottom half */
+		set_imr_interrupts(drv, false);
+		header.full = sst_shim_read(drv->shim, drv->ipc_reg.ipcd);
+		if (sst_create_ipc_msg(&msg, header.part.large)) {
+			pr_err("No memory available\n");
+			drv->ops->clear_interrupt();
+			return IRQ_HANDLED;
+		}
+		if (header.part.large) {
+			size = header.part.data;
+			if (SST_VALIDATE_MAILBOX_SIZE(size)) {
+				memcpy_fromio(msg->mailbox_data,
+					drv->mailbox + drv->mailbox_recv_offset + 4, size);
+			} else {
+				/* Fix: log typo "siz" -> "size" */
+				pr_err("Mailbox not copied, payload size is: %u\n", size);
+				header.part.data = 0;
+			}
+		}
+		msg->header = header;
+		msg->is_process_reply =
+			SST_IS_PROCESS_REPLY(msg->header.part.msg_id);
+		spin_lock(&drv->rx_msg_lock);
+		list_add_tail(&msg->node, &drv->rx_list);
+		spin_unlock(&drv->rx_msg_lock);
+		drv->ops->clear_interrupt();
+		retval = IRQ_WAKE_THREAD;
+	}
+	return retval;
+}
+
+/*
+ * sst_save_dsp_context_v2 - ask the DSP FW to save its context (D3 prep)
+ * @sst: driver context
+ *
+ * Sends an IPC_PREP_D3 command and waits (with timeout) for the FW ack.
+ * Returns 0 on success — and also when the message cannot even be
+ * allocated, so suspend proceeds; returns -EIO when the FW times out
+ * or reports an error.
+ */
+static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
+{
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct sst_block *block;
+
+	/*send msg to fw*/
+	pvt_id = sst_assign_pvt_id(sst);
+	if (sst_create_block_and_ipc_msg(&msg, true, sst, &block,
+				IPC_CMD, pvt_id)) {
+		pr_err("msg/block alloc failed. Not proceeding with context save\n");
+		return 0;
+	}
+
+	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+			      SST_TASK_ID_MEDIA, 1, pvt_id);
+	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr);
+	/* res_rqd: firmware must reply before we continue suspending */
+	msg->mrfld_header.p.header_high.part.res_rqd = 1;
+	sst_fill_header_dsp(&dsp_hdr, IPC_PREP_D3, PIPE_RSVD, pvt_id);
+	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+
+	sst_add_to_dispatch_list_and_post(sst, msg);
+	/*wait for reply*/
+	if (sst_wait_timeout(sst, block)) {
+		pr_err("sst: err fw context save timeout ...\n");
+		pr_err("not suspending FW!!!");
+		sst_free_block(sst, block);
+		return -EIO;
+	}
+	if (block->ret_code) {
+		pr_err("fw responded w/ error %d", block->ret_code);
+		sst_free_block(sst, block);
+		return -EIO;
+	}
+
+	sst_free_block(sst, block);
+	return 0;
+}
+
+/*
+ * sst_save_dsp_context - save FW context via IPC_IA_GET_FW_CTXT (32-bit)
+ * @sst: driver context
+ *
+ * Fix: the body mixed the @sst parameter with the global sst_drv_ctx at
+ * random; it now uses @sst throughout. The two are the same object —
+ * sst_alloc_drv_context() enforces a single context — but the mix was
+ * misleading and fragile.
+ */
+static int sst_save_dsp_context(struct intel_sst_drv *sst)
+{
+	struct snd_sst_ctxt_params fw_context;
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct sst_block *block;
+	pr_debug("%s: Enter\n", __func__);
+
+	/*send msg to fw*/
+	pvt_id = sst_assign_pvt_id(sst);
+	if (sst_create_block_and_ipc_msg(&msg, true, sst, &block,
+				IPC_IA_GET_FW_CTXT, pvt_id)) {
+		pr_err("msg/block alloc failed. Not proceeding with context save\n");
+		return -ENOMEM;
+	}
+	sst_fill_header(&msg->header, IPC_IA_GET_FW_CTXT, 1, pvt_id);
+	msg->header.part.data = sizeof(fw_context) + sizeof(u32);
+	/* hand the FW the physical address of the context save area */
+	fw_context.address = virt_to_phys((void *)sst->fw_cntx);
+	fw_context.size = FW_CONTEXT_MEM;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32),
+				&fw_context, sizeof(fw_context));
+	sst_add_to_dispatch_list_and_post(sst, msg);
+	/*wait for reply*/
+	if (sst_wait_timeout(sst, block))
+		pr_err("sst: err fw context save timeout ...\n");
+	pr_debug("fw context saved ...\n");
+	if (block->ret_code)
+		sst->fw_cntx_size = 0;
+	else
+		sst->fw_cntx_size = *sst->fw_cntx;
+	pr_debug("fw copied data %x\n", sst->fw_cntx_size);
+	sst_free_block(sst, block);
+	return 0;
+}
+
+/* Merrifield ops: 64-bit shim/IPC path */
+static struct intel_sst_ops mrfld_ops = {
+	.interrupt = intel_sst_interrupt_mrfld,
+	.irq_thread = intel_sst_irq_thread_mfld,
+	.clear_interrupt = intel_sst_clear_intr_mrfld,
+	.start = sst_start_mrfld,
+	.reset = intel_sst_reset_dsp_mrfld,
+	.post_message = sst_post_message_mrfld,
+	.sync_post_message = sst_sync_post_message_mrfld,
+	.process_message = sst_process_message_mrfld,
+	.process_reply = sst_process_reply_mrfld,
+	.save_dsp_context = sst_save_dsp_context_v2,
+	.alloc_stream = sst_alloc_stream_mrfld,
+	.post_download = sst_post_download_mrfld,
+	.do_recovery = sst_do_recovery_mrfld,
+};
+
+/* Baytrail ops: Merrifield start/reset flow with the 32-bit IPC path */
+static struct intel_sst_ops mrfld_32_ops = {
+	.interrupt = intel_sst_intr_mfld,
+	.irq_thread = intel_sst_irq_thread_mfld,
+	.clear_interrupt = intel_sst_clear_intr_mfld,
+	.start = sst_start_mrfld,
+	.reset = intel_sst_reset_dsp_mrfld,
+	.post_message = sst_post_message_mfld,
+	.sync_post_message = sst_sync_post_message_mfld,
+	.process_message = sst_process_message_mfld,
+	.process_reply = sst_process_reply_mfld,
+	.save_dsp_context = sst_save_dsp_context,
+	.restore_dsp_context = sst_restore_fw_context,
+	.alloc_stream = sst_alloc_stream_ctp,
+	.post_download = sst_post_download_byt,
+	.do_recovery = sst_do_recovery,
+};
+
+/* Clovertrail (CTP) ops: full 32-bit path, plus SSP bypass support */
+static struct intel_sst_ops ctp_ops = {
+	.interrupt = intel_sst_intr_mfld,
+	.irq_thread = intel_sst_irq_thread_mfld,
+	.clear_interrupt = intel_sst_clear_intr_mfld,
+	.start = sst_start_mfld,
+	.reset = intel_sst_reset_dsp_mfld,
+	.post_message = sst_post_message_mfld,
+	.sync_post_message = sst_sync_post_message_mfld,
+	.process_message = sst_process_message_mfld,
+	.process_reply = sst_process_reply_mfld,
+	.set_bypass = intel_sst_set_bypass_mfld,
+	.save_dsp_context = sst_save_dsp_context,
+	.restore_dsp_context = sst_restore_fw_context,
+	.alloc_stream = sst_alloc_stream_ctp,
+	.post_download = sst_post_download_ctp,
+	.do_recovery = sst_do_recovery,
+};
+
+/*
+ * sst_driver_ops - bind the platform ops table and timestamp offset
+ * @sst: driver context with pci_id already set
+ *
+ * Returns 0 on a recognized PCI id, -EINVAL otherwise.
+ * Fixes: stray ";" after the switch brace, "capablities" typo and
+ * missing newline in the error message.
+ */
+int sst_driver_ops(struct intel_sst_drv *sst)
+{
+
+	switch (sst->pci_id) {
+	case SST_MRFLD_PCI_ID:
+		sst->tstamp = SST_TIME_STAMP_MRFLD;
+		sst->ops = &mrfld_ops;
+		return 0;
+	case SST_BYT_PCI_ID:
+		sst->tstamp = SST_TIME_STAMP_BYT;
+		sst->ops = &mrfld_32_ops;
+		return 0;
+	case SST_CLV_PCI_ID:
+		sst->tstamp = SST_TIME_STAMP;
+		sst->ops = &ctp_ops;
+		return 0;
+	default:
+		pr_err("SST Driver capabilities missing for pci_id: %x\n", sst->pci_id);
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_alloc_drv_context - allocate the single global driver context
+ * @dev: owning device; allocation is devm-managed (freed on unbind)
+ *
+ * Only one SST context is supported; a second call fails with -EBUSY.
+ * drv_ctx_lock serializes the check-and-assign of sst_drv_ctx.
+ */
+int sst_alloc_drv_context(struct device *dev)
+{
+	struct intel_sst_drv *ctx;
+	mutex_lock(&drv_ctx_lock);
+	if (sst_drv_ctx) {
+		pr_err("Only one sst handle is supported\n");
+		mutex_unlock(&drv_ctx_lock);
+		return -EBUSY;
+	}
+	pr_debug("%s: %d", __func__, __LINE__);
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		pr_err("malloc fail\n");
+		mutex_unlock(&drv_ctx_lock);
+		return -ENOMEM;
+	}
+	sst_drv_ctx = ctx;
+	mutex_unlock(&drv_ctx_lock);
+	return 0;
+}
+
+/*
+ * sst_sysfs_get_recovery - sysfs show: report the current SST FW state
+ *
+ * Fix: use scnprintf bounded by PAGE_SIZE instead of raw sprintf, per
+ * sysfs show-method convention.
+ */
+static ssize_t sst_sysfs_get_recovery(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ctx->sst_state);
+}
+
+
+/*
+ * sst_sysfs_set_recovery - sysfs store: force FW state back to UN_INIT
+ *
+ * Writing "1" resets the SST state machine, but only when nothing holds
+ * a PM reference on the device.
+ */
+static ssize_t sst_sysfs_set_recovery(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	long val;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	if (kstrtol(buf, 0, &val))
+		return -EINVAL;
+
+	if (val == 1) {
+		if (!atomic_read(&ctx->pm_usage_count)) {
+			pr_debug("%s: set sst state to uninit...\n", __func__);
+			sst_set_fw_state_locked(ctx, SST_UN_INIT);
+		} else {
+			pr_err("%s: not setting sst state... %d\n", __func__,
+					atomic_read(&ctx->pm_usage_count));
+			pr_err("Unrecoverable state....\n");
+			/* NOTE(review): BUG() halts the kernel from a sysfs
+			 * write and makes the return below unreachable —
+			 * confirm this is intended vs. WARN + -EPERM. */
+			BUG();
+			return -EPERM;
+		}
+	}
+
+	return len;
+}
+
+/* audio_recovery: root-writable, world-readable recovery knob */
+static DEVICE_ATTR(audio_recovery, S_IRUGO | S_IWUSR,
+			sst_sysfs_get_recovery, sst_sysfs_set_recovery);
+
+/*
+ * sst_request_firmware_async - kick off async load of fw_sst_<pciid>.bin
+ * @ctx: driver context; sst_firmware_load_cb runs when the image arrives
+ *
+ * Returns the request_firmware_nowait() result (0 when the request was
+ * queued; loading itself completes later in the callback).
+ */
+int sst_request_firmware_async(struct intel_sst_drv *ctx)
+{
+	int ret = 0;
+
+	/* firmware name is derived from the PCI device id */
+	snprintf(ctx->firmware_name, sizeof(ctx->firmware_name),
+			"%s%04x%s", "fw_sst_",
+			ctx->pci_id, ".bin");
+	pr_debug("Requesting FW %s now...\n", ctx->firmware_name);
+
+	trace_sst_fw_download("Request firmware async", ctx->sst_state);
+
+	ret = request_firmware_nowait(THIS_MODULE, 1, ctx->firmware_name,
+			ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
+	if (ret)
+		pr_err("could not load firmware %s error %d\n", ctx->firmware_name, ret);
+
+	return ret;
+}
+/*
+* intel_sst_probe - PCI probe function
+*
+* @pci: PCI device structure
+* @pci_id: PCI device ID structure
+*
+* This function is called by OS when a device is found
+* This enables the device, interrupt etc
+*/
+static int intel_sst_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ int i, ret = 0;
+ struct intel_sst_ops *ops;
+ struct sst_platform_info *sst_pdata = pci->dev.platform_data;
+ int ddr_base;
+ u32 ssp_base_add;
+ u32 dma_base_add;
+ u32 len;
+
+
+
+ pr_debug("Probe for DID %x\n", pci->device);
+ ret = sst_alloc_drv_context(&pci->dev);
+ if (ret)
+ return ret;
+
+ sst_drv_ctx->dev = &pci->dev;
+ sst_drv_ctx->pci_id = pci->device;
+ if (!sst_pdata)
+ return -EINVAL;
+ sst_drv_ctx->pdata = sst_pdata;
+
+ if (!sst_drv_ctx->pdata->probe_data)
+ return -EINVAL;
+ memcpy(&sst_drv_ctx->info, sst_drv_ctx->pdata->probe_data,
+ sizeof(sst_drv_ctx->info));
+
+ sst_drv_ctx->use_32bit_ops = sst_drv_ctx->pdata->ipc_info->use_32bit_ops;
+ sst_drv_ctx->mailbox_recv_offset = sst_drv_ctx->pdata->ipc_info->mbox_recv_off;
+
+ if (0 != sst_driver_ops(sst_drv_ctx))
+ return -EINVAL;
+ ops = sst_drv_ctx->ops;
+ mutex_init(&sst_drv_ctx->stream_lock);
+ mutex_init(&sst_drv_ctx->sst_lock);
+ mutex_init(&sst_drv_ctx->mixer_ctrl_lock);
+ mutex_init(&sst_drv_ctx->csr_lock);
+
+ sst_drv_ctx->stream_cnt = 0;
+ sst_drv_ctx->fw_in_mem = NULL;
+ sst_drv_ctx->vcache.file1_in_mem = NULL;
+ sst_drv_ctx->vcache.file2_in_mem = NULL;
+ sst_drv_ctx->vcache.size1 = 0;
+ sst_drv_ctx->vcache.size2 = 0;
+
+ /* we don't use dma, so set to 0*/
+ sst_drv_ctx->use_dma = 0; //1;
+ sst_drv_ctx->use_lli = 1;
+
+ INIT_LIST_HEAD(&sst_drv_ctx->memcpy_list);
+ INIT_LIST_HEAD(&sst_drv_ctx->libmemcpy_list);
+
+ INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
+ INIT_LIST_HEAD(&sst_drv_ctx->block_list);
+ INIT_LIST_HEAD(&sst_drv_ctx->rx_list);
+ INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, ops->post_message);
+ init_waitqueue_head(&sst_drv_ctx->wait_queue);
+
+ sst_drv_ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
+ if (!sst_drv_ctx->mad_wq)
+ goto do_free_drv_ctx;
+ sst_drv_ctx->post_msg_wq =
+ create_singlethread_workqueue("sst_post_msg_wq");
+ if (!sst_drv_ctx->post_msg_wq)
+ goto free_mad_wq;
+
+ spin_lock_init(&sst_drv_ctx->ipc_spin_lock);
+ spin_lock_init(&sst_drv_ctx->block_lock);
+ spin_lock_init(&sst_drv_ctx->pvt_id_lock);
+ spin_lock_init(&sst_drv_ctx->rx_msg_lock);
+
+ sst_drv_ctx->ipc_reg.ipcx = SST_IPCX + sst_drv_ctx->pdata->ipc_info->ipc_offset;
+ sst_drv_ctx->ipc_reg.ipcd = SST_IPCD + sst_drv_ctx->pdata->ipc_info->ipc_offset;
+ pr_debug("ipcx 0x%x ipxd 0x%x", sst_drv_ctx->ipc_reg.ipcx,
+ sst_drv_ctx->ipc_reg.ipcd);
+
+ pr_info("Got drv data max stream %d\n",
+ sst_drv_ctx->info.max_streams);
+ for (i = 1; i <= sst_drv_ctx->info.max_streams; i++) {
+ struct stream_info *stream = &sst_drv_ctx->streams[i];
+ memset(stream, 0, sizeof(*stream));
+ stream->pipe_id = PIPE_RSVD;
+ mutex_init(&stream->lock);
+ }
+
+ ret = sst_request_firmware_async(sst_drv_ctx);
+ if (ret) {
+ pr_err("Firmware download failed:%d\n", ret);
+ goto do_free_mem;
+ }
+ /* Init the device */
+ ret = pci_enable_device(pci);
+ if (ret) {
+ pr_err("device can't be enabled\n");
+ goto do_free_mem;
+ }
+ sst_drv_ctx->pci = pci_dev_get(pci);
+ ret = pci_request_regions(pci, SST_DRV_NAME);
+ if (ret)
+ goto do_disable_device;
+ /* map registers */
+ /* SST Shim */
+
+ if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID) {
+ sst_drv_ctx->ddr_base = pci_resource_start(pci, 0);
+ /*
+ * check that the relocated IMR base matches with FW Binary
+ * put temporary check till better soln is available for FW
+ */
+ ddr_base = relocate_imr_addr_mrfld(sst_drv_ctx->ddr_base);
+ if (!sst_drv_ctx->pdata->lib_info) {
+ pr_err("%s:lib_info pointer NULL\n", __func__);
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ if (ddr_base != sst_drv_ctx->pdata->lib_info->mod_base) {
+ pr_err("FW LSP DDR BASE does not match with IFWI\n");
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ sst_drv_ctx->ddr_end = pci_resource_end(pci, 0);
+
+ sst_drv_ctx->ddr = pci_ioremap_bar(pci, 0);
+ if (!sst_drv_ctx->ddr)
+ goto do_unmap_ddr;
+ pr_debug("sst: DDR Ptr %p\n", sst_drv_ctx->ddr);
+ } else {
+ sst_drv_ctx->ddr = NULL;
+ }
+
+ /* SHIM */
+ sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
+ sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
+ if (!sst_drv_ctx->shim)
+ goto do_release_regions;
+ pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
+
+ /* Shared SRAM */
+ sst_drv_ctx->mailbox_add = pci_resource_start(pci, 2);
+ sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
+ if (!sst_drv_ctx->mailbox)
+ goto do_unmap_shim;
+ pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
+
+ /* IRAM */
+ sst_drv_ctx->iram_end = pci_resource_end(pci, 3);
+ sst_drv_ctx->iram_base = pci_resource_start(pci, 3);
+ sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
+ if (!sst_drv_ctx->iram)
+ goto do_unmap_sram;
+ pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
+
+ /* DRAM */
+ sst_drv_ctx->dram_end = pci_resource_end(pci, 4);
+ sst_drv_ctx->dram_base = pci_resource_start(pci, 4);
+ sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
+ if (!sst_drv_ctx->dram)
+ goto do_unmap_iram;
+ pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
+
+ if ((sst_pdata->pdata != NULL) &&
+ (sst_pdata->debugfs_data != NULL)) {
+ if (sst_pdata->ssp_data != NULL) {
+ /* SSP Register */
+ ssp_base_add = sst_pdata->ssp_data->base_add;
+ len = sst_pdata->debugfs_data->ssp_reg_size;
+ for (i = 0; i < sst_pdata->debugfs_data->num_ssp; i++) {
+ sst_drv_ctx->debugfs.ssp[i] =
+ devm_ioremap(&pci->dev,
+ ssp_base_add + (len * i), len);
+ if (!sst_drv_ctx->debugfs.ssp[i]) {
+ pr_warn("ssp ioremap failed\n");
+ continue;
+ }
+
+ pr_debug("\n ssp io 0x%p ssp 0x%x size 0x%x",
+ sst_drv_ctx->debugfs.ssp[i],
+ ssp_base_add, len);
+ }
+ }
+
+ /* DMA Register */
+ dma_base_add = sst_pdata->pdata->sst_dma_base[0];
+ len = sst_pdata->debugfs_data->dma_reg_size;
+ for (i = 0; i < sst_pdata->debugfs_data->num_dma; i++) {
+ sst_drv_ctx->debugfs.dma_reg[i] =
+ devm_ioremap(&pci->dev,
+ dma_base_add + (len * i), len);
+ if (!sst_drv_ctx->debugfs.dma_reg[i]) {
+ pr_warn("dma ioremap failed\n");
+ continue;
+ }
+
+ pr_debug("\n dma io 0x%p ssp 0x%x size 0x%x",
+ sst_drv_ctx->debugfs.dma_reg[i],
+ dma_base_add, len);
+ }
+ }
+
+ /* Do not access iram/dram etc before LPE is reset */
+
+ sst_drv_ctx->dump_buf.iram_buf.size = pci_resource_len(pci, 3);
+ sst_drv_ctx->dump_buf.iram_buf.buf = kzalloc(sst_drv_ctx->dump_buf.iram_buf.size,
+ GFP_KERNEL);
+ if (!sst_drv_ctx->dump_buf.iram_buf.buf) {
+ pr_err("%s: no memory\n", __func__);
+ ret = -ENOMEM;
+ goto do_unmap_dram;
+ }
+
+ sst_drv_ctx->dump_buf.dram_buf.size = pci_resource_len(pci, 4);
+ sst_drv_ctx->dump_buf.dram_buf.buf = kzalloc(sst_drv_ctx->dump_buf.dram_buf.size,
+ GFP_KERNEL);
+ if (!sst_drv_ctx->dump_buf.dram_buf.buf) {
+ pr_err("%s: no memory\n", __func__);
+ ret = -ENOMEM;
+ goto do_free_iram_buf;
+ }
+
+ pr_debug("\niram len 0x%x dram len 0x%x",
+ sst_drv_ctx->dump_buf.iram_buf.size,
+ sst_drv_ctx->dump_buf.dram_buf.size);
+
+ if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
+ sst_drv_ctx->probe_bytes = kzalloc(SST_MAX_BIN_BYTES, GFP_KERNEL);
+ if (!sst_drv_ctx->probe_bytes) {
+ pr_err("%s: no memory\n", __func__);
+ ret = -ENOMEM;
+ goto do_free_dram_buf;
+ }
+ }
+
+ sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+ sst_drv_ctx->irq_num = pci->irq;
+ /* Register the ISR */
+ ret = request_threaded_irq(pci->irq, sst_drv_ctx->ops->interrupt,
+ sst_drv_ctx->ops->irq_thread, 0, SST_DRV_NAME,
+ sst_drv_ctx);
+ if (ret)
+ goto do_free_probe_bytes;
+ pr_debug("Registered IRQ 0x%x\n", pci->irq);
+
+ /*Register LPE Control as misc driver*/
+ ret = misc_register(&lpe_ctrl);
+ if (ret) {
+ pr_err("couldn't register control device\n");
+ goto do_free_irq;
+ }
+ /* default intr are unmasked so set this as masked */
+ if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID)
+ sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, 0xFFFF0038);
+
+ if (sst_drv_ctx->use_32bit_ops) {
+ pr_debug("allocate mem for context save/restore\n ");
+ /*allocate mem for fw context save during suspend*/
+ sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
+ if (!sst_drv_ctx->fw_cntx) {
+ ret = -ENOMEM;
+ goto do_free_misc;
+ }
+ /*setting zero as that is valid mem to restore*/
+ sst_drv_ctx->fw_cntx_size = 0;
+ }
+ if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
+ u32 csr;
+ u32 csr2;
+ u32 clkctl;
+
+ /*set lpe start clock and ram size*/
+ csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+ csr |= 0x30000;
+ /*make sure clksel set to OSC for SSP0,1 (default)*/
+ csr &= 0xFFFFFFF3;
+ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);
+
+ /*set clock output enable for SSP0,1,3*/
+ clkctl = sst_shim_read(sst_drv_ctx->shim, SST_CLKCTL);
+ if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+ clkctl |= (0x7 << 16);
+ else
+ clkctl |= ((1<<16)|(1<<17));
+ sst_shim_write(sst_drv_ctx->shim, SST_CLKCTL, clkctl);
+
+ /* set SSP0 & SSP1 disable DMA Finish*/
+ csr2 = sst_shim_read(sst_drv_ctx->shim, SST_CSR2);
+ /*set SSP3 disable DMA finsh for SSSP3 */
+ csr2 |= BIT(1)|BIT(2);
+ sst_shim_write(sst_drv_ctx->shim, SST_CSR2, csr2);
+ }
+ if (sst_drv_ctx->pdata->ssp_data) {
+ if (sst_drv_ctx->pdata->ssp_data->gpio_in_use)
+ sst_set_gpio_conf(&sst_drv_ctx->pdata->ssp_data->gpio);
+ }
+ pci_set_drvdata(pci, sst_drv_ctx);
+ pm_runtime_allow(sst_drv_ctx->dev);
+ pm_runtime_put_noidle(sst_drv_ctx->dev);
+ register_sst(sst_drv_ctx->dev);
+ sst_debugfs_init(sst_drv_ctx);
+ sst_drv_ctx->qos = kzalloc(sizeof(struct pm_qos_request),
+ GFP_KERNEL);
+ if (!sst_drv_ctx->qos)
+ goto do_free_misc;
+ pm_qos_add_request(sst_drv_ctx->qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ ret = device_create_file(sst_drv_ctx->dev, &dev_attr_audio_recovery);
+ if (ret) {
+ pr_err("could not create sysfs %s file\n",
+ dev_attr_audio_recovery.attr.name);
+ goto do_free_qos;
+ }
+
+ pr_info("%s successfully done!\n", __func__);
+ return ret;
+
+do_free_qos:
+ pm_qos_remove_request(sst_drv_ctx->qos);
+ kfree(sst_drv_ctx->qos);
+do_free_misc:
+ misc_deregister(&lpe_ctrl);
+do_free_irq:
+ free_irq(pci->irq, sst_drv_ctx);
+do_free_probe_bytes:
+ if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+ kfree(sst_drv_ctx->probe_bytes);
+do_free_dram_buf:
+#ifdef CONFIG_DEBUG_FS
+ if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+ kfree(sst_drv_ctx->dump_buf.dram_buf.buf);
+do_free_iram_buf:
+ if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+ kfree(sst_drv_ctx->dump_buf.iram_buf.buf);
+#endif
+do_unmap_dram:
+ iounmap(sst_drv_ctx->dram);
+do_unmap_iram:
+ iounmap(sst_drv_ctx->iram);
+do_unmap_sram:
+ iounmap(sst_drv_ctx->mailbox);
+do_unmap_shim:
+ iounmap(sst_drv_ctx->shim);
+
+do_unmap_ddr:
+ if (sst_drv_ctx->ddr)
+ iounmap(sst_drv_ctx->ddr);
+
+do_release_regions:
+ pci_release_regions(pci);
+do_disable_device:
+ pci_disable_device(pci);
+do_free_mem:
+ destroy_workqueue(sst_drv_ctx->post_msg_wq);
+free_mad_wq:
+ destroy_workqueue(sst_drv_ctx->mad_wq);
+do_free_drv_ctx:
+ sst_drv_ctx = NULL;
+ pr_err("Probe failed with %d\n", ret);
+ return ret;
+}
+
+/**
+* intel_sst_remove - PCI remove function
+*
+* @pci: PCI device structure
+*
+* This function is called by the OS when the device is unloaded.
+* It frees the interrupt and releases the driver's other resources.
+*/
+static void intel_sst_remove(struct pci_dev *pci)
+{
+	struct intel_sst_drv *sst_drv_ctx = pci_get_drvdata(pci);
+	/* Tear down roughly in reverse order of probe */
+	sst_debugfs_exit(sst_drv_ctx);
+	/* Balance probe's pm_runtime_put_noidle()/pm_runtime_allow() */
+	pm_runtime_get_noresume(sst_drv_ctx->dev);
+	pm_runtime_forbid(sst_drv_ctx->dev);
+	unregister_sst(sst_drv_ctx->dev);
+	/* Drop the reference taken via pci_dev_get() in probe */
+	pci_dev_put(sst_drv_ctx->pci);
+	sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+	misc_deregister(&lpe_ctrl);
+	/* Free the IRQ before unmapping the MMIO regions the ISR touches */
+	free_irq(pci->irq, sst_drv_ctx);
+
+	iounmap(sst_drv_ctx->dram);
+	iounmap(sst_drv_ctx->iram);
+	iounmap(sst_drv_ctx->mailbox);
+	iounmap(sst_drv_ctx->shim);
+#ifdef CONFIG_DEBUG_FS
+	/* NOTE(review): probe allocates dump_buf.{iram,dram}_buf.buf for all
+	 * platforms, but they are freed only for CLV here -- looks like a
+	 * leak on non-CLV parts; confirm against the probe path. */
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
+		kfree(sst_drv_ctx->dump_buf.iram_buf.buf);
+		kfree(sst_drv_ctx->dump_buf.dram_buf.buf);
+	}
+#endif
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		kfree(sst_drv_ctx->probe_bytes);
+
+	device_remove_file(sst_drv_ctx->dev, &dev_attr_audio_recovery);
+	kfree(sst_drv_ctx->fw_cntx);
+	kfree(sst_drv_ctx->runtime_param.param.addr);
+	flush_scheduled_work();
+	destroy_workqueue(sst_drv_ctx->post_msg_wq);
+	destroy_workqueue(sst_drv_ctx->mad_wq);
+	pm_qos_remove_request(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->fw_sg_list.src);
+	kfree(sst_drv_ctx->fw_sg_list.dst);
+	sst_drv_ctx->fw_sg_list.list_len = 0;
+	kfree(sst_drv_ctx->fw_in_mem);
+	sst_drv_ctx->fw_in_mem = NULL;
+	sst_memcpy_free_resources();
+	/* NOTE(review): this clears the LOCAL sst_drv_ctx (which shadows the
+	 * global of the same name), so the assignment is a no-op */
+	sst_drv_ctx = NULL;
+	pci_release_regions(pci);
+	pci_disable_device(pci);
+	pci_set_drvdata(pci, NULL);
+}
+
+inline void sst_save_shim64(struct intel_sst_drv *ctx,
+			    void __iomem *shim,
+			    struct sst_shim_regs64 *shim_regs)
+{
+	unsigned long flags;
+
+	/* Snapshot every 64-bit shim register under the IPC lock so the
+	 * ISR/IPC paths cannot interleave with the register reads. */
+	spin_lock_irqsave(&ctx->ipc_spin_lock, flags);
+
+	shim_regs->csr = sst_shim_read64(shim, SST_CSR);
+	shim_regs->pisr = sst_shim_read64(shim, SST_PISR);
+	shim_regs->pimr = sst_shim_read64(shim, SST_PIMR);
+	shim_regs->isrx = sst_shim_read64(shim, SST_ISRX);
+	shim_regs->isrd = sst_shim_read64(shim, SST_ISRD);
+	shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
+	shim_regs->imrd = sst_shim_read64(shim, SST_IMRD);
+	shim_regs->ipcx = sst_shim_read64(shim, ctx->ipc_reg.ipcx);
+	shim_regs->ipcd = sst_shim_read64(shim, ctx->ipc_reg.ipcd);
+	shim_regs->isrsc = sst_shim_read64(shim, SST_ISRSC);
+	shim_regs->isrlpesc = sst_shim_read64(shim, SST_ISRLPESC);
+	shim_regs->imrsc = sst_shim_read64(shim, SST_IMRSC);
+	shim_regs->imrlpesc = sst_shim_read64(shim, SST_IMRLPESC);
+	shim_regs->ipcsc = sst_shim_read64(shim, SST_IPCSC);
+	shim_regs->ipclpesc = sst_shim_read64(shim, SST_IPCLPESC);
+	shim_regs->clkctl = sst_shim_read64(shim, SST_CLKCTL);
+	shim_regs->csr2 = sst_shim_read64(shim, SST_CSR2);
+
+	spin_unlock_irqrestore(&ctx->ipc_spin_lock, flags);
+}
+
+static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
+				      void __iomem *shim,
+				      struct sst_shim_regs64 *shim_regs)
+{
+	unsigned long flags;
+
+	/* Only the interrupt mask (IMRX) is written back; the other saved
+	 * registers are not restored here. */
+	spin_lock_irqsave(&ctx->ipc_spin_lock, flags);
+	sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
+	spin_unlock_irqrestore(&ctx->ipc_spin_lock, flags);
+}
+
+/*
+ * The runtime_suspend/resume is pretty much similar to the legacy
+ * suspend/resume with the noted exception below: The PCI core takes care of
+ * taking the system through D3hot and restoring it back to D0 and so there is
+ * no need to duplicate that here.
+ */
+static int intel_sst_runtime_suspend(struct device *dev)
+{
+	union config_status_reg csr;
+	int ret = 0;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_suspend called\n");
+	/* Nothing to do if the DSP was never brought up */
+	if (ctx->sst_state == SST_UN_INIT) {
+		pr_debug("LPE is already in UNINIT state, No action");
+		return 0;
+	}
+	/*save fw context*/
+	if (ctx->ops->save_dsp_context(ctx))
+		return -EBUSY;
+
+	if (ctx->pci_id == SST_CLV_PCI_ID) {
+		/*Assert RESET on LPE Processor*/
+		csr.full = sst_shim_read(ctx->shim, SST_CSR);
+		/* remember pre-reset CSR so runtime_resume can restore it */
+		ctx->csr_value = csr.full;
+		csr.full = csr.full | 0x2; /* bit1 = sst_reset, see union config_status_reg */
+		sst_shim_write(ctx->shim, SST_CSR, csr.full);
+	}
+
+	/* Move the SST state to Suspended */
+	sst_set_fw_state_locked(ctx, SST_SUSPENDED);
+
+	/* Drain queued IPC work and any in-flight interrupt handler */
+	flush_workqueue(ctx->post_msg_wq);
+	synchronize_irq(ctx->irq_num);
+
+	if (ctx->pci_id == SST_BYT_PCI_ID || ctx->pci_id == SST_CHT_PCI_ID) {
+		/* save the shim registers because PMC doesn't save state */
+		sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
+	}
+	return ret;
+}
+
+static int intel_sst_runtime_resume(struct device *dev)
+{
+	u32 csr;
+	int ret = 0;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_resume called\n");
+
+	if (ctx->pci_id == SST_BYT_PCI_ID || ctx->pci_id == SST_CHT_PCI_ID) {
+		/* wait for device power up a/c to PCI spec */
+		usleep_range(10000, 11000);
+		sst_restore_shim64(ctx, ctx->shim, ctx->shim_regs64);
+	}
+
+	if (ctx->pci_id == SST_CLV_PCI_ID) {
+		csr = sst_shim_read(ctx->shim, SST_CSR);
+		/*
+		 * To restore the csr_value after S0ix and S3 states.
+		 * The value 0x30000 is to enable LPE dram high and low addresses.
+		 * Reference:
+		 * Penwell Audio Voice Module HAS 1.61 Section - 13.12.1 -
+		 * CSR - Configuration and Status Register.
+		 */
+		csr |= (ctx->csr_value | 0x30000);
+		sst_shim_write(ctx->shim, SST_CSR, csr);
+		/* Fix: use the per-device ctx here, not the global
+		 * sst_drv_ctx -- the inner check already used ctx. */
+		if (ctx->pdata->ssp_data) {
+			if (ctx->pdata->ssp_data->gpio_in_use)
+				sst_set_gpio_conf(&ctx->pdata->ssp_data->gpio);
+		}
+	}
+	/* When fw_clear_cache is set, clear the cached firmware copy */
+	/* fw_clear_cache is set through debugfs support */
+	if (atomic_read(&ctx->fw_clear_cache) && ctx->fw_in_mem) {
+		pr_debug("Clearing the cached firmware\n");
+		kfree(ctx->fw_in_mem);
+		ctx->fw_in_mem = NULL;
+		atomic_set(&ctx->fw_clear_cache, 0);
+	}
+
+	/* back to UN_INIT: the DSP needs a fresh init after power loss */
+	sst_set_fw_state_locked(ctx, SST_UN_INIT);
+	return ret;
+}
+
+static int intel_sst_suspend(struct device *dev)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+	int usage_count;
+
+	/* Refuse system suspend while any runtime-PM reference is held */
+	usage_count = atomic_read(&ctx->pm_usage_count);
+	if (usage_count) {
+		pr_err("Ret error for suspend:%d\n", usage_count);
+		return -EBUSY;
+	}
+	/* Otherwise the system path is identical to runtime suspend */
+	return intel_sst_runtime_suspend(dev);
+}
+
+static int intel_sst_runtime_idle(struct device *dev)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_idle called\n");
+	/* DSP still initialized: schedule a delayed suspend instead of
+	 * idling right away, and veto immediate idle with -EBUSY. */
+	if (ctx->sst_state != SST_UN_INIT) {
+		pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
+		return -EBUSY;
+	}
+	/* Removed the unreachable trailing "return -EBUSY;" -- both
+	 * branches above already return. 0 lets runtime PM suspend now. */
+	return 0;
+}
+
+static void sst_do_shutdown(struct intel_sst_drv *ctx)
+{
+	int retval = 0;
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct sst_block *block = NULL;
+
+	pr_debug(" %s called\n", __func__);
+	/* DSP already down: just record the SHUTDOWN state and leave */
+	if (ctx->sst_state == SST_SUSPENDED ||
+	    ctx->sst_state == SST_UN_INIT) {
+		sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+		pr_debug("sst is already in suspended/un-int state\n");
+		return;
+	}
+	/* only the 32-bit IPC path sends PREPARE_SHUTDOWN; others return */
+	if (!ctx->use_32bit_ops)
+		return;
+
+	sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+	flush_workqueue(ctx->post_msg_wq);
+	pvt_id = sst_assign_pvt_id(ctx);
+	/* build a blocking IPC telling the FW to prepare for shutdown */
+	retval = sst_create_block_and_ipc_msg(&msg, false,
+			ctx, &block,
+			IPC_IA_PREPARE_SHUTDOWN, pvt_id);
+	if (retval) {
+		pr_err("sst_create_block returned error!\n");
+		return;
+	}
+	sst_fill_header(&msg->header, IPC_IA_PREPARE_SHUTDOWN, 0, pvt_id);
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	/* bounded wait -- we are shutting down regardless of the reply */
+	sst_wait_timeout(ctx, block);
+	sst_free_block(ctx, block);
+}
+
+
+/**
+* sst_pci_shutdown - PCI shutdown function
+*
+* @pci: PCI device structure
+*
+* This function is called by OS when a device is shutdown/reboot
+*
+*/
+
+static void sst_pci_shutdown(struct pci_dev *pci)
+{
+	struct intel_sst_drv *sst_ctx = pci_get_drvdata(pci);
+
+	pr_debug(" %s called\n", __func__);
+
+	/* Tell the FW to prepare for shutdown, then silence our IRQ */
+	sst_do_shutdown(sst_ctx);
+	disable_irq_nosync(pci->irq);
+}
+
+/**
+* sst_acpi_shutdown - platform shutdown function
+*
+* @pdev: platform device structure
+*
+* This function is called by the OS when the device is shut down or rebooted
+*
+*/
+static void sst_acpi_shutdown(struct platform_device *pdev)
+{
+	struct intel_sst_drv *sst_ctx = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	pr_debug(" %s called\n", __func__);
+
+	/* Same sequence as the PCI path: notify FW, then mask our IRQ */
+	sst_do_shutdown(sst_ctx);
+	disable_irq_nosync(irq);
+}
+
+static const struct dev_pm_ops intel_sst_pm = {
+	.suspend = intel_sst_suspend,
+	/* system resume reuses the runtime-resume path (intentional here;
+	 * there is no separate intel_sst_resume) */
+	.resume = intel_sst_runtime_resume,
+	.runtime_suspend = intel_sst_runtime_suspend,
+	.runtime_resume = intel_sst_runtime_resume,
+	.runtime_idle = intel_sst_runtime_idle,
+};
+
+static const struct acpi_device_id sst_acpi_ids[];
+
+struct sst_platform_info *sst_get_acpi_driver_data(const char *hid)
+{
+	const struct acpi_device_id *entry;
+
+	pr_debug("%s", __func__);
+	/* Walk the ACPI id table until the zero-filled sentinel entry */
+	entry = sst_acpi_ids;
+	while (entry->id[0]) {
+		if (!strncmp(entry->id, hid, 16))
+			return (struct sst_platform_info *)entry->driver_data;
+		entry++;
+	}
+	return NULL;
+}
+
+/* PCI Routines */
+static DEFINE_PCI_DEVICE_TABLE(intel_sst_ids) = {
+	{ PCI_VDEVICE(INTEL, SST_CLV_PCI_ID), 0},	/* CLV (0x08E7) */
+	{ PCI_VDEVICE(INTEL, SST_MRFLD_PCI_ID), 0},	/* MRFLD (0x119A) */
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, intel_sst_ids);
+
+/* ACPI ids; driver_data carries the matching sst_platform_info and is
+ * looked up via sst_get_acpi_driver_data() */
+static const struct acpi_device_id sst_acpi_ids[] = {
+	{ "LPE0F28", (kernel_ulong_t) &byt_rvp_platform_data },
+	{ "LPE0F281", (kernel_ulong_t) &byt_ffrd8_platform_data },
+	{ "80860F28", (kernel_ulong_t) &byt_ffrd8_platform_data },
+	{ "808622A8", (kernel_ulong_t) &cht_platform_data },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, sst_acpi_ids);
+
+static struct pci_driver driver = {
+	.name = SST_DRV_NAME,
+	.id_table = intel_sst_ids,
+	.probe = intel_sst_probe,
+	.remove = intel_sst_remove,
+	.shutdown = sst_pci_shutdown,
+#ifdef CONFIG_PM
+	/* PM ops only wired up when power management is configured */
+	.driver = {
+		.pm = &intel_sst_pm,
+	},
+#endif
+};
+
+static struct platform_driver sst_acpi_driver = {
+	.driver = {
+		.name = "intel_sst_acpi",
+		.owner = THIS_MODULE,
+		/* matched against sst_acpi_ids (BYT/CHT LPE devices) */
+		.acpi_match_table = ACPI_PTR(sst_acpi_ids),
+		.pm = &intel_sst_pm,
+	},
+	.probe = sst_acpi_probe,
+	.remove = sst_acpi_remove,
+	.shutdown = sst_acpi_shutdown,
+};
+
+
+/**
+* intel_sst_init - Module init function
+*
+* Registers with PCI
+* Registers with /dev
+* Initializes all data structures
+*/
+static int __init intel_sst_init(void)
+{
+	/* Init all variables, data structure etc....*/
+	int ret = 0;
+	pr_info("INFO: ******** SST DRIVER loading.. Ver: %s\n",
+		SST_DRIVER_VERSION);
+
+	mutex_init(&drv_ctx_lock);
+	/* Register with PCI */
+	ret = pci_register_driver(&driver);
+	if (ret) {
+		/* Fix: previously the PCI error was ignored and then
+		 * overwritten by the platform_driver_register() result */
+		pr_err("PCI register failed\n");
+		return ret;
+	}
+
+	ret = platform_driver_register(&sst_acpi_driver);
+	if (ret) {
+		pr_err("ACPI register failed\n");
+		/* unwind the PCI registration on partial failure */
+		pci_unregister_driver(&driver);
+	}
+	return ret;
+}
+
+/**
+* intel_sst_exit - Module exit function
+*
+* Unregisters with PCI
+* Unregisters with /dev
+* Frees all data structures
+*/
+static void __exit intel_sst_exit(void)
+{
+	/* Unregister both bus bindings */
+	pci_unregister_driver(&driver);
+	platform_driver_unregister(&sst_acpi_driver);
+
+	pr_debug("driver unloaded\n");
+	sst_drv_ctx = NULL;
+}
+
+module_init(intel_sst_init);
+module_exit(intel_sst_exit);
--- /dev/null
+/*
+ * sst.h - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corporation
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Common private declarations for SST
+ */
+#ifndef __SST_H__
+#define __SST_H__
+
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/lnw_gpio.h>
+#include <asm/platform_sst.h>
+#include <sound/intel_sst_ioctl.h>
+
+#define SST_DRIVER_VERSION "3.0.8"
+
+/* driver names */
+#define SST_DRV_NAME "intel_sst_driver"
+#define SST_CLV_PCI_ID 0x08E7
+#define SST_MRFLD_PCI_ID 0x119A
+#define SST_BYT_PCI_ID 0x0F28
+#define SST_CHT_PCI_ID 0x22A8
+
+#define SST_SUSPEND_DELAY 2000
+#define FW_CONTEXT_MEM (64*1024)
+#define SST_ICCM_BOUNDARY 4
+#define SST_CONFIG_SSP_SIGN 0x7ffe8001
+
+/* FIXME: All this info should come from platform data
+ * move this when the base framework is ready to pass
+ * platform data to SST driver
+ */
+#define MRFLD_FW_VIRTUAL_BASE 0xC0000000
+#define MRFLD_FW_DDR_BASE_OFFSET 0x0
+#define MRFLD_FW_FEATURE_BASE_OFFSET 0x4
+#define MRFLD_FW_BSS_RESET_BIT 0
+extern struct intel_sst_drv *sst_drv_ctx;
+/*
+ * DSP/firmware lifecycle states held in intel_sst_drv.sst_state;
+ * always updated through sst_set_fw_state_locked().
+ */
+enum sst_states {
+	SST_FW_LOADED = 1,
+	SST_FW_RUNNING,
+	SST_START_INIT,
+	SST_UN_INIT,
+	SST_ERROR,
+	SST_SUSPENDED,
+	SST_FW_CTXT_RESTORE,
+	SST_SHUTDOWN,
+	SST_FW_LIB_LOAD,
+};
+
+/* selector for algorithm parameter IPC: set vs get (per the names) */
+enum sst_algo_ops {
+	SST_SET_ALGO = 0,
+	SST_GET_ALGO = 1,
+};
+
+#define SST_BLOCK_TIMEOUT 1000
+
+/* SST register map */
+#define SST_CSR 0x00
+#define SST_PISR 0x08
+#define SST_PIMR 0x10
+#define SST_ISRX 0x18
+#define SST_ISRD 0x20
+#define SST_IMRX 0x28
+#define SST_IMRD 0x30
+#define SST_IPCX 0x38 /* IPC IA-SST */
+#define SST_IPCD 0x40 /* IPC SST-IA */
+#define SST_ISRSC 0x48
+#define SST_ISRLPESC 0x50
+#define SST_IMRSC 0x58
+#define SST_IMRLPESC 0x60
+#define SST_IPCSC 0x68
+#define SST_IPCLPESC 0x70
+#define SST_CLKCTL 0x78
+#define SST_CSR2 0x80
+
+#define SST_SHIM_BEGIN SST_CSR
+#define SST_SHIM_END SST_CSR2
+#define SST_SHIM_SIZE 0x88
+
+#define FW_SIGNATURE_SIZE 4
+
+/* stream states */
+enum sst_stream_states {
+ STREAM_UN_INIT = 0, /* Freed/Not used stream */
+ STREAM_RUNNING = 1, /* Running */
+ STREAM_PAUSED = 2, /* Paused stream */
+ STREAM_DECODE = 3, /* stream is in decoding only state */
+ STREAM_INIT = 4, /* stream init, waiting for data */
+ STREAM_RESET = 5, /* force reset on recovery */
+};
+
+/* target memory selector for a FW block (see struct fw_block_info) */
+enum sst_ram_type {
+	SST_IRAM = 1,
+	SST_DRAM = 2,
+};
+
+/* SST shim registers to structure mapping */
+union config_status_reg {
+ struct {
+ u32 mfld_strb:1;
+ u32 sst_reset:1;
+ u32 clk_sel:3;
+ u32 sst_clk:2;
+ u32 bypass:3;
+ u32 run_stall:1;
+ u32 rsvd1:2;
+ u32 strb_cntr_rst:1;
+ u32 rsvd:18;
+ } part;
+ u32 full;
+};
+
+union interrupt_reg {
+ struct {
+ u64 done_interrupt:1;
+ u64 busy_interrupt:1;
+ u64 rsvd:62;
+ } part;
+ u64 full;
+};
+
+union sst_imr_reg {
+ struct {
+ u32 done_interrupt:1;
+ u32 busy_interrupt:1;
+ u32 rsvd:30;
+ } part;
+ u32 full;
+};
+
+union sst_pisr_reg {
+ struct {
+ u32 pssp0:1;
+ u32 pssp1:1;
+ u32 rsvd0:3;
+ u32 dmac:1;
+ u32 rsvd1:26;
+ } part;
+ u32 full;
+};
+
+union sst_pimr_reg {
+ struct {
+ u32 ssp0:1;
+ u32 ssp1:1;
+ u32 rsvd0:3;
+ u32 dmac:1;
+ u32 rsvd1:10;
+ u32 ssp0_sc:1;
+ u32 ssp1_sc:1;
+ u32 rsvd2:3;
+ u32 dmac_sc:1;
+ u32 rsvd3:10;
+ } part;
+ u32 full;
+};
+
+union config_status_reg_mrfld {
+ struct {
+ u64 lpe_reset:1;
+ u64 lpe_reset_vector:1;
+ u64 runstall:1;
+ u64 pwaitmode:1;
+ u64 clk_sel:3;
+ u64 rsvd2:1;
+ u64 sst_clk:3;
+ u64 xt_snoop:1;
+ u64 rsvd3:4;
+ u64 clk_sel1:6;
+ u64 clk_enable:3;
+ u64 rsvd4:6;
+ u64 slim0baseclk:1;
+ u64 rsvd:32;
+ } part;
+ u64 full;
+};
+
+union interrupt_reg_mrfld {
+ struct {
+ u64 done_interrupt:1;
+ u64 busy_interrupt:1;
+ u64 rsvd:62;
+ } part;
+ u64 full;
+};
+
+union sst_imr_reg_mrfld {
+ struct {
+ u64 done_interrupt:1;
+ u64 busy_interrupt:1;
+ u64 rsvd:62;
+ } part;
+ u64 full;
+};
+
+/*
+ * This structure is used to block a user/fw data call until the
+ * corresponding fw/user reply arrives (see sst_create_block_and_ipc_msg,
+ * sst_wait_timeout, sst_free_block).
+ */
+struct sst_block {
+	bool condition; /* condition for blocking check */
+	int ret_code; /* ret code when block is released */
+	void *data; /* data to be passed with the block, if any */
+	u32 size; /* size of @data -- TODO confirm */
+	bool on; /* block currently in use -- TODO confirm */
+	u32 msg_id; /*msg_id = msgid in mfld/ctp, mrfld = 0 */
+	u32 drv_id; /* = str_id in mfld/ctp, = drv_id in mrfld*/
+	struct list_head node; /* entry in intel_sst_drv.block_list */
+};
+
+/**
+ * struct stream_info - structure that holds the stream information
+ *
+ * @status : stream current state (enum sst_stream_states)
+ * @prev : stream previous state
+ * @ops : stream operation pb/cp/drm...
+ * @lock : stream mutex for protecting state
+ * @pcm_substream : PCM substream
+ * @period_elapsed : PCM period elapsed callback
+ * @sfreq : stream sampling freq
+ * @cumm_bytes : cummulative bytes decoded
+ * @compr_cb : compressed-path callback, invoked with @compr_cb_param
+ * @drain_notify : drain-complete callback, invoked with @drain_cb_param
+ * @num_ch : number of channels
+ * @pipe_id : FW pipe id (initialized to PIPE_RSVD when unused)
+ * @str_id : stream id
+ * @task_id : FW task id
+ *
+ * NOTE(review): stale entries (@bufs, @str_type, @src, @device) were
+ * dropped from this kernel-doc; no such members exist below.
+ */
+struct stream_info {
+	unsigned int status;
+	unsigned int prev;
+	unsigned int ops;
+	struct mutex lock; /* mutex */
+	void *pcm_substream;
+	void (*period_elapsed) (void *pcm_substream);
+	unsigned int sfreq;
+	u32 cumm_bytes;
+	void *compr_cb_param;
+	void (*compr_cb) (void *compr_cb_param);
+	void *drain_cb_param;
+	void (*drain_notify) (void *drain_cb_param);
+
+	unsigned int num_ch;
+	unsigned int pipe_id;
+	unsigned int str_id;
+	unsigned int task_id;
+};
+
+#define SST_FW_SIGN "$SST"
+#define SST_FW_LIB_SIGN "$LIB"
+
+/*
+ * struct fw_header - FW file headers
+ *
+ * @signature : FW signature
+ * @modules : # of modules
+ * @file_format : version of header format
+ * @reserved : reserved fields
+ */
+struct fw_header {
+ unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
+ u32 file_size; /* size of fw minus this header */
+ u32 modules; /* # of modules */
+ u32 file_format; /* version of header format */
+ u32 reserved[4];
+};
+
+struct fw_module_header {
+ unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
+ u32 mod_size; /* size of module */
+ u32 blocks; /* # of blocks */
+ u32 type; /* codec type, pp lib */
+ u32 entry_point;
+};
+
+struct fw_block_info {
+ enum sst_ram_type type; /* IRAM/DRAM */
+ u32 size; /* Bytes */
+ u32 ram_offset; /* Offset in I/DRAM */
+ u32 rsvd; /* Reserved field */
+};
+
+struct sst_ipc_msg_wq {
+ union ipc_header_mrfld mrfld_header;
+ struct ipc_dsp_hdr dsp_hdr;
+ char mailbox[SST_MAILBOX_SIZE];
+ struct work_struct wq;
+ union ipc_header header;
+};
+
+struct sst_dma {
+ struct dma_chan *ch;
+ struct intel_mid_dma_slave slave;
+ struct device *dev;
+};
+
+struct sst_runtime_param {
+ struct snd_sst_runtime_params param;
+};
+
+struct sst_sg_list {
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ int list_len;
+ unsigned int sg_idx;
+};
+
+struct sst_memcpy_list {
+ struct list_head memcpylist;
+ void *dstn;
+ const void *src;
+ u32 size;
+ bool is_io;
+};
+
+struct sst_debugfs {
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *root;
+#endif
+ int runtime_pm_status;
+ void __iomem *ssp[SST_MAX_SSP_PORTS];
+ void __iomem *dma_reg[SST_MAX_DMA];
+ unsigned char get_params_data[1024];
+ ssize_t get_params_len;
+};
+
+struct lpe_log_buf_hdr {
+ u32 base_addr;
+ u32 end_addr;
+ u32 rd_addr;
+ u32 wr_addr;
+};
+
+struct snd_ssp_config {
+ int size;
+ char bytes[0];
+};
+
+struct snd_sst_probe_bytes {
+ u16 len;
+ char bytes[0];
+};
+
+#define PCI_DMAC_CLV_ID 0x08F0
+#define PCI_DMAC_MRFLD_ID 0x119B
+
+struct sst_ram_buf {
+ u32 size;
+ char *buf;
+};
+
+/* Firmware Module Information*/
+
+enum sst_lib_dwnld_status {
+ SST_LIB_NOT_FOUND = 0,
+ SST_LIB_FOUND,
+ SST_LIB_DOWNLOADED,
+};
+
+struct sst_module_info {
+ const char *name; /* Library name */
+ u32 id; /* Module ID */
+ u32 entry_pt; /* Module entry point */
+ u8 status; /* module status*/
+ u8 rsvd1;
+ u16 rsvd2;
+};
+
+/* Structure for managing the Library Region(1.5MB)
+ * in DDR in Merrifield
+ */
+struct sst_mem_mgr {
+ phys_addr_t current_base;
+ int avail;
+ unsigned int count;
+};
+
+struct sst_dump_buf {
+ /* buffers for iram-dram dump crash */
+ struct sst_ram_buf iram_buf;
+ struct sst_ram_buf dram_buf;
+};
+
+struct sst_ipc_reg {
+ int ipcx;
+ int ipcd;
+};
+
+/* snapshot of the 64-bit shim registers taken across suspend/resume
+ * (filled by sst_save_shim64(), consumed by sst_restore_shim64()) */
+struct sst_shim_regs64 {
+	u64 csr;
+	u64 pisr;
+	u64 pimr;
+	u64 isrx;
+	u64 isrd;
+	u64 imrx;
+	u64 imrd;
+	u64 ipcx;
+	u64 ipcd;
+	u64 isrsc;
+	u64 isrlpesc;
+	u64 imrsc;
+	u64 imrlpesc;
+	u64 ipcsc;
+	u64 ipclpesc;
+	u64 clkctl;
+	u64 csr2;
+};
+
+struct sst_vtsv_cache {
+ void *file1_in_mem;
+ u32 size1;
+ void *file2_in_mem;
+ u32 size2;
+};
+
+/***
+ *
+ * struct intel_sst_drv - driver ops
+ *
+ * @sst_state : current sst device state (enum sst_states)
+ * @pci_id : PCI device id loaded
+ * @shim : SST shim pointer
+ * @mailbox : SST mailbox pointer
+ * @iram : SST IRAM pointer
+ * @dram : SST DRAM pointer
+ * @pdata : SST info passed as a part of pci platform data
+ * @shim_phy_add : SST shim phy addr
+ * @shim_regs64: Struct to save shim registers
+ * @ipc_dispatch_list : ipc messages dispatched
+ * @rx_list : to copy the process_reply/process_msg from DSP
+ * @ipc_post_msg_wq : wq to post IPC messages context
+ * @ipc_post_msg : wq to post reply from FW context
+ * @mad_wq : MAD driver wq
+ * @post_msg_wq : wq to post IPC messages
+ * @streams : sst stream contexts
+ * @ipc_spin_lock : spin lock to handle audio shim access and ipc queue
+ * @rx_msg_lock : spin lock to handle the rx messages from the DSP
+ * @pci : sst pci device struture
+ * @dev : pointer to current device struct
+ * @sst_lock : sst device lock
+ * @stream_lock : sst stream lock
+ * @pvt_id : sst private id
+ * @stream_cnt : total sst active stream count
+ * @qos : PM Qos struct
+ * @firmware_name : Firmware / Library name
+ *
+ * NOTE(review): stale entries (@mad_ops, @scard_ops, @list_lock,
+ * @pb_streams, @cp_streams, @audio_start) were removed -- no such
+ * members exist in the structure below.
+ */
+struct intel_sst_drv {
+	int sst_state;
+	int irq_num;
+	unsigned int pci_id;
+	bool use_32bit_ops;
+	void __iomem *ddr;
+	void __iomem *shim;
+	void __iomem *mailbox;
+	void __iomem *iram;
+	void __iomem *dram;
+	unsigned int mailbox_add;
+	unsigned int iram_base;
+	unsigned int dram_base;
+	unsigned int shim_phy_add;
+	unsigned int iram_end;
+	unsigned int dram_end;
+	unsigned int ddr_end;
+	unsigned int ddr_base;
+	unsigned int mailbox_recv_offset;
+	/* runtime-PM user count; checked by intel_sst_suspend() */
+	atomic_t pm_usage_count;
+	struct sst_shim_regs64 *shim_regs64;
+	struct list_head block_list;
+	struct list_head ipc_dispatch_list;
+	struct sst_platform_info *pdata;
+	struct sst_ipc_msg_wq ipc_post_msg;
+	struct list_head rx_list;
+	struct work_struct ipc_post_msg_wq;
+	wait_queue_head_t wait_queue;
+	struct workqueue_struct *mad_wq;
+	struct workqueue_struct *post_msg_wq;
+	unsigned int tstamp;
+	struct stream_info streams[MAX_NUM_STREAMS+1]; /*str_id 0 is not used*/
+	spinlock_t ipc_spin_lock; /* lock for Shim reg access and ipc queue */
+	spinlock_t block_lock; /* lock for adding block to block_list */
+	spinlock_t pvt_id_lock; /* lock for allocating private id */
+	spinlock_t rx_msg_lock;
+	struct pci_dev *pci;
+	struct device *dev;
+	unsigned int pvt_id;
+	struct mutex sst_lock;
+	struct mutex stream_lock;
+	unsigned int stream_cnt;
+	/* saved FW context buffer for suspend/resume (32-bit ops only) */
+	unsigned int *fw_cntx;
+	unsigned int fw_cntx_size;
+	/* CSR value saved before asserting LPE reset (CLV suspend path) */
+	unsigned int csr_value;
+	struct sst_dma dma;
+	void *fw_in_mem;
+	struct sst_runtime_param runtime_param;
+	unsigned int device_input_mixer;
+	struct mutex mixer_ctrl_lock;
+	struct dma_async_tx_descriptor *desc;
+	struct sst_sg_list fw_sg_list, library_list;
+	struct intel_sst_ops *ops;
+	struct sst_debugfs debugfs;
+	struct pm_qos_request *qos;
+	struct sst_info info;
+	unsigned int use_dma;
+	unsigned int use_lli;
+	atomic_t fw_clear_context;
+	/* set via debugfs; makes runtime_resume drop the cached FW image */
+	atomic_t fw_clear_cache;
+	bool lib_dwnld_reqd;
+	/* list used during FW download in memcpy mode */
+	struct list_head memcpy_list;
+	/* list used during LIB download in memcpy mode */
+	struct list_head libmemcpy_list;
+	/* holds the structs of iram/dram local buffers for dump*/
+	struct sst_dump_buf dump_buf;
+	/* Lock for CSR register change */
+	struct mutex csr_lock;
+	/* byte control to set the probe stream */
+	struct snd_sst_probe_bytes *probe_bytes;
+	/* contains the ipc registers */
+	struct sst_ipc_reg ipc_reg;
+	/* IMR region Library space memory manager */
+	struct sst_mem_mgr lib_mem_mgr;
+	/* Contains the cached vtsv files*/
+	struct sst_vtsv_cache vcache;
+	/* Pointer to device ID, now for same PCI_ID, HID will be
+	 * will be different for FDK and EDK2. This will be used
+	 * for devices where PCI or ACPI id is same but HID is
+	 * different
+	 */
+	const char *hid;
+	/* Holder for firmware name. Due to async call it needs to be
+	 * persistent till worker thread gets called
+	 */
+	char firmware_name[20];
+};
+
+extern struct intel_sst_drv *sst_drv_ctx;
+extern struct sst_platform_info byt_rvp_platform_data;
+extern struct sst_platform_info byt_ffrd8_platform_data;
+extern struct sst_platform_info cht_platform_data;
+
+/* misc definitions */
+#define FW_DWNL_ID 0xFF
+
+/*
+ * sst_fill_config - packed configuration blob handed to the firmware.
+ * Carries the board (SSP) and platform (DMA/SRAM) configuration plus the
+ * LPE-viewpoint SHIM and mailbox addresses; @sign is presumably a magic /
+ * signature word checked by the FW — TODO confirm against FW interface spec.
+ */
+struct sst_fill_config {
+ u32 sign;
+ struct sst_board_config_data sst_bdata;
+ struct sst_platform_config_data sst_pdata;
+ u32 shim_phy_add;
+ u32 mailbox_add;
+} __packed;
+
+/*
+ * intel_sst_ops - per-platform DSP operation table (MFLD/MRFLD variants of
+ * these callbacks are declared below; presumably selected by sst_driver_ops()
+ * at probe time — confirm in sst_driver_ops implementation).
+ */
+struct intel_sst_ops {
+ irqreturn_t (*interrupt) (int, void *); /* hard-IRQ handler */
+ irqreturn_t (*irq_thread) (int, void *); /* threaded bottom half */
+ void (*clear_interrupt) (void);
+ int (*start) (void);
+ int (*reset) (void);
+ void (*process_reply) (struct ipc_post *msg);
+ void (*post_message) (struct work_struct *work);
+ int (*sync_post_message) (struct ipc_post *msg);
+ void (*process_message) (struct ipc_post *msg);
+ void (*set_bypass)(bool set);
+ int (*save_dsp_context) (struct intel_sst_drv *sst);
+ void (*restore_dsp_context) (void);
+ int (*alloc_stream) (char *params, struct sst_block *block);
+ void (*post_download)(struct intel_sst_drv *sst);
+ void (*do_recovery)(struct intel_sst_drv *sst);
+};
+
+int sst_alloc_stream(char *params, struct sst_block *block);
+int sst_pause_stream(int id);
+int sst_resume_stream(int id);
+int sst_drop_stream(int id);
+int sst_next_track(void);
+int sst_free_stream(int id);
+int sst_start_stream(int str_id);
+int sst_send_byte_stream_mrfld(void *sbytes);
+int sst_send_probe_bytes(struct intel_sst_drv *sst);
+int sst_set_stream_param(int str_id, struct snd_sst_params *str_param);
+int sst_set_metadata(int str_id, char *params);
+int sst_get_stream(struct snd_sst_params *str_param);
+int sst_get_stream_allocated(struct snd_sst_params *str_param,
+ struct snd_sst_lib_download **lib_dnld);
+int sst_drain_stream(int str_id, bool partial_drain);
+
+
+int sst_sync_post_message_mfld(struct ipc_post *msg);
+void sst_post_message_mfld(struct work_struct *work);
+void sst_process_message_mfld(struct ipc_post *msg);
+void sst_process_reply_mfld(struct ipc_post *msg);
+int sst_start_mfld(void);
+int intel_sst_reset_dsp_mfld(void);
+void intel_sst_clear_intr_mfld(void);
+void intel_sst_set_bypass_mfld(bool set);
+
+int sst_sync_post_message_mrfld(struct ipc_post *msg);
+void sst_post_message_mrfld(struct work_struct *work);
+void sst_process_message_mrfld(struct ipc_post *msg);
+void sst_process_reply_mrfld(struct ipc_post *msg);
+int sst_start_mrfld(void);
+int intel_sst_reset_dsp_mrfld(void);
+void intel_sst_clear_intr_mrfld(void);
+void sst_process_mad_ops(struct work_struct *work);
+
+long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd,
+ unsigned long arg);
+int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr);
+int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr);
+
+int sst_load_fw(void);
+int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
+int sst_load_all_modules_elf(struct intel_sst_drv *ctx,
+ struct sst_module_info *mod_table, int mod_table_size);
+int sst_get_next_lib_mem(struct sst_mem_mgr *mgr, int size,
+ unsigned long *lib_base);
+void sst_post_download_ctp(struct intel_sst_drv *ctx);
+void sst_post_download_mrfld(struct intel_sst_drv *ctx);
+void sst_post_download_byt(struct intel_sst_drv *ctx);
+int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
+void sst_memcpy_free_resources(void);
+
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+ struct sst_block *block);
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
+ struct sst_block *block);
+int sst_create_ipc_msg(struct ipc_post **arg, bool large);
+int sst_download_fw(void);
+int free_stream_context(unsigned int str_id);
+void sst_clean_stream(struct stream_info *stream);
+int intel_sst_register_compress(struct intel_sst_drv *sst);
+int intel_sst_remove_compress(struct intel_sst_drv *sst);
+void sst_cdev_fragment_elapsed(int str_id);
+int sst_send_sync_msg(int ipc, int str_id);
+int sst_get_num_channel(struct snd_sst_params *str_param);
+int sst_get_sfreq(struct snd_sst_params *str_param);
+int intel_sst_check_device(void);
+int sst_alloc_stream_ctp(char *params, struct sst_block *block);
+int sst_alloc_stream_mrfld(char *params, struct sst_block *block);
+void sst_restore_fw_context(void);
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+ u32 msg_id, u32 drv_id);
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+ struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+ u32 msg_id, u32 drv_id);
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed);
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+ u32 drv_id, u32 ipc, void *data, u32 size);
+int sst_alloc_drv_context(struct device *dev);
+int sst_request_firmware_async(struct intel_sst_drv *ctx);
+int sst_driver_ops(struct intel_sst_drv *sst);
+struct sst_platform_info *sst_get_acpi_driver_data(const char *hid);
+int sst_acpi_probe(struct platform_device *pdev);
+int sst_acpi_remove(struct platform_device *pdev);
+void sst_save_shim64(struct intel_sst_drv *ctx, void __iomem *shim,
+ struct sst_shim_regs64 *shim_regs);
+void sst_firmware_load_cb(const struct firmware *fw, void *context);
+int sst_send_vtsv_data_to_fw(struct intel_sst_drv *ctx);
+
+void sst_do_recovery_mrfld(struct intel_sst_drv *sst);
+void sst_do_recovery(struct intel_sst_drv *sst);
+long intel_sst_ioctl_dsp(unsigned int cmd,
+ struct snd_ppp_params *algo_params, unsigned long arg);
+
+void sst_dump_to_buffer(const void *from, size_t from_len, char *buf);
+
+extern int intel_scu_ipc_simple_command(int, int);
+
+/*
+ * sst_pm_runtime_put - drop one runtime-PM reference on the SST device.
+ *
+ * The driver's local pm_usage_count mirror is decremented only after
+ * pm_runtime_put_sync() succeeds; on failure the error is returned as-is.
+ */
+static inline int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
+{
+ int status = pm_runtime_put_sync(sst_drv->dev);
+
+ if (status < 0)
+ return status;
+
+ atomic_dec(&sst_drv->pm_usage_count);
+ pr_debug("%s: count is %d now..\n", __func__,
+ atomic_read(&sst_drv->pm_usage_count));
+ return 0;
+}
+/*
+ * sst_fill_header - populate an IPC header before it is sent
+ *
+ * @header : ipc header to fill in
+ * @msg : IPC message id
+ * @large : non-zero for a large message
+ * @str_id : stream id the message targets
+ *
+ * Marks the header busy / not done with no data bits set; the individual
+ * bitfield stores are independent, so the order is immaterial.
+ */
+static inline void sst_fill_header(union ipc_header *header,
+ int msg, int large, int str_id)
+{
+ header->part.done = 0;
+ header->part.busy = 1;
+ header->part.data = 0;
+ header->part.msg_id = msg;
+ header->part.str_id = str_id;
+ header->part.large = large;
+}
+
+
+/*
+ * sst_fill_header_mrfld - populate a 64-bit MRFLD IPC header.
+ * Clears the whole header first, then fills the high word: busy is set,
+ * done cleared, and a response is requested (res_rqd = 1).
+ */
+static inline void sst_fill_header_mrfld(union ipc_header_mrfld *header,
+ int msg, int task_id, int large, int drv_id)
+{
+ header->full = 0;
+ header->p.header_high.part.busy = 1;
+ header->p.header_high.part.done = 0;
+ header->p.header_high.part.res_rqd = 1;
+ header->p.header_high.part.large = large;
+ header->p.header_high.part.msg_id = msg;
+ header->p.header_high.part.task_id = task_id;
+ header->p.header_high.part.drv_id = drv_id;
+}
+
+/*
+ * sst_fill_header_dsp - populate the DSP-level command header.
+ * mod_index_id is fixed at 0xff and mod_id at 0 for these commands.
+ */
+static inline void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
+ int pipe_id, int len)
+{
+ dsp->mod_index_id = 0xff;
+ dsp->mod_id = 0;
+ dsp->cmd_id = msg;
+ dsp->pipe_id = pipe_id;
+ dsp->length = len;
+}
+
+#define MAX_BLOCKS 15
+/* sst_assign_pvt_id - assign a pvt id for stream
+ *
+ * @sst_drv_ctx : driver context
+ *
+ * Hands out ids 1..MAX_BLOCKS in round-robin order for calls that do not
+ * have a stream context yet.  The pvt_id_lock taken here serializes
+ * concurrent allocators, so callers need no extra locking.
+ */
+static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
+{
+ unsigned int id;
+
+ spin_lock(&sst_drv_ctx->pvt_id_lock);
+ if (++sst_drv_ctx->pvt_id > MAX_BLOCKS)
+ sst_drv_ctx->pvt_id = 1;
+ id = sst_drv_ctx->pvt_id;
+ spin_unlock(&sst_drv_ctx->pvt_id_lock);
+ return id;
+}
+
+
+/*
+ * sst_init_stream - this function initializes stream context
+ *
+ * @stream : stream structure
+ * @codec : codec for stream (currently unused by this helper)
+ * @sst_id : stream id (currently unused by this helper)
+ * @ops : stream operation
+ * @slot : stream pcm slot (currently unused by this helper)
+ *
+ * this inline function initializes stream context for allocated stream;
+ * only status, prev and ops are actually written here
+ */
+static inline void sst_init_stream(struct stream_info *stream,
+ int codec, int sst_id, int ops, u8 slot)
+{
+ stream->status = STREAM_INIT;
+ stream->prev = STREAM_UN_INIT;
+ stream->ops = ops;
+}
+
+/*
+ * sst_set_gpio_conf - switch the four I2S pins (rx, tx, frame, clock) to
+ * the alternate function given in @gpio_conf via lnw_gpio_set_alt().
+ */
+static inline void sst_set_gpio_conf(const struct sst_gpio_config *gpio_conf)
+{
+ lnw_gpio_set_alt(gpio_conf->i2s_rx_alt, gpio_conf->alt_function);
+ lnw_gpio_set_alt(gpio_conf->i2s_tx_alt, gpio_conf->alt_function);
+ lnw_gpio_set_alt(gpio_conf->i2s_frame, gpio_conf->alt_function);
+ lnw_gpio_set_alt(gpio_conf->i2s_clock, gpio_conf->alt_function);
+}
+
+
+/*
+ * sst_validate_strid - this function validates the stream id
+ *
+ * @str_id : stream id to be validated
+ *
+ * Valid ids are 1..sst_drv_ctx->info.max_streams; returns 0 when valid
+ * and -EINVAL (after logging an error) otherwise.
+ */
+static inline int sst_validate_strid(int str_id)
+{
+ if (str_id > 0 && str_id <= sst_drv_ctx->info.max_streams)
+ return 0;
+
+ pr_err("SST ERR: invalid stream id : %d, max %d\n",
+ str_id, sst_drv_ctx->info.max_streams);
+ return -EINVAL;
+}
+
+/* 32-bit MMIO write to a SHIM register; always returns 0 */
+static inline int sst_shim_write(void __iomem *addr, int offset, int value)
+{
+ writel(value, addr + offset);
+ return 0;
+}
+
+/* 32-bit MMIO read from a SHIM register */
+static inline u32 sst_shim_read(void __iomem *addr, int offset)
+{
+
+ return readl(addr + offset);
+}
+
+/* 32-bit MMIO read; identical to sst_shim_read() — kept as a separate
+ * name, presumably for non-SHIM register accesses */
+static inline u32 sst_reg_read(void __iomem *addr, int offset)
+{
+
+ return readl(addr + offset);
+}
+
+/* 64-bit MMIO read via memcpy_fromio (avoids requiring a native 64-bit
+ * load on 32-bit platforms) */
+static inline u64 sst_reg_read64(void __iomem *addr, int offset)
+{
+ u64 val = 0;
+
+ memcpy_fromio(&val, addr + offset, sizeof(val));
+
+ return val;
+}
+
+/* 64-bit MMIO write via memcpy_toio; always returns 0 */
+static inline int sst_shim_write64(void __iomem *addr, int offset, u64 value)
+{
+ memcpy_toio(addr + offset, &value, sizeof(value));
+ return 0;
+}
+
+/* 64-bit MMIO read via memcpy_fromio (same technique as sst_reg_read64) */
+static inline u64 sst_shim_read64(void __iomem *addr, int offset)
+{
+ u64 val = 0;
+
+ memcpy_fromio(&val, addr + offset, sizeof(val));
+ return val;
+}
+
+/* set the DSP/FW state under sst_lock so concurrent readers of sst_state
+ * see a consistent value */
+static inline void
+sst_set_fw_state_locked(struct intel_sst_drv *sst_drv_ctx, int sst_state)
+{
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ sst_drv_ctx->sst_state = sst_state;
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+}
+
+/* return the stream_info slot for @str_id, or NULL if the id is out of
+ * the valid 1..max_streams range */
+static inline struct stream_info *get_stream_info(int str_id)
+{
+ if (sst_validate_strid(str_id))
+ return NULL;
+ return &sst_drv_ctx->streams[str_id];
+}
+
+/*
+ * get_stream_id_mrfld - map a FW pipe id back to the driver stream id
+ *
+ * @pipe_id : pipe id reported by the firmware
+ *
+ * Returns the matching index into sst_drv_ctx->streams[], or -1 when no
+ * stream currently uses @pipe_id.
+ */
+static inline int get_stream_id_mrfld(u32 pipe_id)
+{
+ int i;
+
+ for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
+ if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
+ return i;
+
+ /* fix: debug format string was missing its trailing newline */
+ pr_debug("%s: no such pipe_id(%u)\n", __func__, pipe_id);
+ return -1;
+}
+
+int register_sst(struct device *);
+int unregister_sst(struct device *);
+
+#ifdef CONFIG_DEBUG_FS
+void sst_debugfs_init(struct intel_sst_drv *sst);
+void sst_debugfs_exit(struct intel_sst_drv *sst);
+#else
+static inline void sst_debugfs_init(struct intel_sst_drv *sst)
+{
+}
+
+static inline void sst_debugfs_exit(struct intel_sst_drv *sst)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * FW should use virtual address 0xC000_0000 to map to the DDR
+ * reserved 2MB region at 512MB boundary. Currently the address of
+ * DDR region allocated by IA FW is not 512MB aligned. So FW is
+ * statically linking the DDR region at 0xDF600000. So we need to
+ * use the translated address to identify the DDR regions in the FW
+ * ELF binary.
+ */
+static inline u32 relocate_imr_addr_mrfld(u32 base_addr)
+{
+ /* keep the offset within the 512MB-aligned window and rebase it
+  * onto the FW's virtual DDR base */
+ return MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
+}
+
+/*
+ * Queue @msg on the IPC dispatch list (under ipc_spin_lock, IRQs off) and
+ * invoke the platform post_message op to push the queue to the DSP.
+ */
+static inline void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
+ struct ipc_post *msg)
+{
+ unsigned long irq_flags;
+ spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+ list_add_tail(&msg->node, &sst->ipc_dispatch_list);
+ spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+ sst->ops->post_message(&sst->ipc_post_msg_wq);
+}
+#endif
--- /dev/null
+/* sst_acpi.c - SST (LPE) driver init file for ACPI enumeration.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * Authors: Ramesh Babu K V <Ramesh.Babu@intel.com>
+ * Authors: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <asm/platform_byt_audio.h>
+#include <asm/platform_sst.h>
+#include <acpi/acpi_bus.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+extern struct miscdevice lpe_ctrl;
+
+/* platform config shared by all BYT boards: the two DMA engine physical
+ * bases; 0xffffffff for the SRAM buffer base presumably means "unused" —
+ * TODO confirm against consumers of sst_sram_buff_base */
+static const struct sst_platform_config_data sst_byt_pdata = {
+ .sst_sram_buff_base = 0xffffffff,
+ .sst_dma_base[0] = SST_BYT_DMA0_PHY_ADDR,
+ .sst_dma_base[1] = SST_BYT_DMA1_PHY_ADDR,
+};
+
+/* use array[0] for ssp_platform_data even though SSP2 is used */
+static const struct sst_board_config_data sst_byt_rvp_bdata = {
+ .active_ssp_ports = 1,
+ .platform_id = 3,
+ .board_id = 1,
+ .ihf_num_chan = 2,
+ .osc_clk_freq = 25000000,
+ .ssp_platform_data = {
+ [0] = {
+ .ssp_cfg_sst = 1,
+ .port_number = 2,
+ .is_master = 1,
+ .pack_mode = 1,
+ .num_slots_per_frame = 2,
+ .num_bits_per_slot = 24,
+ .active_tx_map = 3,
+ .active_rx_map = 3,
+ .ssp_frame_format = 3,
+ .frame_polarity = 1,
+ .serial_bitrate_clk_mode = 0,
+ .frame_sync_width = 24,
+ .dma_handshake_interface_tx = 5,
+ .dma_handshake_interface_rx = 4,
+ .network_mode = 0,
+ .start_delay = 1,
+ .ssp_base_add = SST_BYT_SSP2_PHY_ADDR,
+ },
+ },
+};
+
+static const struct sst_board_config_data sst_byt_ffrd8_bdata = {
+ .active_ssp_ports = 1,
+ .platform_id = 3,
+ .board_id = 1,
+ .ihf_num_chan = 2,
+ .osc_clk_freq = 25000000,
+ .ssp_platform_data = {
+ [0] = {
+ .ssp_cfg_sst = 1,
+ .port_number = 0,
+ .is_master = 1,
+ .pack_mode = 1,
+ .num_slots_per_frame = 2,
+ .num_bits_per_slot = 24,
+ .active_tx_map = 3,
+ .active_rx_map = 3,
+ .ssp_frame_format = 3,
+ .frame_polarity = 1,
+ .serial_bitrate_clk_mode = 0,
+ .frame_sync_width = 24,
+ .dma_handshake_interface_tx = 1,
+ .dma_handshake_interface_rx = 0,
+ .network_mode = 0,
+ .start_delay = 1,
+ .ssp_base_add = SST_BYT_SSP0_PHY_ADDR,
+ },
+ },
+};
+
+static const struct sst_board_config_data sst_byt_crv2_bdata = {
+ .active_ssp_ports = 1,
+ .platform_id = 3,
+ .board_id = 1,
+ .ihf_num_chan = 1,
+ .osc_clk_freq = 25000000,
+ .ssp_platform_data = {
+ [0] = {
+ .ssp_cfg_sst = 1,
+ .port_number = 0,
+ .is_master = 1,
+ .pack_mode = 1,
+ .num_slots_per_frame = 2,
+ .num_bits_per_slot = 24,
+ .active_tx_map = 3,
+ .active_rx_map = 3,
+ .ssp_frame_format = 3,
+ .frame_polarity = 1,
+ .serial_bitrate_clk_mode = 0,
+ .frame_sync_width = 24,
+ .dma_handshake_interface_tx = 1,
+ .dma_handshake_interface_rx = 0,
+ .network_mode = 0,
+ .start_delay = 1,
+ .ssp_base_add = SST_BYT_SSP0_PHY_ADDR,
+ },
+ },
+};
+
+static const struct sst_info byt_fwparse_info = {
+ .use_elf = true,
+ .max_streams = 4,
+ .dma_max_len = SST_MAX_DMA_LEN_MRFLD,
+ .iram_start = SST_BYT_IRAM_PHY_START,
+ .iram_end = SST_BYT_IRAM_PHY_END,
+ .iram_use = true,
+ .dram_start = SST_BYT_DRAM_PHY_START,
+ .dram_end = SST_BYT_DRAM_PHY_END,
+ .dram_use = true,
+ .imr_start = SST_BYT_IMR_VIRT_START,
+ .imr_end = SST_BYT_IMR_VIRT_END,
+ .imr_use = true,
+ .mailbox_start = SST_BYT_MBOX_PHY_ADDR,
+ .num_probes = 0,
+ .lpe_viewpt_rqd = true,
+};
+
+
+static const struct sst_info cht_fwparse_info = {
+ .use_elf = true,
+ .max_streams = MAX_NUM_STREAMS_MRFLD,
+ .dma_max_len = SST_MAX_DMA_LEN_MRFLD,
+ .iram_start = SST_BYT_IRAM_PHY_START,
+ .iram_end = SST_BYT_IRAM_PHY_END,
+ .iram_use = true,
+ .dram_start = SST_BYT_DRAM_PHY_START,
+ .dram_end = SST_BYT_DRAM_PHY_END,
+ .dram_use = true,
+ .imr_start = SST_BYT_IMR_VIRT_START,
+ .imr_end = SST_BYT_IMR_VIRT_END,
+ .imr_use = true,
+ .mailbox_start = SST_BYT_MBOX_PHY_ADDR,
+ .num_probes = 0,
+ .lpe_viewpt_rqd = true,
+};
+
+static const struct sst_ipc_info byt_ipc_info = {
+ .use_32bit_ops = true,
+ .ipc_offset = 4,
+ .mbox_recv_off = 0x400,
+};
+
+static const struct sst_lib_dnld_info byt_lib_dnld_info = {
+ .mod_base = SST_BYT_IMR_VIRT_START,
+ .mod_end = SST_BYT_IMR_VIRT_END,
+ .mod_table_offset = BYT_FW_MOD_TABLE_OFFSET,
+ .mod_table_size = BYT_FW_MOD_TABLE_SIZE,
+ .mod_ddr_dnld = true,
+};
+
+static const struct sst_ipc_info cht_ipc_info = {
+ .use_32bit_ops = false,
+ .ipc_offset = 0,
+ .mbox_recv_off = 0x400,
+};
+
+struct sst_platform_info cht_platform_data = {
+ .probe_data = &cht_fwparse_info,
+ .ssp_data = NULL,
+ .bdata = NULL,
+ .pdata = NULL,
+ .ipc_info = &cht_ipc_info,
+ .lib_info = NULL,
+};
+
+struct sst_platform_info byt_rvp_platform_data = {
+ .probe_data = &byt_fwparse_info,
+ .ssp_data = NULL,
+ .bdata = &sst_byt_rvp_bdata,
+ .pdata = &sst_byt_pdata,
+ .ipc_info = &byt_ipc_info,
+ .lib_info = &byt_lib_dnld_info,
+};
+
+struct sst_platform_info byt_ffrd8_platform_data = {
+ .probe_data = &byt_fwparse_info,
+ .ssp_data = NULL,
+ .bdata = &sst_byt_ffrd8_bdata,
+ .pdata = &sst_byt_pdata,
+ .ipc_info = &byt_ipc_info,
+ .lib_info = &byt_lib_dnld_info,
+};
+
+/*
+ * sst_workqueue_init - initialize driver lists, the IPC work item and the
+ * two single-threaded workqueues.
+ *
+ * @ctx: driver context being initialized
+ *
+ * Returns 0 on success or -EBUSY when a workqueue cannot be created; on
+ * failure any workqueue already created here is destroyed again, so the
+ * caller need not clean up partially-created state.
+ */
+int sst_workqueue_init(struct intel_sst_drv *ctx)
+{
+ pr_debug("%s", __func__);
+
+ INIT_LIST_HEAD(&ctx->memcpy_list);
+ INIT_LIST_HEAD(&ctx->libmemcpy_list);
+ /* fix: was INIT_LIST_HEAD(&sst_drv_ctx->rx_list) — use the passed-in
+  * ctx consistently like every other list here */
+ INIT_LIST_HEAD(&ctx->rx_list);
+ INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
+ INIT_LIST_HEAD(&ctx->block_list);
+ INIT_WORK(&ctx->ipc_post_msg.wq, ctx->ops->post_message);
+ init_waitqueue_head(&ctx->wait_queue);
+
+ ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
+ if (!ctx->mad_wq)
+ return -EBUSY;
+ ctx->post_msg_wq =
+ create_singlethread_workqueue("sst_post_msg_wq");
+ if (!ctx->post_msg_wq)
+ goto err_free_mad_wq;
+ return 0;
+
+err_free_mad_wq:
+ /* fix: mad_wq was leaked when the second workqueue failed */
+ destroy_workqueue(ctx->mad_wq);
+ ctx->mad_wq = NULL;
+ return -EBUSY;
+}
+
+/*
+ * sst_init_locks - initialize every mutex and spinlock in the driver
+ * context before any of them can be contended.
+ *
+ * @ctx: driver context being initialized
+ */
+void sst_init_locks(struct intel_sst_drv *ctx)
+{
+ mutex_init(&ctx->stream_lock);
+ mutex_init(&ctx->sst_lock);
+ mutex_init(&ctx->mixer_ctrl_lock);
+ mutex_init(&ctx->csr_lock);
+ /* fix: was spin_lock_init(&sst_drv_ctx->rx_msg_lock) — use the
+  * passed-in ctx like every other lock here (ctx == sst_drv_ctx in
+  * the only caller, so behavior is unchanged) */
+ spin_lock_init(&ctx->rx_msg_lock);
+ spin_lock_init(&ctx->ipc_spin_lock);
+ spin_lock_init(&ctx->block_lock);
+ spin_lock_init(&ctx->pvt_id_lock);
+}
+
+/*
+ * sst_destroy_workqueue - tear down whichever of the two driver
+ * workqueues were created; safe to call with either pointer NULL.
+ */
+int sst_destroy_workqueue(struct intel_sst_drv *ctx)
+{
+ pr_debug("%s", __func__);
+
+ if (ctx->post_msg_wq)
+ destroy_workqueue(ctx->post_msg_wq);
+ if (ctx->mad_wq)
+ destroy_workqueue(ctx->mad_wq);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+/*
+ * sst_platform_get_resources_fdk - map LPE resources for FDK-enumerated
+ * parts (HIDs "LPE0F28"/"LPE0F281"): DDR, SHIM, mailbox, IRAM and DRAM
+ * come from five separate MEM resources, in that order.
+ *
+ * Returns 0 on success or -EIO when a resource is missing or cannot be
+ * ioremapped; mappings are devm-managed, so no explicit unwinding here.
+ */
+static int sst_platform_get_resources_fdk(struct intel_sst_drv *ctx,
+ struct platform_device *pdev)
+{
+ struct resource *rsrc;
+
+ pr_debug("%s", __func__);
+
+ /* All ACPI resource request here */
+ /* Get DDR addr from platform resource table */
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rsrc) {
+ pr_err("Invalid DDR base from IFWI");
+ return -EIO;
+ }
+ ctx->ddr_base = rsrc->start;
+ ctx->ddr_end = rsrc->end;
+ pr_debug("DDR base: %#x", ctx->ddr_base);
+ ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
+ resource_size(rsrc));
+ if (!ctx->ddr) {
+ pr_err("unable to map DDR");
+ return -EIO;
+ }
+
+ /* Get Shim addr from platform resource table */
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rsrc) {
+ pr_err("Invalid SHIM base from IFWI");
+ return -EIO;
+ }
+ ctx->shim_phy_add = rsrc->start;
+ pr_debug("SHIM base: %#x", ctx->shim_phy_add);
+ ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
+ resource_size(rsrc));
+ if (!ctx->shim) {
+ pr_err("unable to map SHIM");
+ return -EIO;
+ }
+ /* reassign physical address to LPE viewpoint address */
+ ctx->shim_phy_add = SST_BYT_SHIM_PHY_ADDR;
+
+ /* Get mailbox addr from platform resource table */
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!rsrc) {
+ pr_err("Invalid Mailbox base from IFWI");
+ return -EIO;
+ }
+ ctx->mailbox_add = rsrc->start;
+ pr_debug("Mailbox base: %#x", ctx->mailbox_add);
+ ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
+ resource_size(rsrc));
+ if (!ctx->mailbox) {
+ pr_err("unable to map mailbox");
+ return -EIO;
+ }
+ /* reassign physical address to LPE viewpoint address */
+ /* NOTE(review): reads the global sst_drv_ctx instead of ctx — same
+  * object today, but inconsistent with the rest of this function */
+ ctx->mailbox_add = sst_drv_ctx->info.mailbox_start;
+
+ /* Get iram/iccm addr from platform resource table */
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!rsrc) {
+ pr_err("Invalid IRAM base from IFWI");
+ return -EIO;
+ }
+ ctx->iram_base = rsrc->start;
+ ctx->iram_end = rsrc->end;
+ pr_debug("IRAM base: %#x", ctx->iram_base);
+ ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
+ resource_size(rsrc));
+ if (!ctx->iram) {
+ pr_err("unable to map IRAM");
+ return -EIO;
+ }
+
+ /* Get dram/dccm addr from platform resource table */
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (!rsrc) {
+ pr_err("Invalid DRAM base from IFWI");
+ return -EIO;
+ }
+ ctx->dram_base = rsrc->start;
+ ctx->dram_end = rsrc->end;
+ pr_debug("DRAM base: %#x", ctx->dram_base);
+ ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
+ resource_size(rsrc));
+ if (!ctx->dram) {
+ pr_err("unable to map DRAM");
+ return -EIO;
+ }
+
+ /* Register the ISR */
+ /* NOTE(review): platform_get_irq() can return a negative errno;
+  * the value is stored unchecked here — verify callers handle it */
+ ctx->irq_num = platform_get_irq(pdev, 0);
+ pr_debug("irq from pdev is:%d", ctx->irq_num);
+ return 0;
+}
+
+#define LPE_IRAM_OFFSET 0x0C0000
+#define LPE_IRAM_SIZE 0x040000
+#define LPE_DRAM_OFFSET 0x100000
+#define LPE_DRAM_SIZE 0x040000
+#define LPE_SHIM_OFFSET 0x140000
+#define LPE_SHIM_SIZE 0x004000
+#define LPE_MBOX_OFFSET 0x144000
+#define LPE_MBOX_SIZE 0x004000
+
+/*
+ * sst_platform_get_resources_edk - map LPE resources for EDK2-enumerated
+ * parts (HIDs "80860F28"/"808622A8"): IRAM, DRAM, SHIM and mailbox are
+ * carved out of one large MEM resource at fixed LPE_*_OFFSETs; DDR comes
+ * from MEM resource 2.
+ *
+ * Returns 0 on success, -EIO on a missing/unmappable resource, -EINVAL
+ * for an unrecognized HID; mappings are devm-managed.
+ */
+static int sst_platform_get_resources_edk(struct intel_sst_drv *ctx,
+ struct platform_device *pdev)
+{
+ struct resource *rsrc;
+
+ pr_debug("%s", __func__);
+
+ /* All ACPI resource request here */
+ /* Get Shim addr */
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rsrc) {
+ pr_err("Invalid SHIM base from IFWI");
+ return -EIO;
+ }
+ pr_debug("LPE base: %#x size:%#x", (unsigned int) rsrc->start,
+ (unsigned int)resource_size(rsrc));
+ ctx->iram_base = rsrc->start + LPE_IRAM_OFFSET;
+ ctx->iram_end = ctx->iram_base + LPE_IRAM_SIZE - 1;
+ pr_debug("IRAM base: %#x", ctx->iram_base);
+ ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
+ LPE_IRAM_SIZE);
+ if (!ctx->iram) {
+ pr_err("unable to map IRAM");
+ return -EIO;
+ }
+
+ ctx->dram_base = rsrc->start + LPE_DRAM_OFFSET;
+ ctx->dram_end = ctx->dram_base + LPE_DRAM_SIZE - 1;
+ pr_debug("DRAM base: %#x", ctx->dram_base);
+ ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
+ LPE_DRAM_SIZE);
+ if (!ctx->dram) {
+ pr_err("unable to map DRAM");
+ return -EIO;
+ }
+
+ ctx->shim_phy_add = rsrc->start + LPE_SHIM_OFFSET;
+ pr_debug("SHIM base: %#x", ctx->shim_phy_add);
+ ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
+ LPE_SHIM_SIZE);
+ if (!ctx->shim) {
+ pr_err("unable to map SHIM");
+ return -EIO;
+ }
+ /* reassign physical address to LPE viewpoint address */
+ ctx->shim_phy_add = SST_BYT_SHIM_PHY_ADDR;
+
+ /* Get mailbox addr */
+ ctx->mailbox_add = rsrc->start + LPE_MBOX_OFFSET;
+ pr_debug("Mailbox base: %#x", ctx->mailbox_add);
+ ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
+ LPE_MBOX_SIZE);
+ if (!ctx->mailbox) {
+ pr_err("unable to map mailbox");
+ return -EIO;
+ }
+
+ /* reassign physical address to LPE viewpoint address */
+ /* NOTE(review): reads the global sst_drv_ctx instead of ctx — same
+  * object today, but inconsistent with the rest of this function */
+ ctx->mailbox_add = sst_drv_ctx->info.mailbox_start;
+
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!rsrc) {
+ pr_err("Invalid DDR base from IFWI");
+ return -EIO;
+ }
+ ctx->ddr_base = rsrc->start;
+ ctx->ddr_end = rsrc->end;
+ pr_debug("DDR base: %#x", ctx->ddr_base);
+ ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
+ resource_size(rsrc));
+ if (!ctx->ddr) {
+ pr_err("unable to map DDR");
+ return -EIO;
+ }
+ /* Register the ISR */
+ if (!strncmp(ctx->hid, "80860F28", 8))
+ ctx->irq_num = platform_get_irq(pdev, 0);
+ else if (!strncmp(ctx->hid, "808622A8", 8)) {
+ /* FIXME: IRQ number will be moved to 0 once the BIOS fix is done */
+ ctx->irq_num = platform_get_irq(pdev, 5);
+ } else
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * sst_platform_get_resources - dispatch resource mapping by ACPI HID.
+ * FDK HIDs ("LPE0F28x") use the five-resource layout; EDK2 HIDs
+ * ("80860F28", "808622A8") use the single-window layout.
+ */
+static int sst_platform_get_resources(const char *hid,
+ struct intel_sst_drv *ctx, struct platform_device *pdev)
+{
+
+ pr_debug("%s", __func__);
+
+ /* "LPE0F281" must be tested before its prefix "LPE0F28" */
+ if (!strncmp(hid, "LPE0F281", 8)) {
+ ctx->pci_id = SST_BYT_PCI_ID;
+ return sst_platform_get_resources_fdk(ctx, pdev);
+ } else if (!strncmp(hid, "LPE0F28", 7)) {
+ ctx->pci_id = SST_BYT_PCI_ID;
+ return sst_platform_get_resources_fdk(ctx, pdev);
+ } else if (!strncmp(hid, "808622A8", 8)) {
+ ctx->pci_id = SST_CHT_PCI_ID;
+ return sst_platform_get_resources_edk(ctx, pdev);
+ } else if (!strncmp(hid, "80860F28", 8)) {
+ ctx->pci_id = SST_BYT_PCI_ID;
+ return sst_platform_get_resources_edk(ctx, pdev);
+ }
+
+ pr_err("Invalid device\n");
+ return -EINVAL;
+}
+
+/*
+ * sst_acpi_probe - ACPI probe entry for the SST (LPE) audio engine.
+ *
+ * @pdev: platform device created from the ACPI entry
+ *
+ * Maps the LPE resources for the device's HID, registers the IRQ and the
+ * LPE control misc device, and kicks off the asynchronous firmware
+ * request.  Returns 0 on success or a negative errno; everything acquired
+ * after workqueue creation is unwound through the goto labels on error.
+ */
+int sst_acpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_device *device;
+ const char *hid;
+ int i, ret = 0;
+ struct intel_sst_drv *ctx;
+
+ ret = acpi_bus_get_device(handle, &device);
+ if (ret) {
+ pr_err("%s: could not get acpi device - %d\n", __func__, ret);
+ return -ENODEV;
+ }
+
+ if (acpi_bus_get_status(device) || !device->status.present) {
+ pr_err("%s: device has invalid status", __func__);
+ return -ENODEV;
+ }
+
+ hid = acpi_device_hid(device);
+ pr_debug("%s for %s", __func__, hid);
+ ret = sst_alloc_drv_context(dev);
+ if (ret)
+ return ret;
+ ctx = sst_drv_ctx;
+ ctx->dev = dev;
+ ctx->hid = hid;
+
+ ret = sst_platform_get_resources(hid, ctx, pdev);
+ if (ret)
+ return ret;
+ /* need to save shim registers in BYT */
+ ctx->shim_regs64 = devm_kzalloc(dev, sizeof(*ctx->shim_regs64),
+ GFP_KERNEL);
+ if (!ctx->shim_regs64)
+ return -ENOMEM;
+
+ ret = sst_driver_ops(ctx);
+ if (ret != 0)
+ return -EINVAL;
+
+ sst_init_locks(ctx);
+
+ ctx->stream_cnt = 0;
+ ctx->fw_in_mem = NULL;
+ ctx->use_dma = 1;
+ ctx->use_lli = 1;
+
+ /* fix: the old "if (sst_workqueue_init(ctx)) goto do_free_wq;" left
+  * ret == 0, so a failed probe reported success */
+ ret = sst_workqueue_init(ctx);
+ if (ret)
+ goto do_free_wq;
+
+ ctx->pdata = sst_get_acpi_driver_data(hid);
+ if (!ctx->pdata) {
+ /* fix: was a bare return -EINVAL that leaked the workqueues */
+ ret = -EINVAL;
+ goto do_free_wq;
+ }
+ if (INTEL_MID_BOARD(3, TABLET, BYT, BLK, PRO, CRV2)) {
+ /* BYT-CR V2 has only mono speaker, while
+  * byt has stereo speaker, for both
+  * HID is same, so platform data also is
+  * same, hence overriding bdata based on spid
+  */
+ ctx->pdata->bdata = &sst_byt_crv2_bdata;
+ pr_info("Overriding bdata for byt-crv2\n");
+ }
+
+ ctx->use_32bit_ops = ctx->pdata->ipc_info->use_32bit_ops;
+ ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off;
+
+ memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info));
+
+ ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset;
+ ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset;
+
+ pr_debug("Got drv data max stream %d\n",
+ ctx->info.max_streams);
+ for (i = 1; i <= ctx->info.max_streams; i++) {
+ struct stream_info *stream = &ctx->streams[i];
+ mutex_init(&stream->lock);
+ }
+ ret = sst_request_firmware_async(ctx);
+ if (ret) {
+ pr_err("Firmware download failed:%d\n", ret);
+ goto do_free_wq;
+ }
+
+ ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt,
+ ctx->ops->irq_thread, 0, SST_DRV_NAME,
+ ctx);
+ if (ret)
+ goto do_free_wq; /* fix: was a bare return that leaked the WQs */
+ pr_debug("Registered IRQ %#x\n", ctx->irq_num);
+
+ /*Register LPE Control as misc driver*/
+ ret = misc_register(&lpe_ctrl);
+ if (ret) {
+ pr_err("couldn't register control device\n");
+ goto do_free_wq;
+ }
+ /* mask all SSP and DMA irq to IA - enabled in acpi kernel driver */
+ sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038);
+
+ if (ctx->use_32bit_ops) {
+ pr_debug("allocate mem for context save/restore\n ");
+ /*allocate mem for fw context save during suspend*/
+ ctx->fw_cntx = devm_kzalloc(ctx->dev, FW_CONTEXT_MEM, GFP_KERNEL);
+ if (!ctx->fw_cntx) {
+ ret = -ENOMEM;
+ goto do_free_misc;
+ }
+ /*setting zero as that is valid mem to restore*/
+ ctx->fw_cntx_size = 0;
+ }
+
+ platform_set_drvdata(pdev, ctx);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ register_sst(dev);
+ sst_debugfs_init(ctx);
+ sst_set_fw_state_locked(ctx, SST_UN_INIT);
+ sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
+ pr_info("%s successfully done!\n", __func__);
+ return ret;
+
+do_free_misc:
+ misc_deregister(&lpe_ctrl);
+do_free_wq:
+ sst_destroy_workqueue(ctx);
+
+ sst_drv_ctx = NULL;
+ platform_set_drvdata(pdev, NULL);
+ pr_err("%s: failed with %d\n", __func__, ret);
+ return ret;
+}
+
+/**
+* sst_acpi_remove - remove function
+*
+* @pdev: platform device structure
+*
+* This function is called by OS when a device is unloaded
+* This frees the interrupt etc
+*/
+int sst_acpi_remove(struct platform_device *pdev)
+{
+ struct intel_sst_drv *ctx;
+
+ ctx = platform_get_drvdata(pdev);
+ sst_debugfs_exit(ctx);
+ pm_runtime_get_noresume(ctx->dev);
+ pm_runtime_disable(ctx->dev);
+ unregister_sst(ctx->dev);
+ sst_set_fw_state_locked(ctx, SST_UN_INIT);
+ misc_deregister(&lpe_ctrl);
+ kfree(ctx->runtime_param.param.addr);
+ flush_scheduled_work();
+ sst_destroy_workqueue(ctx);
+ kfree(ctx->fw_sg_list.src);
+ kfree(ctx->fw_sg_list.dst);
+ ctx->fw_sg_list.list_len = 0;
+ kfree(ctx->fw_in_mem);
+ ctx->fw_in_mem = NULL;
+ sst_memcpy_free_resources();
+ sst_drv_ctx = NULL;
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#else
+int sst_acpi_probe(struct platform_device *pdev)
+{
+ return -EINVAL;
+}
+
+int sst_acpi_remove(struct platform_device *pdev)
+{
+ return -EINVAL;
+}
+#endif
+
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine ACPI Driver");
+MODULE_AUTHOR("Ramesh Babu K V");
+MODULE_AUTHOR("Omair Mohammed Abdullah");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("sst");
--- /dev/null
+
+/*
+ * sst_app_compat_interface.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2013-14 Intel Corp
+ * Authors: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This driver exposes the audio engine functionalities to the ALSA
+ * and middleware.
+ */
+
+/* This file is included from sst.c */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <sound/intel_sst_ioctl.h>
+#include "sst.h"
+
+/*
+ * 32-bit userspace layout of struct snd_ppp_params: identical up to the
+ * final member, where the native pointer 'params' is a u32 handle that
+ * must be widened with compat_ptr() before use.
+ */
+struct snd_ppp_params32 {
+ __u8 algo_id;/* Post/Pre processing algorithm ID */
+ __u8 str_id; /*Only 5 bits used 0 - 31 are valid*/
+ __u8 enable; /* 0= disable, 1= enable*/
+ __u8 operation;
+ __u32 size; /*Size of parameters for all blocks*/
+ __u32 params; /* compat handle; widen with compat_ptr() */
+} __packed;
+
+/*
+ * 32-bit ioctl numbers: same direction and nr as the native commands,
+ * but encoded with the compat struct so _IOC_SIZE matches what a 32-bit
+ * process passes in.
+ */
+enum {
+SNDRV_SST_SET_ALGO32 = _IOW('L', 0x30, struct snd_ppp_params32),
+SNDRV_SST_GET_ALGO32 = _IOWR('L', 0x31, struct snd_ppp_params32),
+};
+
+/*
+ * sst_algo_compat - forward a 32-bit SET/GET_ALGO ioctl to the native path
+ * @cmd: native ioctl command to execute
+ * @arg32: userspace pointer to the 32-bit parameter block
+ *
+ * Copies in the compat structure, widens the embedded u32 params handle
+ * into a real user pointer, and hands off to intel_sst_ioctl_dsp().
+ * Returns 0 or a negative error code.
+ */
+static long sst_algo_compat(unsigned int cmd,
+  struct snd_ppp_params32 __user *arg32)
+{
+ struct snd_ppp_params32 algo_params32;
+ struct snd_ppp_params algo_params;
+
+ if (copy_from_user(&algo_params32, arg32, sizeof(algo_params32))) {
+  pr_debug("%s: copy from user failed\n", __func__);
+  /* -EFAULT, not -EINVAL: the argument was fine, the copy faulted */
+  return -EFAULT;
+ }
+
+ /* all members up to 'params' share the same layout in both ABIs */
+ memcpy(&algo_params, &algo_params32, sizeof(algo_params32)-sizeof(__u32));
+ algo_params.params = compat_ptr(algo_params32.params);
+ return intel_sst_ioctl_dsp(cmd, &algo_params, (unsigned long)arg32);
+}
+
+/*
+ * intel_sst_ioctl_compat - compat_ioctl entry point for 32-bit callers
+ * @file_ptr: open file on the SST control device
+ * @cmd: 32-bit ioctl command
+ * @arg: userspace argument (32-bit pointer)
+ *
+ * DRIVER_INFO and TUNING_PARAMS have identical 32/64-bit layouts and are
+ * forwarded unchanged; the ALGO commands need struct translation.
+ * Returns the native handler's result or -ENOTTY for unknown commands.
+ */
+static long intel_sst_ioctl_compat(struct file *file_ptr,
+  unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = compat_ptr(arg);
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(SNDRV_SST_DRIVER_INFO):
+ case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+  return intel_sst_ioctl(file_ptr, cmd, (unsigned long)argp);
+ case _IOC_NR(SNDRV_SST_SET_ALGO32):
+  return sst_algo_compat(SNDRV_SST_SET_ALGO, argp);
+ case _IOC_NR(SNDRV_SST_GET_ALGO32):
+  return sst_algo_compat(SNDRV_SST_GET_ALGO, argp);
+ default:
+  return -ENOTTY;
+ }
+ /* unreachable 'return 0' after the exhaustive switch removed */
+}
--- /dev/null
+/*
+ * sst_app_interface.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This driver exposes the audio engine functionalities to the ALSA
+ * and middleware.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+#include <linux/aio.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <linux/ioctl.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define AM_MODULE 1
+
+/**
+ * intel_sst_open_cntrl - opens a handle to driver
+ *
+ * @i_node: inode structure
+ * @file_ptr:pointer to file
+ *
+ * Called by the VFS when userspace opens /dev/intel_sst_control.
+ * Checks/powers the device under stream_lock; used for control
+ * operations only. Returns 0 on success or the negative error code
+ * from intel_sst_check_device().
+ */
+int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
+{
+ int retval; /* was 'unsigned int': must hold negative error codes */
+
+ /* audio manager open */
+ mutex_lock(&sst_drv_ctx->stream_lock);
+ retval = intel_sst_check_device();
+ if (retval) {
+  mutex_unlock(&sst_drv_ctx->stream_lock);
+  return retval;
+ }
+ pr_debug("AM handle opened\n");
+
+ mutex_unlock(&sst_drv_ctx->stream_lock);
+ return retval;
+}
+
+
+/*
+ * Release the control-device handle: drop the runtime-PM reference taken
+ * by the open path, under the same stream_lock. Always returns 0.
+ */
+int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
+{
+ /* audio manager close */
+ mutex_lock(&sst_drv_ctx->stream_lock);
+ sst_pm_runtime_put(sst_drv_ctx);
+ mutex_unlock(&sst_drv_ctx->stream_lock);
+ pr_debug("AM handle closed\n");
+ return 0;
+}
+
+/**
+ * sst_get_max_streams - report the usable stream count to userspace
+ * @info: out-parameter whose max_streams field is filled in
+ *
+ * Probe streams are reserved for internal use, so they are subtracted
+ * from the firmware's total before reporting.
+ */
+void sst_get_max_streams(struct snd_sst_driver_info *info)
+{
+ struct intel_sst_drv *ctx = sst_drv_ctx;
+
+ pr_debug("info.max_streams %d num_probes %d\n", ctx->info.max_streams,
+   ctx->info.num_probes);
+ info->max_streams = ctx->info.max_streams - ctx->info.num_probes;
+}
+
+/**
+ * sst_create_algo_ipc - create ipc msg for algorithm parameters
+ *
+ * @algo_params: Algorithm parameters
+ * @msg: post msg pointer (allocated here on success)
+ * @pvt_id: Checked by wake_up_block
+ *
+ * Builds a large IPC message whose mailbox holds: the 32-bit header,
+ * then the snd_ppp_params struct minus its trailing pointer member.
+ * Returns the byte offset in the mailbox where the caller must copy the
+ * variable-size parameter payload, or -ENOMEM if the message would not
+ * fit in the mailbox / allocation fails.
+ */
+static int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
+  struct ipc_post **msg, int pvt_id)
+{
+ u32 header_size = 0;
+ /* header dword + struct (without the pointer) + payload bytes */
+ u32 ipc_msg_size = sizeof(u32) + sizeof(*algo_params)
+   - sizeof(algo_params->params) + algo_params->size;
+ u32 offset = 0;
+
+ if (ipc_msg_size > SST_MAILBOX_SIZE)
+  return -ENOMEM;
+ if (sst_create_ipc_msg(msg, true))
+  return -ENOMEM;
+ sst_fill_header(&(*msg)->header,
+   IPC_IA_ALG_PARAMS, 1, pvt_id);
+ (*msg)->header.part.data = ipc_msg_size;
+ memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
+ offset = sizeof(u32);
+ /* copy the fixed part of the struct; payload follows at 'offset' */
+ header_size = sizeof(*algo_params) - sizeof(algo_params->params);
+ memcpy((*msg)->mailbox_data + offset, algo_params, header_size);
+ offset += header_size;
+ return offset;
+}
+
+/*
+ * Send a SET_ALGO/GET_ALGO request to the DSP and wait for completion.
+ * 'algo_params->params' is a __user pointer to 'size' payload bytes.
+ * Returns 0 on success, negative error on failure.
+ */
+static long sst_send_algo(struct snd_ppp_params *algo_params,
+   struct sst_block *block, enum sst_algo_ops algo)
+{
+ struct ipc_post *msg;
+ int retval;
+ int offset;
+
+ pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+  algo_params->algo_id, algo_params->str_id,
+  algo_params->enable, algo_params->size);
+
+ algo_params->operation = algo;
+
+ /* offset = where the payload goes inside the mailbox */
+ offset = sst_create_algo_ipc(algo_params, &msg, block->drv_id);
+ if (offset < 0)
+  return offset;
+
+ if (copy_from_user(msg->mailbox_data + offset,
+   algo_params->params, algo_params->size)) {
+  /* NOTE(review): only 'msg' is freed here, while the tuning-params
+   * path also frees msg->mailbox_data -- confirm which allocation
+   * scheme sst_create_ipc_msg(.., true) uses */
+  kfree(msg);
+  return -EFAULT;
+ }
+
+ /* message ownership passes to the dispatch list from here on */
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ retval = sst_wait_timeout(sst_drv_ctx, block);
+ if (retval) {
+  pr_debug("%s: failed for algo ops %s with retval %d\n",
+   __func__, algo ? "SST_GET_ALGO" : "SST_SET_ALGO", retval);
+  return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * intel_sst_ioctl_dsp - receives the device ioctl's
+ *
+ * @cmd:Ioctl cmd
+ * @algo_params: kernel copy of the userspace parameter block
+ * @arg: original userspace address (used to copy results back)
+ *
+ * This function is called when a user space component
+ * sends a DSP Ioctl to SST driver. For GET_ALGO the DSP reply found in
+ * block->data is copied back to userspace field by field.
+ */
+long intel_sst_ioctl_dsp(unsigned int cmd,
+  struct snd_ppp_params *algo_params, unsigned long arg)
+{
+ int retval = 0;
+ struct snd_ppp_params *algo_params_copied;
+ struct sst_block *block;
+ int pvt_id;
+
+ /* pvt_id pairs this request with its completion block */
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ block = sst_create_block(sst_drv_ctx, IPC_IA_ALG_PARAMS, pvt_id);
+ if (block == NULL)
+  return -ENOMEM;
+
+ /* NOTE(review): no default case -- an unexpected cmd returns 0 */
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(SNDRV_SST_SET_ALGO):
+  retval = sst_send_algo(algo_params, block, SST_SET_ALGO);
+  break;
+
+ case _IOC_NR(SNDRV_SST_GET_ALGO):
+  retval = sst_send_algo(algo_params, block, SST_GET_ALGO);
+  if (retval)
+   break;
+  /* DSP reply was stashed in the block by the IPC handler */
+  algo_params_copied = (struct snd_ppp_params *)block->data;
+
+  if (algo_params_copied->size > algo_params->size) {
+   pr_debug("mem insufficient to copy\n");
+   retval = -EMSGSIZE;
+   break;
+  } else {
+   char __user *tmp;
+   struct snd_ppp_params *get_params;
+   char *pp;
+
+   /* update only 'size' and 'enable' in the user struct */
+   tmp = (char __user *)arg + offsetof(
+     struct snd_ppp_params, size);
+   if (copy_to_user(tmp, &algo_params_copied->size,
+     sizeof(u32))) {
+    retval = -EFAULT;
+    break;
+   }
+   tmp = (char __user *)arg + offsetof(
+     struct snd_ppp_params, enable);
+   if (copy_to_user(tmp, &algo_params_copied->enable,
+     sizeof(u8))) {
+    retval = -EFAULT;
+    break;
+   }
+   if (algo_params_copied->size == 0)
+    break;
+
+   /* NOTE(review): allocation failures below leave retval at 0,
+    * silently reporting success to the caller -- confirm intent */
+   get_params = kmalloc(sizeof(*get_params), GFP_KERNEL);
+   if (!get_params) {
+    pr_err("sst: mem alloc failed\n");
+    break;
+   }
+   memcpy(get_params, algo_params_copied,
+     sizeof(*get_params));
+
+   get_params->params = kmalloc(get_params->size, GFP_KERNEL);
+   if (!get_params->params) {
+    pr_err("sst: mem alloc failed\n");
+    goto free_mem;
+   }
+   /* payload sits right after the fixed struct in block->data */
+   pp = (char *)algo_params_copied;
+   pp = pp + sizeof(*get_params) -
+     sizeof(get_params->params);
+   memcpy(get_params->params, pp, get_params->size);
+   if (copy_to_user(algo_params->params,
+     get_params->params,
+     get_params->size)) {
+    retval = -EFAULT;
+   }
+   kfree(get_params->params);
+
+free_mem:
+   kfree(get_params);
+
+  }
+  break;
+ }
+ sst_free_block(sst_drv_ctx, block);
+ pr_debug("ioctl dsp return = %d, for cmd = %x\n", retval, cmd);
+ return retval;
+}
+
+/*
+ * sst_ioctl_tuning_params - forward SNDRV_SST_TUNING_PARAMS to the DSP
+ * @cmd: ioctl command (selects the IPC header)
+ * @arg: __user pointer to struct snd_sst_tuning_params; its 'addr'
+ *       member points at 'size' bytes of payload
+ *
+ * Mailbox layout: header dword, then the params struct whose trailing
+ * 'addr' member is overwritten by the payload itself. Returns 0 or a
+ * negative error code. (Fixes mis-encoded '&params' tokens that had
+ * been mangled into a pilcrow character.)
+ */
+static long sst_ioctl_tuning_params(unsigned int cmd, unsigned long arg)
+{
+ struct snd_sst_tuning_params params;
+ struct ipc_post *msg;
+ unsigned long address;
+
+ if (copy_from_user(&params, (void __user *)arg, sizeof(params)))
+  return -EFAULT;
+ pr_debug("sst: Parameter %d, Stream %d, Size %d\n", params.type,
+   params.str_id, params.size);
+ /* NOTE(review): params.size is user-controlled and not bounded here
+  * against the mailbox size -- confirm validation happens upstream */
+ if (sst_create_ipc_msg(&msg, true))
+  return -ENOMEM;
+ address = (unsigned long)params.addr;
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+  sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1,
+    params.str_id);
+  break;
+ }
+ msg->header.part.data = sizeof(u32) + sizeof(params) + params.size;
+ memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+ memcpy(msg->mailbox_data + sizeof(u32), &params, sizeof(params));
+ /* driver doesn't need to send address, so overwrite addr with data */
+ if (copy_from_user(msg->mailbox_data + sizeof(u32)
+   + sizeof(params) - sizeof(params.addr),
+   (void __user *)address, params.size)) {
+  kfree(msg->mailbox_data);
+  kfree(msg);
+  return -EFAULT;
+ }
+
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ return 0;
+}
+/**
+ * intel_sst_ioctl - receives the device ioctl's
+ * @file_ptr:pointer to file
+ * @cmd:Ioctl cmd
+ * @arg:data
+ *
+ * This function is called by OS when a user space component
+ * sends an Ioctl to SST driver. Rejects everything with -EBUSY while
+ * the firmware is not in the RUNNING state.
+ */
+long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ struct snd_ppp_params algo_params;
+
+ if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
+  return -EBUSY;
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
+  struct snd_sst_driver_info info;
+
+  pr_debug("SNDRV_SST_DRIVER_INFO received\n");
+  sst_get_max_streams(&info);
+
+  if (copy_to_user((void __user *)arg, &info,
+    sizeof(info)))
+   retval = -EFAULT;
+  break;
+ }
+ case _IOC_NR(SNDRV_SST_GET_ALGO):
+ case _IOC_NR(SNDRV_SST_SET_ALGO):
+  /* copy the fixed struct here; the payload behind .params is
+   * copied later by the dsp-ioctl path */
+  if (copy_from_user(&algo_params, (void __user *)arg,
+    sizeof(algo_params))) {
+   return -EFAULT;
+  }
+  retval = intel_sst_ioctl_dsp(cmd, &algo_params, arg);
+  break;
+
+ case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+  retval = sst_ioctl_tuning_params(cmd, arg);
+  break;
+
+ default:
+  retval = -EINVAL;
+ }
+ pr_debug("intel_sst_ioctl:complete ret code = %d for command = %x\n", retval, cmd);
+ return retval;
+}
+
--- /dev/null
+/*
+ * sst_debug.c - Intel SST Driver debugfs support
+ *
+ * Copyright (C) 2012 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Omair Mohammed Abdullah <omair.m.abdullah@linux.intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file contains all debugfs functions
+ * Support includes:
+ * - Disabling/Enabling runtime PM for SST
+ * - Reading/Writing SST SHIM registers
+ * - Reading/Enabling Input OSC Clock
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": debugfs: " fmt
+
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipcutil.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define DMA_NUM_CH 8
+#define DEBUGFS_SSP_BUF_SIZE 300 /* 22 chars * 12 reg*/
+#define DEBUGFS_DMA_BUF_SIZE 2500 /* 32 chars * 78 regs*/
+
+/* Register Offsets of SSP3 and LPE DMA */
+/* NOTE(review): presumably consumed by the fw_ssp_reg / fw_dma_reg
+ * debugfs dumpers mentioned in the README text (not visible in this
+ * chunk) -- confirm before changing the tables */
+u32 ssp_reg_off[] = {0x0, 0x4, 0x8, 0xC, 0x10, 0x28, 0x2C, 0x30, 0x34, 0x38,
+ 0x3C, 0x40};
+/* Excludes the channel registers */
+u32 dma_reg_off[] = {0x2C0, 0x2C8, 0x2D0, 0x2D8, 0x2E0, 0x2E8,
+ 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320, 0x328, 0x330,
+ 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370, 0x378,
+ 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3C8, 0x3D0,
+ 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8};
+
+/*
+ * Dump all SHIM registers (SST_SHIM_BEGIN..SST_SHIM_END, 8-byte stride)
+ * as "offset: value [name]" lines. Register width depends on the SoC:
+ * 32-bit reads on Clovertrail, 64-bit on Merrifield/Baytrail/Cherrytrail.
+ * Fails with -EFAULT while the firmware is suspended.
+ */
+static ssize_t sst_debug_shim_read(struct file *file, char __user *user_buf,
+  size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ unsigned long long val = 0;
+ unsigned int addr;
+ /* NOTE(review): fixed 512-byte buffer; assumes the SHIM range is
+  * small enough that the formatted dump fits -- confirm */
+ char buf[512];
+ char name[8];
+ int pos = 0;
+
+ buf[0] = 0;
+ if (drv->sst_state == SST_SUSPENDED) {
+  pr_err("FW suspended, cannot read SHIM registers\n");
+  return -EFAULT;
+ }
+
+ for (addr = SST_SHIM_BEGIN; addr <= SST_SHIM_END; addr += 8) {
+  switch (drv->pci_id) {
+  case SST_CLV_PCI_ID:
+   val = sst_shim_read(drv->shim, addr);
+   break;
+  case SST_MRFLD_PCI_ID:
+  case SST_BYT_PCI_ID:
+  case SST_CHT_PCI_ID:
+   val = sst_shim_read64(drv->shim, addr);
+   break;
+  }
+
+  /* annotate the well-known interrupt/IPC registers by name */
+  name[0] = 0;
+  switch (addr) {
+  case SST_ISRX:
+   strcpy(name, "ISRX"); break;
+  case SST_ISRD:
+   strcpy(name, "ISRD"); break;
+  case SST_IPCX:
+   strcpy(name, "IPCX"); break;
+  case SST_IPCD:
+   strcpy(name, "IPCD"); break;
+  case SST_IMRX:
+   strcpy(name, "IMRX"); break;
+  case SST_IMRD:
+   strcpy(name, "IMRD"); break;
+  }
+  pos += sprintf(buf + pos, "0x%.2x: %.8llx %s\n", addr, val, name);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+   buf, strlen(buf));
+}
+
+/*
+ * Parse "<hex addr> <hex value>" from userspace and poke a SHIM
+ * register (32-bit write on Clovertrail, 64-bit on Merrifield).
+ * Taints the kernel since userspace is writing hardware registers.
+ * (Fixes the mis-encoded '&reg_addr' token that had been mangled into
+ * a registered-trademark character, and the kstrtoull error message.)
+ */
+static ssize_t sst_debug_shim_write(struct file *file,
+  const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ char buf[32];
+ char *start = buf, *end;
+ unsigned long long value;
+ unsigned long reg_addr;
+ int ret_val;
+ size_t buf_size = min(count, sizeof(buf)-1);
+
+ if (copy_from_user(buf, user_buf, buf_size))
+  return -EFAULT;
+ buf[buf_size] = 0;
+
+ if (drv->sst_state == SST_SUSPENDED) {
+  pr_err("FW suspended, cannot write SHIM registers\n");
+  return -EFAULT;
+ }
+
+ /* first token: register offset */
+ while (*start == ' ')
+  start++;
+ end = start;
+ while (isalnum(*end))
+  end++;
+ *end = 0;
+
+ ret_val = kstrtoul(start, 16, &reg_addr);
+ if (ret_val) {
+  pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+  return ret_val;
+ }
+ if (!(SST_SHIM_BEGIN < reg_addr && reg_addr < SST_SHIM_END)) {
+  pr_err("invalid shim address: 0x%lx\n", reg_addr);
+  return -EINVAL;
+ }
+
+ /* second token: value to write */
+ start = end + 1;
+ while (*start == ' ')
+  start++;
+
+ ret_val = kstrtoull(start, 16, &value);
+ if (ret_val) {
+  pr_err("kstrtoull failed, ret_val = %d\n", ret_val);
+  return ret_val;
+ }
+
+ pr_debug("writing shim: 0x%.2lx=0x%.8llx", reg_addr, value);
+
+ if (drv->pci_id == SST_CLV_PCI_ID)
+  sst_shim_write(drv->shim, reg_addr, (u32) value);
+ else if (drv->pci_id == SST_MRFLD_PCI_ID)
+  sst_shim_write64(drv->shim, reg_addr, (u64) value);
+
+ /* Userspace has been fiddling around behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
+ return buf_size;
+}
+
+/* debugfs "shim_dump": read dumps SHIM registers, write pokes one */
+static const struct file_operations sst_debug_shim_ops = {
+ .open = simple_open,
+ .read = sst_debug_shim_read,
+ .write = sst_debug_shim_write,
+ .llseek = default_llseek,
+};
+
+#define RESVD_DUMP_SZ 40
+#define IA_LPE_MAILBOX_DUMP_SZ 100
+#define LPE_IA_MAILBOX_DUMP_SZ 100
+#define SCU_LPE_MAILBOX_DUMP_SZ 256
+#define LPE_SCU_MAILBOX_DUMP_SZ 256
+
+/*
+ * Take a runtime-PM reference and check the DSP firmware state.
+ * On success (0) the PM reference is HELD and the caller must drop it
+ * with sst_pm_runtime_put(); on failure (-EFAULT) it is dropped here.
+ */
+static inline int is_fw_running(struct intel_sst_drv *drv)
+{
+ pm_runtime_get_sync(drv->dev);
+ atomic_inc(&drv->pm_usage_count);
+ if (drv->sst_state != SST_FW_RUNNING) {
+  pr_err("FW not running\n");
+  sst_pm_runtime_put(drv);
+  return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * Copy num_dwords 32-bit words from MMIO and render them into dest as a
+ * hex dump: 16 bytes per row, grouped 4 bytes at a time, one row per
+ * line. Returns 0 or -ENOMEM.
+ */
+static inline int read_buffer_fromio(char *dest, unsigned int sz,
+     const u32 __iomem *from,
+     unsigned int num_dwords)
+{
+ int i;
+ const unsigned int rowsz = 16, groupsz = 4;
+ const unsigned int size = num_dwords * sizeof(u32);
+ unsigned int linelen, printed = 0, remaining = size;
+
+ /* bounce through RAM first: hex_dump_to_buffer cannot read MMIO */
+ u8 *tmp = kmalloc(size, GFP_KERNEL);
+ if (!tmp)
+  return -ENOMEM;
+ memcpy_fromio(tmp, from, size);
+ for (i = 0; i < size; i += rowsz) {
+  linelen = min(remaining, rowsz);
+  remaining -= rowsz;
+  hex_dump_to_buffer(tmp + i, linelen, rowsz, groupsz,
+    dest + printed, sz - printed, false);
+  /* advance past "2 hex chars per byte + one space per group - 1";
+   * NOTE(review): assumes linelen is a multiple of groupsz for the
+   * separator count to be exact -- confirm for partial last rows */
+  printed += linelen * 2 + linelen / groupsz - 1;
+  *(dest + printed++) = '\n';
+  *(dest + printed) = 0;
+ }
+ kfree(tmp);
+ return 0;
+}
+
+/*
+ * Format num_dwords of SRAM starting at 'from' (whose SRAM offset is
+ * 'offset', shown in the banner line) into a kmalloc'd text buffer and
+ * hand it to userspace via simple_read_from_buffer.
+ * Returns bytes read or a negative error.
+ */
+static inline int copy_sram_to_user_buffer(char __user *user_buf, size_t count, loff_t *ppos,
+   unsigned int num_dwords, const u32 __iomem *from,
+   u32 offset)
+{
+ ssize_t bytes_read;
+ char *buf;
+ int pos;
+ /* 48-byte banner + (2 hex chars + ~1 separator) per byte + NUL */
+ unsigned int bufsz = 48 + sizeof(u32) * num_dwords * (2 + 1) + 1;
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+  pr_err("%s: no memory\n", __func__);
+  return -ENOMEM;
+ }
+ *buf = 0;
+ pos = scnprintf(buf, 48, "Reading %u dwords from offset %#x\n",
+   num_dwords, offset);
+ /* NOTE(review): read_buffer_fromio's -ENOMEM is ignored here; the
+  * banner alone would then be returned -- confirm that is acceptable */
+ read_buffer_fromio(buf + pos, bufsz - pos, from, num_dwords);
+ bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+   buf, strlen(buf));
+ kfree(buf);
+ return bytes_read;
+}
+
+/* Dump the reserved LPE debug area of the mailbox SRAM to userspace. */
+static ssize_t sst_debug_sram_lpe_debug_read(struct file *file,
+  char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ int ret;
+
+ ret = is_fw_running(drv);
+ if (ret)
+  return ret;
+
+ ret = copy_sram_to_user_buffer(user_buf, count, ppos, RESVD_DUMP_SZ,
+   (u32 *)(drv->mailbox + SST_RESERVED_OFFSET),
+   SST_RESERVED_OFFSET);
+ sst_pm_runtime_put(drv);
+ return ret;
+}
+
+/* debugfs file: read-only dump of the reserved LPE debug SRAM area */
+static const struct file_operations sst_debug_sram_lpe_debug_ops = {
+ .open = simple_open,
+ .read = sst_debug_sram_lpe_debug_read,
+ .llseek = default_llseek,
+};
+
+/*
+ * Dump the firmware checkpoint region; its offset and size come from
+ * platform data, so they vary per SoC.
+ */
+static ssize_t sst_debug_sram_lpe_checkpoint_read(struct file *file,
+  char __user *user_buf, size_t count, loff_t *ppos)
+{
+
+ struct intel_sst_drv *drv = file->private_data;
+ int ret = 0;
+ u32 offset;
+
+ ret = is_fw_running(drv);
+ if (ret)
+  return ret;
+
+ offset = sst_drv_ctx->pdata->debugfs_data->checkpoint_offset;
+
+ ret = copy_sram_to_user_buffer(user_buf, count, ppos,
+   sst_drv_ctx->pdata->debugfs_data->checkpoint_size,
+   (u32 *)(drv->mailbox + offset), offset);
+ sst_pm_runtime_put(drv);
+ return ret;
+}
+
+/* debugfs file: read-only dump of the firmware checkpoint region */
+static const struct file_operations sst_debug_sram_lpe_checkpoint_ops = {
+ .open = simple_open,
+ .read = sst_debug_sram_lpe_checkpoint_read,
+ .llseek = default_llseek,
+};
+
+/* Dump the IA -> LPE mailbox (the send mailbox) to userspace. */
+static ssize_t sst_debug_sram_ia_lpe_mbox_read(struct file *file,
+  char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ int ret;
+
+ ret = is_fw_running(drv);
+ if (ret)
+  return ret;
+
+ ret = copy_sram_to_user_buffer(user_buf, count, ppos, IA_LPE_MAILBOX_DUMP_SZ,
+   (u32 *)(drv->mailbox + SST_MAILBOX_SEND),
+   SST_MAILBOX_SEND);
+ sst_pm_runtime_put(drv);
+ return ret;
+}
+
+/* debugfs file: read-only dump of the IA -> LPE mailbox */
+static const struct file_operations sst_debug_sram_ia_lpe_mbox_ops = {
+ .open = simple_open,
+ .read = sst_debug_sram_ia_lpe_mbox_read,
+ .llseek = default_llseek,
+};
+
+/* Dump the LPE -> IA mailbox (receive mailbox) to userspace. */
+static ssize_t sst_debug_sram_lpe_ia_mbox_read(struct file *file,
+  char __user *user_buf, size_t count, loff_t *ppos)
+{
+
+ struct intel_sst_drv *drv = file->private_data;
+ int ret = 0;
+
+ ret = is_fw_running(drv);
+ if (ret)
+  return ret;
+
+ ret = copy_sram_to_user_buffer(user_buf, count, ppos, LPE_IA_MAILBOX_DUMP_SZ,
+   (u32 *)(drv->mailbox + drv->mailbox_recv_offset),
+   drv->mailbox_recv_offset);
+ sst_pm_runtime_put(drv);
+ return ret;
+}
+
+/* debugfs file: read-only dump of the LPE -> IA mailbox */
+static const struct file_operations sst_debug_sram_lpe_ia_mbox_ops = {
+ .open = simple_open,
+ .read = sst_debug_sram_lpe_ia_mbox_read,
+ .llseek = default_llseek,
+};
+
+/* Dump the LPE -> SCU mailbox to userspace. */
+static ssize_t sst_debug_sram_lpe_scu_mbox_read(struct file *file,
+  char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ int ret = 0;
+
+ ret = is_fw_running(drv);
+ if (ret)
+  return ret;
+ ret = copy_sram_to_user_buffer(user_buf, count, ppos, LPE_SCU_MAILBOX_DUMP_SZ,
+   (u32 *)(drv->mailbox + SST_LPE_SCU_MAILBOX),
+   SST_LPE_SCU_MAILBOX);
+ sst_pm_runtime_put(drv);
+ return ret;
+}
+
+/* debugfs file: read-only dump of the LPE -> SCU mailbox */
+static const struct file_operations sst_debug_sram_lpe_scu_mbox_ops = {
+ .open = simple_open,
+ .read = sst_debug_sram_lpe_scu_mbox_read,
+ .llseek = default_llseek,
+};
+
+/* Dump the SCU -> LPE mailbox to userspace. */
+static ssize_t sst_debug_sram_scu_lpe_mbox_read(struct file *file,
+  char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ int ret;
+
+ ret = is_fw_running(drv);
+ if (ret)
+  return ret;
+ ret = copy_sram_to_user_buffer(user_buf, count, ppos, SCU_LPE_MAILBOX_DUMP_SZ,
+   (u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX),
+   SST_SCU_LPE_MAILBOX);
+ sst_pm_runtime_put(drv);
+ return ret;
+}
+
+/* debugfs file: read-only dump of the SCU -> LPE mailbox */
+static const struct file_operations sst_debug_sram_scu_lpe_mbox_ops = {
+ .open = simple_open,
+ .read = sst_debug_sram_scu_lpe_mbox_read,
+ .llseek = default_llseek,
+};
+
+/*
+ * Parse "<dbg_type> <module_id> <log_level>" (hex, space separated)
+ * from userspace, pack the bytes into snd_sst_lpe_log_params in field
+ * order, and send an IPC_IA_DBG_LOG_ENABLE message to the firmware.
+ * (Fixes mis-encoded '&params' tokens that had been mangled into a
+ * pilcrow character.)
+ */
+static ssize_t sst_debug_lpe_log_enable_write(struct file *file,
+  const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ struct ipc_post *msg = NULL;
+ char buf[32];
+ int str_id = 0; /* DUMMY, required by post message */
+ struct snd_sst_lpe_log_params params;
+ int ret_val = 0;
+ char *start = buf, *end;
+ int i = 0;
+ u8 *addr;
+ unsigned long tmp;
+
+ size_t buf_size = min(count, sizeof(buf)-1);
+ memset(&params, 0, sizeof(params));
+
+ ret_val = is_fw_running(drv);
+ if (ret_val)
+  return ret_val;
+
+ if (copy_from_user(buf, user_buf, buf_size)) {
+  ret_val = -EFAULT;
+  goto put_pm_runtime;
+ }
+
+ buf[buf_size] = 0;
+
+ /* fill the struct byte-wise, one parsed token per member */
+ addr = &params.dbg_type;
+ for (i = 0; i < (sizeof(params) - sizeof(u8)); i++) {
+  while (*start == ' ')
+   start++;
+  end = start;
+  while (isalnum(*end))
+   end++;
+  *end = 0;
+  ret_val = kstrtoul(start, 16, &tmp);
+  if (ret_val) {
+   pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+   goto put_pm_runtime;
+  }
+  *addr++ = (u8)tmp;
+  start = end + 1;
+ }
+
+ pr_debug("dbg_type = %d module_id = %d log_level = %d\n",
+   params.dbg_type, params.module_id, params.log_level);
+
+ if (params.dbg_type < NO_DEBUG || params.dbg_type > PTI_DEBUG) {
+  ret_val = -EINVAL;
+  goto put_pm_runtime;
+ }
+
+ ret_val = sst_create_ipc_msg(&msg, true);
+ if (ret_val != 0)
+  goto put_pm_runtime;
+
+ /* NOTE(review): on MRFLD the header/mailbox are never filled yet the
+  * message is still posted below -- confirm this is intentional */
+ if (sst_drv_ctx->pci_id != SST_MRFLD_PCI_ID) {
+  sst_fill_header(&msg->header, IPC_IA_DBG_LOG_ENABLE, 1,
+    str_id);
+  msg->header.part.data = sizeof(u32) + sizeof(params);
+  memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+  memcpy(msg->mailbox_data + sizeof(u32), &params,
+    sizeof(params));
+ }
+ drv->ops->sync_post_message(msg);
+ ret_val = buf_size;
+put_pm_runtime:
+ sst_pm_runtime_put(drv);
+ return ret_val;
+}
+
+/*
+ * Circular buffer hdr -> 0x1000
+ * log data starts at 0x1010
+ *
+ * Read the firmware's circular log buffer out of SRAM. Two cases:
+ * rd < wr reads one contiguous span; rd > wr reads up to end_addr and,
+ * if userspace consumed all of it, wraps to the base for the rest.
+ * The read pointer in SRAM is advanced by however much was consumed.
+ * (Fixes the "SRAM emptry" typo in the log message.)
+ */
+static ssize_t sst_debug_lpe_log_enable_read(struct file *file,
+  char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ struct lpe_log_buf_hdr buf_hdr;
+ size_t size1, size2, offset, bytes_read;
+ char *buf = NULL;
+ int ret;
+
+ ret = is_fw_running(drv);
+ if (ret)
+  return ret;
+
+ /* Get the sram lpe log buffer header */
+ memcpy_fromio(&buf_hdr, (u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX),
+   sizeof(buf_hdr));
+ if (buf_hdr.rd_addr == buf_hdr.wr_addr) {
+  pr_err("SRAM empty\n");
+  ret = -ENODATA;
+  goto put_pm_runtime;
+ } else if (buf_hdr.rd_addr < buf_hdr.wr_addr) {
+  /* contiguous: read straight from rd_addr to wr_addr */
+  size1 = buf_hdr.wr_addr - buf_hdr.rd_addr;
+  offset = (buf_hdr.rd_addr - buf_hdr.base_addr)
+    + SST_SCU_LPE_LOG_BUF;
+  pr_debug("Size = %zu, offset = %zx\n", size1, offset);
+  buf = vmalloc(size1);
+  if (buf == NULL) {
+   pr_err("Not enough memory to allocate\n");
+   ret = -ENOMEM;
+   goto put_pm_runtime;
+  }
+  memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size1);
+  bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+    buf, size1);
+
+  buf_hdr.rd_addr = buf_hdr.rd_addr + bytes_read;
+
+ } else {
+  /* Read including the end address as well */
+  size1 = buf_hdr.end_addr - buf_hdr.rd_addr + 1;
+  offset = (buf_hdr.rd_addr - buf_hdr.base_addr)
+    + SST_SCU_LPE_LOG_BUF;
+  pr_debug("Size = %zu, offset = %zx\n", size1, offset);
+  buf = vmalloc(size1);
+  if (buf == NULL) {
+   pr_err("Not enough memory to allocate\n");
+   ret = -ENOMEM;
+   goto put_pm_runtime;
+  }
+  memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size1);
+  bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+    buf, size1);
+  if (bytes_read != size1) {
+   /* userspace took less than the tail span: no wrap yet */
+   buf_hdr.rd_addr = buf_hdr.rd_addr + bytes_read;
+   goto update_rd_ptr;
+  }
+
+  /* Wrap around lpe log buffer here */
+  vfree(buf);
+  buf = NULL;
+  size2 = (buf_hdr.wr_addr - buf_hdr.base_addr);
+  offset = SST_SCU_LPE_LOG_BUF;
+  pr_debug("Size = %zu, offset = %zx\n", size2, offset);
+  buf = vmalloc(size2);
+  if (buf == NULL) {
+   pr_err("Not enough memory to allocate\n");
+   ret = -ENOMEM;
+   goto put_pm_runtime;
+  }
+  memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size2);
+  bytes_read += simple_read_from_buffer(user_buf,
+    (count - bytes_read), ppos, buf, size2);
+  buf_hdr.rd_addr = buf_hdr.base_addr + bytes_read - size1;
+
+ }
+update_rd_ptr:
+ if (bytes_read != 0) {
+  /* rd_addr is the third u32 of the header in SRAM */
+  memcpy_toio((u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX +
+    2 * sizeof(u32)), &(buf_hdr.rd_addr), sizeof(u32));
+  pr_debug("read pointer restored\n");
+ }
+ vfree(buf);
+ buf = NULL;
+ ret = bytes_read;
+put_pm_runtime:
+ sst_pm_runtime_put(drv);
+ return ret;
+}
+
+/* debugfs "lpe_log_enable": write configures logging, read drains it */
+static const struct file_operations sst_debug_lpe_log_enable_ops = {
+ .open = simple_open,
+ .write = sst_debug_lpe_log_enable_write,
+ .read = sst_debug_lpe_log_enable_read,
+ .llseek = default_llseek,
+};
+
+/* Report whether runtime PM is currently "enabled" or "disabled". */
+static ssize_t sst_debug_rtpm_read(struct file *file, char __user *user_buf,
+   size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ char *status;
+
+ int usage = atomic_read(&drv->pm_usage_count);
+
+ /* usage count is only logged, not reported to userspace */
+ pr_debug("RTPM usage: %d\n", usage);
+ status = drv->debugfs.runtime_pm_status ? "enabled\n" : "disabled\n";
+ return simple_read_from_buffer(user_buf, count, ppos,
+   status, strlen(status));
+}
+
+/*
+ * Accept "enable" or "disable" and toggle runtime PM for the device via
+ * pm_runtime_allow()/pm_runtime_forbid(). Rejects a no-op transition
+ * with -EINVAL.
+ */
+static ssize_t sst_debug_rtpm_write(struct file *file,
+  const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct intel_sst_drv *drv = file->private_data;
+ char buf[16];
+ int sz = min(count, sizeof(buf)-1);
+
+ int usage = atomic_read(&drv->pm_usage_count);
+
+ pr_debug("RTPM Usage: %d\n", usage);
+ if (copy_from_user(buf, user_buf, sz))
+  return -EFAULT;
+ buf[sz] = 0;
+
+ if (!strncmp(buf, "enable\n", sz)) {
+  /* already enabled? */
+  if (drv->debugfs.runtime_pm_status)
+   return -EINVAL;
+  drv->debugfs.runtime_pm_status = 1;
+  pm_runtime_allow(drv->dev);
+  sz = 6; /* strlen("enable") */
+ } else if (!strncmp(buf, "disable\n", sz)) {
+  if (!drv->debugfs.runtime_pm_status)
+   return -EINVAL;
+  drv->debugfs.runtime_pm_status = 0;
+  pm_runtime_forbid(drv->dev);
+  sz = 7; /* strlen("disable") */
+ } else
+  return -EINVAL;
+ return sz;
+}
+
+/* debugfs "runtime_pm": read reports status, write toggles it */
+static const struct file_operations sst_debug_rtpm_ops = {
+ .open = simple_open,
+ .read = sst_debug_rtpm_read,
+ .write = sst_debug_rtpm_write,
+ .llseek = default_llseek,
+};
+
+
+/*
+ * Serve a usage README describing the debugfs interface: a common part
+ * plus a platform-specific appendix (Clovertrail or Merrifield).
+ * (Removes a stray ';' after the switch and adds the missing default
+ * break.)
+ */
+static ssize_t sst_debug_readme_read(struct file *file, char __user *user_buf,
+   size_t count, loff_t *ppos)
+{
+ const char *buf =
+  "\nAll files can be read using 'cat'\n"
+  "1. 'echo disable > runtime_pm' disables runtime PM and will prevent SST from suspending.\n"
+  "To enable runtime PM, echo 'enable' to runtime_pm. Dmesg will print the runtime pm usage\n"
+  "if logs are enabled.\n"
+  "2. Write to shim register using 'echo <addr> <value> > shim_dump'.\n"
+  "Valid address range is between 0x00 to 0x80 in increments of 8.\n"
+  "3. echo 1 > fw_clear_context , This sets the flag to skip the context restore\n"
+  "4. echo 1 > fw_clear_cache , This sets the flag to clear the cached copy of firmware\n"
+  "5. echo 1 > fw_reset_state ,This sets the fw state to uninit\n"
+  "6. echo memcpy > fw_dwnld_mode, This will set the firmware download mode to memcpy\n"
+  " echo lli > fw_dwnld_mode, This will set the firmware download mode to\n"
+  "dma lli mode\n"
+  " echo dma > fw_dwnld_mode, This will set the firmware download mode to\n"
+  "dma single block mode\n"
+  "7. iram_dump, dram_dump, interfaces provide mmap support to\n"
+  "get the iram and dram dump, these buffers will have data only\n"
+  "after the recovery is triggered\n";
+
+ const char *ctp_buf =
+  "8. Enable input clock by 'echo enable > osc_clk0'.\n"
+  "This prevents the input OSC clock from switching off till it is disabled by\n"
+  "'echo disable > osc_clk0'. The status of the clock indicated who are using it.\n"
+  "9. lpe_log_enable usage:\n"
+  " echo <dbg_type> <module_id> <log_level> > lpe_log_enable.\n"
+  "10. cat fw_ssp_reg,This will dump the ssp register contents\n"
+  "11. cat fw_dma_reg,This will dump the dma register contents\n";
+
+ const char *mrfld_buf =
+  "8. lpe_log_enable usage:\n"
+  " echo <dbg_type> <module_id> <log_level> > lpe_log_enable.\n"
+  "9. cat fw_ssp_reg,This will dump the ssp register contents\n"
+  "10. cat fw_dma_reg,This will dump the dma register contents\n"
+  "11. ddr_imr_dump interface provides mmap support to get the imr dump,\n"
+  "this buffer will have data only after the recovery is triggered\n"
+  "12. ipc usage:\n"
+  "\t ipc file works only in binary mode. The ipc format is <IPC hdr><dsp hdr><payload>.\n"
+  "\t drv_id in the ipc header will be overwritten with unique driver id in the driver\n";
+
+ char *readme = NULL;
+ const char *buf2 = NULL;
+ int size, ret = 0;
+
+ /* pick the platform appendix and size the concatenated text */
+ switch (sst_drv_ctx->pci_id) {
+ case SST_CLV_PCI_ID:
+  size = strlen(buf) + strlen(ctp_buf) + 2;
+  buf2 = ctp_buf;
+  break;
+ case SST_MRFLD_PCI_ID:
+  size = strlen(buf) + strlen(mrfld_buf) + 2;
+  buf2 = mrfld_buf;
+  break;
+ default:
+  size = strlen(buf) + 1;
+  break;
+ }
+
+ readme = kmalloc(size, GFP_KERNEL);
+ if (readme == NULL) {
+  pr_err("%s: no memory\n", __func__);
+  return -ENOMEM;
+ }
+
+ if (buf2)
+  sprintf(readme, "%s%s\n", buf, buf2);
+ else
+  sprintf(readme, "%s\n", buf);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+   readme, strlen(readme));
+ kfree(readme);
+ return ret;
+}
+
+/* debugfs "README": read-only usage text for the debug interface */
+static const struct file_operations sst_debug_readme_ops = {
+ .open = simple_open,
+ .read = sst_debug_readme_read,
+};
+
+/*
+ * Report the OSC clk0 usage mask via the SCU IPC query; reports "0x-1"
+ * (mode stays -1) when CONFIG_INTEL_SCU_IPC_UTIL is not built in.
+ */
+static ssize_t sst_debug_osc_clk0_read(struct file *file, char __user *user_buf,
+   size_t count, loff_t *ppos)
+{
+ char status[16];
+ int mode = -1;
+#ifdef CONFIG_INTEL_SCU_IPC_UTIL
+ mode = intel_scu_ipc_set_osc_clk0(0, CLK0_QUERY);
+#endif
+
+ snprintf(status, 16, "0x%x\n", mode);
+ return simple_read_from_buffer(user_buf, count, ppos,
+   status, strlen(status));
+}
+
+static ssize_t sst_debug_osc_clk0_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[16];
+ int sz = min(count, sizeof(buf)-1);
+
+ if (copy_from_user(buf, user_buf, sz))
+ return -EFAULT;
+ buf[sz] = 0;
+
+#ifdef CONFIG_INTEL_SCU_IPC_UTIL
+ if (!strncmp(buf, "enable\n", sz)) {
+ intel_scu_ipc_set_osc_clk0(true, CLK0_DEBUG);
+ sz = 6; /* strlen("enable") */
+ } else if (!strncmp(buf, "disable\n", sz)) {
+ intel_scu_ipc_set_osc_clk0(false, CLK0_DEBUG);
+ sz = 7; /* strlen("disable") */
+ } else
+ return -EINVAL;
+#endif
+ return sz;
+}
+
+static const struct file_operations sst_debug_osc_clk0_ops = {
+ .open = simple_open,
+ .read = sst_debug_osc_clk0_read,
+ .write = sst_debug_osc_clk0_write,
+};
+
+/* debugfs "fw_clear_context" read: report whether the saved firmware
+ * context will be discarded on the next firmware restore.
+ */
+static ssize_t sst_debug_fw_clear_cntx_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *status;
+
+	status = atomic_read(&sst_drv_ctx->fw_clear_context) ? \
+			"clear fw cntx\n" : "do not clear fw cntx\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+
+}
+
+/* debugfs "fw_clear_context" write: "1" sets the flag, anything else
+ * clears it (both count as a successful write).
+ */
+static ssize_t sst_debug_fw_clear_cntx_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		atomic_set(&sst_drv_ctx->fw_clear_context, 1);
+	else
+		atomic_set(&sst_drv_ctx->fw_clear_context, 0);
+
+	return sz;
+
+}
+
+static const struct file_operations sst_debug_fw_clear_cntx = {
+	.open = simple_open,
+	.read = sst_debug_fw_clear_cntx_read,
+	.write = sst_debug_fw_clear_cntx_write,
+};
+
+/* debugfs "fw_clear_cache" read: report the cache-clear flag state. */
+static ssize_t sst_debug_fw_clear_cache_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *status;
+
+	status = atomic_read(&sst_drv_ctx->fw_clear_cache) ? \
+			"cache clear flag set\n" : "cache clear flag not set\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+
+}
+
+/* debugfs "fw_clear_cache" write: only "1" is accepted (flag can be set
+ * but not cleared from here); anything else returns -EINVAL.
+ */
+static ssize_t sst_debug_fw_clear_cache_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		atomic_set(&sst_drv_ctx->fw_clear_cache, 1);
+	else
+		return -EINVAL;
+
+	return sz;
+}
+
+static const struct file_operations sst_debug_fw_clear_cache = {
+	.open = simple_open,
+	.read = sst_debug_fw_clear_cache_read,
+	.write = sst_debug_fw_clear_cache_write,
+};
+
+/* debugfs "fw_reset_state" read: print the numeric sst_state value. */
+static ssize_t sst_debug_fw_reset_state_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char state[16];
+
+	sprintf(state, "%d\n", sst_drv_ctx->sst_state);
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			state, strlen(state));
+
+}
+
+/* debugfs "fw_reset_state" write: only "1" is accepted and forces the
+ * firmware state machine back to SST_UN_INIT.
+ */
+static ssize_t sst_debug_fw_reset_state_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+	else
+		return -EINVAL;
+
+	return sz;
+
+}
+
+static const struct file_operations sst_debug_fw_reset_state = {
+	.open = simple_open,
+	.read = sst_debug_fw_reset_state_read,
+	.write = sst_debug_fw_reset_state_write,
+};
+
+/* debugfs "fw_dwnld_mode" read: report the current firmware download
+ * method: "memcpy", "dma" (single block), or "lli" (linked-list DMA).
+ */
+static ssize_t sst_debug_dwnld_mode_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *state = "error\n";
+
+	if (sst_drv_ctx->use_dma == 0) {
+		state = "memcpy\n";
+	} else if (sst_drv_ctx->use_dma == 1) {
+		state = sst_drv_ctx->use_lli ? \
+				"lli\n" : "dma\n";
+
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			state, strlen(state));
+
+}
+
+/* debugfs "fw_dwnld_mode" write: accept "memcpy", "lli" or "dma" and
+ * switch the download method. The firmware must be suspended/uninit.
+ * Unrecognized input is rejected with -EINVAL; the fw_clear_cache flag
+ * is only set once a valid mode has been selected, so an invalid write
+ * no longer silently "succeeds" while leaving the mode unchanged.
+ */
+static ssize_t sst_debug_dwnld_mode_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (sst_drv_ctx->sst_state != SST_SUSPENDED &&
+			sst_drv_ctx->sst_state != SST_UN_INIT) {
+		pr_err("FW should be in suspended/uninit state\n");
+		return -EFAULT;
+	}
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = '\0';
+
+	if (!strncmp(buf, "memcpy\n", sz)) {
+		sst_drv_ctx->use_dma = 0;
+	} else if (!strncmp(buf, "lli\n", sz)) {
+		sst_drv_ctx->use_dma = 1;
+		sst_drv_ctx->use_lli = 1;
+	} else if (!strncmp(buf, "dma\n", sz)) {
+		sst_drv_ctx->use_dma = 1;
+		sst_drv_ctx->use_lli = 0;
+	} else {
+		return -EINVAL;
+	}
+
+	/* Firmware needs to be downloaded again to populate the lists */
+	atomic_set(&sst_drv_ctx->fw_clear_cache, 1);
+
+	return sz;
+
+}
+
+static const struct file_operations sst_debug_dwnld_mode = {
+	.open = simple_open,
+	.read = sst_debug_dwnld_mode_read,
+	.write = sst_debug_dwnld_mode_write,
+};
+
+/* Append a formatted dump of one SSP port's registers to buf, starting at
+ * offset pos; returns the new end-of-data offset.
+ */
+static int dump_ssp_port(void __iomem *ssp_base, char *buf, int pos)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ssp_reg_off); i++)
+		pos += sprintf(buf + pos, "Reg: 0x%x: 0x%x\n", ssp_reg_off[i],
+				sst_reg_read(ssp_base, ssp_reg_off[i]));
+	return pos;
+}
+
+/* debugfs "fw_ssp_reg" read: dump the registers of every mapped SSP port.
+ * The unused per-port offset computation ("off") has been removed: each
+ * port already has its own iomem mapping in debugfs.ssp[i], so the offset
+ * was dead code.
+ */
+static ssize_t sst_debug_ssp_reg_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buf;
+	int i, pos = 0;
+	struct intel_sst_drv *drv = file->private_data;
+	int num_ssp, buf_size, ret;
+
+	num_ssp = sst_drv_ctx->pdata->debugfs_data->num_ssp;
+	buf_size = DEBUGFS_SSP_BUF_SIZE * num_ssp;
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* registers are only readable while the DSP firmware is running */
+	ret = is_fw_running(drv);
+	if (ret)
+		goto err;
+
+	buf[0] = 0;
+
+	for (i = 0; i < num_ssp ; i++) {
+		if (!sst_drv_ctx->debugfs.ssp[i]) {
+			pr_err("ssp %d port not mapped\n", i);
+			continue;
+		}
+		pos = dump_ssp_port((sst_drv_ctx->debugfs.ssp[i]), buf, pos);
+	}
+	sst_pm_runtime_put(drv);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+err:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations sst_debug_ssp_reg = {
+	.open = simple_open,
+	.read = sst_debug_ssp_reg_read,
+};
+
+/* Append a formatted dump of one DMA controller's registers to buf at pos.
+ * First walks the per-channel register block (SAR/DAR/LLP/CTL/CFG, with
+ * hard-coded offset strides matching the DesignWare DMA layout - the off
+ * increments are intentionally irregular), then the controller-wide
+ * registers listed in dma_reg_off[]. Returns the new end-of-data offset.
+ */
+static int dump_dma_reg(char *buf, int pos, int dma)
+{
+	int i, index = 0;
+	int off = 0 ;
+	void __iomem *dma_reg;
+
+	if (!sst_drv_ctx->debugfs.dma_reg[dma]) {
+		pr_err("dma %d not mapped\n", dma);
+		return pos;
+	}
+
+	pos += sprintf(buf + pos, "\nDump DMA%d Reg\n\n", dma);
+
+	dma_reg = sst_drv_ctx->debugfs.dma_reg[dma];
+
+	/* Dump the DMA channel registers */
+	for (i = 0; i < DMA_NUM_CH; i++) {
+		pos += sprintf(buf + pos, "SAR%d: 0x%x: 0x%llx\n", i, off,
+				sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "DAR%d: 0x%x: 0x%llx\n", i, off,
+				sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "LLP%d: 0x%x: 0x%llx\n", i, off,
+				sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "CTL%d: 0x%x: 0x%llx\n", i, off,
+				sst_reg_read64(dma_reg, off));
+		off += 0x28;
+
+		pos += sprintf(buf + pos, "CFG%d: 0x%x: 0x%llx\n", i, off,
+				sst_reg_read64(dma_reg, off));
+		off += 0x18;
+	}
+
+	/* Dump the remaining DMA registers */
+	while (index < ARRAY_SIZE(dma_reg_off)) {
+		pos += sprintf(buf + pos, "Reg: 0x%x: 0x%llx\n", dma_reg_off[index],
+				sst_reg_read64(dma_reg, dma_reg_off[index]));
+		index++;
+	}
+	return pos;
+}
+
+/* debugfs "fw_dma_reg" read: dump all mapped DMA controllers while the
+ * firmware is running (is_fw_running() takes a runtime-pm reference that
+ * is released after the dump).
+ */
+static ssize_t sst_debug_dma_reg_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buf;
+	int pos = 0;
+	int ret, i;
+	struct intel_sst_drv *drv = file->private_data;
+	int num_dma, buf_size;
+
+	num_dma = sst_drv_ctx->pdata->debugfs_data->num_dma;
+	buf_size = DEBUGFS_DMA_BUF_SIZE * num_dma;
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = is_fw_running(drv);
+	if (ret)
+		goto err;
+
+	buf[0] = 0;
+
+	for (i = 0; i < num_dma; i++)
+		pos = dump_dma_reg(buf, pos, i);
+
+	sst_pm_runtime_put(drv);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+err:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations sst_debug_dma_reg = {
+	.open = simple_open,
+	.read = sst_debug_dma_reg_read,
+};
+
+/**
+ * sst_debug_remap - function remaps the iram/dram buff to userspace
+ *
+ * @vma: vm_area_struct passed from userspace
+ * @buf: Physical addr of the pointer to be remapped
+ * @type: type of the buffer (currently unused by this helper)
+ *
+ * Remaps the kernel buffer to the userspace.
+ * NOTE(review): PAGE_ALIGN rounds @buf UP to the next page boundary, so a
+ * non-page-aligned buffer would have its first partial page skipped -
+ * confirm the dump buffers are page-aligned allocations.
+ */
+static int sst_debug_remap(struct vm_area_struct *vma, char *buf,
+					enum sst_ram_type type)
+{
+	int retval, length;
+	void *mem_area;
+
+	if (!buf)
+		return -EIO;
+
+	length = vma->vm_end - vma->vm_start;
+	pr_debug("iram length 0x%x\n", length);
+
+	/* round it up to the page bondary */
+	mem_area = (void *)PAGE_ALIGN((unsigned long)buf);
+
+	/* map the whole physically contiguous area in one piece */
+	retval = remap_pfn_range(vma,
+			vma->vm_start,
+			virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
+			length,
+			vma->vm_page_prot);
+	if (retval)
+		pr_err("mapping failed %d ", retval);
+	return retval;
+}
+
+/* mmap handler for the "iram_dump" debugfs file */
+int sst_debug_iram_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->dump_buf.iram_buf.buf, SST_IRAM);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_iram_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_iram_dump_mmap,
+};
+
+/* mmap handler for the "dram_dump" debugfs file */
+int sst_debug_dram_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->dump_buf.dram_buf.buf, SST_DRAM);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_dram_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_dram_dump_mmap,
+};
+
+/* mmap handler for the "ddr_imr_dump" debugfs file (valid after recovery) */
+int sst_debug_ddr_imr_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->ddr, 0);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_ddr_imr_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_ddr_imr_dump_mmap,
+};
+
+/* debugfs "ipc" write: inject a raw IPC message into the DSP.
+ * Input format is <IPC hdr (8 bytes)><dsp hdr><payload>, max 1024 bytes.
+ * The drv_id field is overwritten with a fresh private id; if the header
+ * requests a response (res_rqd), the call blocks for the reply and, for
+ * IPC_GET_PARAMS, caches the returned data for the subsequent read().
+ */
+static ssize_t sst_debug_ipc_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *ctx = (struct intel_sst_drv *)file->private_data;
+	unsigned char *buf;
+	struct sst_block *block = NULL;
+	struct ipc_dsp_hdr *dsp_hdr;
+	struct ipc_post *msg = NULL;
+	int ret, res_rqd, msg_id, drv_id;
+	u32 low_payload;
+
+	if (count > 1024)
+		return -EINVAL;
+
+	/* takes a runtime-pm reference, dropped at put_pm_runtime */
+	ret = is_fw_running(ctx);
+	if (ret)
+		return ret;
+
+	buf = kzalloc((sizeof(unsigned char) * (count)), GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto put_pm_runtime;
+	}
+	if (copy_from_user(buf, user_buf, count)) {
+		ret = -EFAULT;
+		goto free_mem;
+	}
+
+	if (sst_create_ipc_msg(&msg, true)) {
+		ret = -ENOMEM;
+		goto free_mem;
+	}
+
+	msg->mrfld_header.full = *((u64 *)buf);
+	pr_debug("ipc hdr: %llx\n", msg->mrfld_header.full);
+
+	/* Override the drv id with unique drv id */
+	drv_id = sst_assign_pvt_id(ctx);
+	msg->mrfld_header.p.header_high.part.drv_id = drv_id;
+
+	res_rqd = msg->mrfld_header.p.header_high.part.res_rqd;
+	msg_id = msg->mrfld_header.p.header_high.part.msg_id;
+	pr_debug("res_rqd: %d, msg_id: %d, drv_id: %d\n",
+			res_rqd, msg_id, drv_id);
+	if (res_rqd) {
+		block = sst_create_block(ctx, msg_id, drv_id);
+		if (block == NULL) {
+			ret = -ENOMEM;
+			kfree(msg);
+			goto free_mem;
+		}
+	}
+
+	dsp_hdr = (struct ipc_dsp_hdr *)(buf + 8);
+	pr_debug("dsp hdr: %llx\n", *((u64 *)(dsp_hdr)));
+	low_payload = msg->mrfld_header.p.header_low_payload;
+	if (low_payload > (1024 - sizeof(union ipc_header_mrfld))) {
+		pr_err("Invalid low payload length: %x\n", low_payload);
+		ret = -EINVAL;
+		kfree(msg);
+		goto free_block;
+	}
+
+	/* message ownership passes to the dispatch list after this point */
+	memcpy(msg->mailbox_data, (buf+(sizeof(union ipc_header_mrfld))),
+			low_payload);
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	if (res_rqd) {
+		ret = sst_wait_timeout(ctx, block);
+		if (ret) {
+			pr_err("%s: fw returned err %d\n", __func__, ret);
+			goto free_block;
+		}
+
+		if (msg_id == IPC_GET_PARAMS) {
+			unsigned char *r = block->data;
+			/* NOTE(review): dsp_hdr->length is copied without a
+			 * bound check against get_params_data's size -
+			 * confirm the buffer is large enough. */
+			memcpy(ctx->debugfs.get_params_data, r, dsp_hdr->length);
+			ctx->debugfs.get_params_len = dsp_hdr->length;
+		}
+
+	}
+	ret = count;
+free_block:
+	if (res_rqd)
+		sst_free_block(sst_drv_ctx, block);
+free_mem:
+	kfree(buf);
+put_pm_runtime:
+	sst_pm_runtime_put(ctx);
+	return ret;
+}
+
+/* debugfs "ipc" read: return the data cached by the last IPC_GET_PARAMS */
+static ssize_t sst_debug_ipc_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *ctx = (struct intel_sst_drv *)file->private_data;
+	return simple_read_from_buffer(user_buf, count, ppos,
+			ctx->debugfs.get_params_data,
+			ctx->debugfs.get_params_len);
+}
+
+static const struct file_operations sst_debug_ipc_ops = {
+	.open = simple_open,
+	.write = sst_debug_ipc_write,
+	.read = sst_debug_ipc_read,
+};
+
+/* Descriptor for one debugfs entry: file name, fops and permission mode. */
+struct sst_debug {
+	const char *name;
+	const struct file_operations *fops;
+	umode_t mode;
+};
+
+/* Entries created on every supported platform */
+static const struct sst_debug sst_common_dbg_entries[] = {
+	{"runtime_pm", &sst_debug_rtpm_ops, 0600},
+	{"shim_dump", &sst_debug_shim_ops, 0600},
+	{"fw_clear_context", &sst_debug_fw_clear_cntx, 0600},
+	{"fw_clear_cache", &sst_debug_fw_clear_cache, 0600},
+	{"fw_reset_state", &sst_debug_fw_reset_state, 0600},
+	{"fw_dwnld_mode", &sst_debug_dwnld_mode, 0600},
+	{"iram_dump", &sst_debug_iram_dump, 0400},
+	{"dram_dump", &sst_debug_dram_dump, 0400},
+	{"sram_ia_lpe_mailbox", &sst_debug_sram_ia_lpe_mbox_ops, 0400},
+	{"sram_lpe_ia_mailbox", &sst_debug_sram_lpe_ia_mbox_ops, 0400},
+	{"README", &sst_debug_readme_ops, 0400},
+};
+
+/* Additional entries for Clovertrail (SST_CLV_PCI_ID) */
+static const struct sst_debug ctp_dbg_entries[] = {
+	{"sram_lpe_debug", &sst_debug_sram_lpe_debug_ops, 0400},
+	{"sram_lpe_checkpoint", &sst_debug_sram_lpe_checkpoint_ops, 0400},
+	{"sram_lpe_scu_mailbox", &sst_debug_sram_lpe_scu_mbox_ops, 0400},
+	{"sram_scu_lpe_mailbox", &sst_debug_sram_scu_lpe_mbox_ops, 0400},
+	{"lpe_log_enable", &sst_debug_lpe_log_enable_ops, 0400},
+	{"fw_ssp_reg", &sst_debug_ssp_reg, 0400},
+	{"fw_dma_reg", &sst_debug_dma_reg, 0400},
+	{"osc_clk0", &sst_debug_osc_clk0_ops, 0600},
+};
+
+/* Additional entries for Merrifield (SST_MRFLD_PCI_ID) */
+static const struct sst_debug mrfld_dbg_entries[] = {
+	{"sram_lpe_checkpoint", &sst_debug_sram_lpe_checkpoint_ops, 0400},
+	{"fw_ssp_reg", &sst_debug_ssp_reg, 0400},
+	{"fw_dma_reg", &sst_debug_dma_reg, 0400},
+	{"ddr_imr_dump", &sst_debug_ddr_imr_dump, 0400},
+	{"ipc", &sst_debug_ipc_ops, 0400},
+};
+
+/* Create the given debugfs entries under sst->debugfs.root.
+ * Note: creation stops at the first failure (remaining entries are not
+ * created) - debugfs is best-effort, so no error is propagated.
+ */
+void sst_debugfs_create_files(struct intel_sst_drv *sst,
+			const struct sst_debug *entries, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		struct dentry *dentry;
+		const struct sst_debug *entry = &entries[i];
+
+		dentry = debugfs_create_file(entry->name, entry->mode,
+				sst->debugfs.root, sst, entry->fops);
+		if (dentry == NULL) {
+			pr_err("Failed to create %s file\n", entry->name);
+			return;
+		}
+	}
+}
+
+/* Set up the "sst" debugfs directory: common entries plus the
+ * platform-specific set selected by PCI device id.
+ */
+void sst_debugfs_init(struct intel_sst_drv *sst)
+{
+	int size = 0;
+	const struct sst_debug *debug = NULL;
+
+	sst->debugfs.root = debugfs_create_dir("sst", NULL);
+	if (IS_ERR(sst->debugfs.root) || !sst->debugfs.root) {
+		pr_err("Failed to create debugfs directory\n");
+		return;
+	}
+
+	sst_debugfs_create_files(sst, sst_common_dbg_entries,
+				ARRAY_SIZE(sst_common_dbg_entries));
+
+	/* Initial status is enabled */
+	sst->debugfs.runtime_pm_status = 1;
+
+	if (sst->pci_id == SST_MRFLD_PCI_ID) {
+		debug = mrfld_dbg_entries;
+		size = ARRAY_SIZE(mrfld_dbg_entries);
+	} else if (sst->pci_id == SST_CLV_PCI_ID) {
+		debug = ctp_dbg_entries;
+		size = ARRAY_SIZE(ctp_dbg_entries);
+	}
+
+	if (debug)
+		sst_debugfs_create_files(sst, debug, size);
+
+}
+
+/* Tear down debugfs; re-allow runtime PM if the debug knob had it enabled */
+void sst_debugfs_exit(struct intel_sst_drv *sst)
+{
+	if (sst->debugfs.runtime_pm_status)
+		pm_runtime_allow(sst->dev);
+	debugfs_remove_recursive(sst->debugfs.root);
+}
--- /dev/null
+/*
+ * sst_drv_interface.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file defines the interface between the platform driver and the SST
+ * driver.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/math64.h>
+#include <linux/intel_mid_pm.h>
+#include <sound/compress_offload.h>
+#include <sound/pcm.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define NUM_CODEC 2
+#define MIN_FRAGMENT 2
+#define MAX_FRAGMENT 4
+#define MIN_FRAGMENT_SIZE (50 * 1024)
+#define MAX_FRAGMENT_SIZE (1024 * 1024)
+#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
+
+/* Restore the previously saved DSP firmware context after a download.
+ * Sends IPC_IA_SET_FW_CTXT with the physical address/size of the saved
+ * context buffer and blocks for the reply. Skipped (once) when the
+ * debugfs fw_clear_context flag is set, or when nothing was saved.
+ */
+void sst_restore_fw_context(void)
+{
+	struct snd_sst_ctxt_params fw_context;
+	struct ipc_post *msg = NULL;
+	int retval = 0;
+	struct sst_block *block;
+
+	/* Skip the context restore, when fw_clear_context is set */
+	/* fw_clear_context set through debugfs support */
+	if (atomic_read(&sst_drv_ctx->fw_clear_context)) {
+		pr_debug("Skipping restore_fw_context\n");
+		atomic_set(&sst_drv_ctx->fw_clear_context, 0);
+		return;
+	}
+
+	pr_debug("restore_fw_context\n");
+	/*nothing to restore*/
+	if (!sst_drv_ctx->fw_cntx_size)
+		return;
+	pr_debug("restoring context......\n");
+	/*send msg to fw*/
+	retval = sst_create_block_and_ipc_msg(&msg, true, sst_drv_ctx, &block,
+				IPC_IA_SET_FW_CTXT, 0);
+	if (retval) {
+		pr_err("Can't allocate block/msg. No restore fw_context\n");
+		return;
+	}
+
+	sst_drv_ctx->sst_state = SST_FW_CTXT_RESTORE;
+	sst_fill_header(&msg->header, IPC_IA_SET_FW_CTXT, 1, 0);
+
+	msg->header.part.data = sizeof(fw_context) + sizeof(u32);
+	fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
+	fw_context.size = sst_drv_ctx->fw_cntx_size;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32),
+				&fw_context, sizeof(fw_context));
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	sst_free_block(sst_drv_ctx, block);
+	if (retval)
+		pr_err("sst_restore_fw_context..timeout!\n");
+	return;
+}
+
+/*
+ * sst_download_fw - download the audio firmware to DSP
+ *
+ * This function is called when the FW needs to be downloaded to SST DSP
+ * engine. On success it restores the saved DSP context (if the ops
+ * provide a restore hook) and marks the firmware as running.
+ */
+int sst_download_fw(void)
+{
+	int retval = 0;
+
+	retval = sst_load_fw();
+	if (retval)
+		return retval;
+	pr_debug("fw loaded successful!!!\n");
+
+	if (sst_drv_ctx->ops->restore_dsp_context)
+		sst_drv_ctx->ops->restore_dsp_context();
+	sst_drv_ctx->sst_state = SST_FW_RUNNING;
+	return retval;
+}
+
+/* Free the firmware stream for str_id; if the free IPC itself fails,
+ * clean up the local stream bookkeeping so the slot is reusable.
+ */
+int free_stream_context(unsigned int str_id)
+{
+	struct stream_info *stream;
+	int ret = 0;
+
+	stream = get_stream_info(str_id);
+	if (stream) {
+		/* str_id is valid, so stream is allocated */
+		ret = sst_free_stream(str_id);
+		if (ret)
+			sst_clean_stream(&sst_drv_ctx->streams[str_id]);
+		return ret;
+	}
+	return ret;
+}
+
+/*
+ * sst_send_algo_param - send LPE Mixer param to SST
+ *
+ * Packs <ipc header word><algo header><algo payload> into the mailbox and
+ * posts it to the DSP. Fails with -ENOMEM if the message would exceed the
+ * mailbox or the IPC message cannot be allocated.
+ */
+static int sst_send_algo_param(struct snd_ppp_params *algo_params)
+{
+	struct ipc_post *msg = NULL;
+	u32 offset;
+	/* algo header = struct minus the trailing params pointer */
+	u32 header_size = sizeof(*algo_params) - sizeof(algo_params->params);
+	u32 ipc_msg_size = sizeof(u32) + header_size + algo_params->size;
+
+	if (ipc_msg_size > SST_MAILBOX_SIZE)
+		return -ENOMEM;
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+	sst_fill_header(&msg->header,
+			IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
+	msg->header.part.data = ipc_msg_size;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	offset = sizeof(u32);
+	memcpy(msg->mailbox_data + offset, algo_params, header_size);
+	offset += header_size;
+	memcpy(msg->mailbox_data + offset, algo_params->params,
+			algo_params->size);
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return 0;
+}
+
+/* Build and send the LPE mixer stream-config algo parameter from the
+ * current device_input_mixer setting. Powers the device up for the call
+ * and releases it afterwards.
+ */
+static int sst_send_lpe_mixer_algo_params(void)
+{
+	struct snd_ppp_params algo_param;
+	struct snd_ppp_mixer_params mixer_param;
+	unsigned int input_mixer, stream_device_id;
+	int retval = 0;
+
+	retval = intel_sst_check_device();
+	if (retval) {
+		pr_err("sst_check_device failed %d\n", retval);
+		return retval;
+	}
+
+	/* snapshot the mixer settings under the lock */
+	mutex_lock(&sst_drv_ctx->mixer_ctrl_lock);
+	input_mixer = (sst_drv_ctx->device_input_mixer)
+			& SST_INPUT_STREAM_MIXED;
+	pr_debug("Input Mixer settings %d", input_mixer);
+	stream_device_id = sst_drv_ctx->device_input_mixer - input_mixer;
+	algo_param.algo_id = SST_ALGO_MIXER;
+	algo_param.str_id = stream_device_id;
+	algo_param.enable = 1;
+	algo_param.operation = SST_SET_ALGO;
+	algo_param.size = sizeof(mixer_param);
+	mixer_param.type = SST_ALGO_PARAM_MIXER_STREAM_CFG;
+	mixer_param.input_stream_bitmap = input_mixer;
+	mixer_param.size = sizeof(input_mixer);
+	algo_param.params = &mixer_param;
+	mutex_unlock(&sst_drv_ctx->mixer_ctrl_lock);
+	pr_debug("setting pp param\n");
+	pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+			algo_param.algo_id, algo_param.str_id,
+			algo_param.enable, algo_param.size);
+	sst_send_algo_param(&algo_param);
+	sst_pm_runtime_put(sst_drv_ctx);
+	return retval;
+}
+
+/*
+ * sst_get_stream_allocated - this function gets a stream allocated with
+ * the given params
+ *
+ * @str_param : stream params
+ * @lib_dnld : pointer to pointer of lib download struct
+ *
+ * This creates new stream id for a stream, in case lib is to be downloaded
+ * to DSP, it fills *lib_dnld with the FW-provided download descriptor and
+ * returns the negated FW error (-SST_LIB_ERR_LIB_DNLD_REQUIRED) so the
+ * caller can perform the download and retry.
+ */
+int sst_get_stream_allocated(struct snd_sst_params *str_param,
+		struct snd_sst_lib_download **lib_dnld)
+{
+	int retval, str_id;
+	struct sst_block *block;
+	struct snd_sst_alloc_response *response;
+	struct stream_info *str_info;
+
+	pr_debug("In %s\n", __func__);
+	block = sst_create_block(sst_drv_ctx, 0, 0);
+	if (block == NULL)
+		return -ENOMEM;
+
+	retval = sst_drv_ctx->ops->alloc_stream((char *) str_param, block);
+	str_id = retval;
+	if (retval < 0) {
+		pr_err("sst_alloc_stream failed %d\n", retval);
+		goto free_block;
+	}
+	pr_debug("Stream allocated %d\n", retval);
+	str_info = get_stream_info(str_id);
+	if (str_info == NULL) {
+		pr_err("get stream info returned null\n");
+		str_id = -EINVAL;
+		goto free_block;
+	}
+
+	/* Block the call for reply */
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	if (block->data) {
+		response = (struct snd_sst_alloc_response *)block->data;
+		retval = response->str_type.result;
+		if (!retval)
+			goto free_block;
+
+		pr_err("sst: FW alloc failed retval %d\n", retval);
+		if (retval == SST_ERR_STREAM_IN_USE) {
+			pr_err("sst:FW not in clean state, send free for:%d\n",
+					str_id);
+			sst_free_stream(str_id);
+			*lib_dnld = NULL;
+		}
+		if (retval == SST_LIB_ERR_LIB_DNLD_REQUIRED) {
+			*lib_dnld = kzalloc(sizeof(**lib_dnld), GFP_KERNEL);
+			if (*lib_dnld == NULL) {
+				str_id = -ENOMEM;
+				goto free_block;
+			}
+			memcpy(*lib_dnld, &response->lib_dnld, sizeof(**lib_dnld));
+			sst_clean_stream(str_info);
+		} else {
+			*lib_dnld = NULL;
+		}
+		str_id = -retval;
+	} else if (retval != 0) {
+		pr_err("sst: FW alloc failed retval %d\n", retval);
+		/* alloc failed, so reset the state to uninit */
+		str_info->status = STREAM_UN_INIT;
+		str_id = retval;
+	}
+free_block:
+	sst_free_block(sst_drv_ctx, block);
+	return str_id; /*will ret either error (in above if) or correct str id*/
+}
+
+/*
+ * sst_get_sfreq - this function returns the frequency of the stream
+ *
+ * @str_param : stream params
+ *
+ * Returns 0 for MP3 (sample rate comes from the bitstream) and -EINVAL
+ * for unsupported codecs.
+ */
+int sst_get_sfreq(struct snd_sst_params *str_param)
+{
+	switch (str_param->codec) {
+	case SST_CODEC_TYPE_PCM:
+		return str_param->sparams.uc.pcm_params.sfreq;
+	case SST_CODEC_TYPE_AAC:
+		return str_param->sparams.uc.aac_params.externalsr;
+	case SST_CODEC_TYPE_MP3:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_get_num_channel - this function returns the channel count of the
+ * stream (the original header was a copy-paste of sst_get_sfreq)
+ *
+ * @str_param : stream params
+ */
+int sst_get_num_channel(struct snd_sst_params *str_param)
+{
+	switch (str_param->codec) {
+	case SST_CODEC_TYPE_PCM:
+		return str_param->sparams.uc.pcm_params.num_chan;
+	case SST_CODEC_TYPE_MP3:
+		return str_param->sparams.uc.mp3_params.num_chan;
+	case SST_CODEC_TYPE_AAC:
+		return str_param->sparams.uc.aac_params.num_chan;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_get_stream - this function prepares for stream allocation
+ *
+ * @str_param : stream param
+ *
+ * Allocates a stream; if the FW reports a codec library is required,
+ * downloads it and retries once. Returns the stream id (> 0) or -EIO.
+ */
+int sst_get_stream(struct snd_sst_params *str_param)
+{
+	int retval;
+	struct stream_info *str_info;
+	struct snd_sst_lib_download *lib_dnld;
+
+	pr_debug("In %s\n", __func__);
+	/* stream is not allocated, we are allocating */
+	retval = sst_get_stream_allocated(str_param, &lib_dnld);
+
+	if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
+		/* codec download is required */
+
+		pr_debug("Codec is required.... trying that\n");
+		if (lib_dnld == NULL) {
+			pr_err("lib download null!!! abort\n");
+			return -EIO;
+		}
+
+		retval = sst_load_library(lib_dnld, str_param->ops);
+		kfree(lib_dnld);
+
+		if (!retval) {
+			pr_debug("codec was downloaded successfully\n");
+
+			retval = sst_get_stream_allocated(str_param, &lib_dnld);
+			if (retval <= 0) {
+				retval = -EIO;
+				goto err;
+			}
+
+			pr_debug("Alloc done stream id %d\n", retval);
+		} else {
+			pr_debug("codec download failed\n");
+			retval = -EIO;
+			goto err;
+		}
+	} else if (retval <= 0) {
+		retval = -EIO;
+		goto err;
+	}
+	/* store sampling freq */
+	str_info = &sst_drv_ctx->streams[retval];
+	str_info->sfreq = sst_get_sfreq(str_param);
+
+err:
+	return retval;
+}
+
+/**
+* intel_sst_check_device - checks SST device
+*
+* This utility function checks the state of SST device and downloads FW if
+* not done, or resumes the device if suspended. Takes a runtime-pm
+* reference that the caller must release via sst_pm_runtime_put() (it is
+* released internally on download failure).
+*/
+int intel_sst_check_device(void)
+{
+	int retval = 0;
+
+	pr_debug("In %s\n", __func__);
+
+	pm_runtime_get_sync(sst_drv_ctx->dev);
+	atomic_inc(&sst_drv_ctx->pm_usage_count);
+
+	pr_debug("%s: count is %d now\n", __func__,
+			atomic_read(&sst_drv_ctx->pm_usage_count));
+
+	mutex_lock(&sst_drv_ctx->sst_lock);
+	if (sst_drv_ctx->sst_state == SST_UN_INIT)
+		sst_drv_ctx->sst_state = SST_START_INIT;
+
+	if (sst_drv_ctx->sst_state == SST_START_INIT ||
+			sst_drv_ctx->sst_state == SST_FW_LIB_LOAD) {
+
+		/* FW is not downloaded */
+		pr_debug("DSP Downloading FW now...\n");
+		retval = sst_download_fw();
+		if (retval) {
+			pr_err("FW download fail %x\n", retval);
+			sst_drv_ctx->sst_state = SST_UN_INIT;
+			mutex_unlock(&sst_drv_ctx->sst_lock);
+			sst_pm_runtime_put(sst_drv_ctx);
+			return retval;
+		}
+	}
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+	return retval;
+}
+
+/* Workqueue handler: execute a deferred pause/resume control op for a
+ * stream, then free the work item (allocated by the submitter).
+ */
+void sst_process_mad_ops(struct work_struct *work)
+{
+
+	struct mad_ops_wq *mad_ops =
+			container_of(work, struct mad_ops_wq, wq);
+	int retval = 0;
+
+	switch (mad_ops->control_op) {
+	case SST_SND_PAUSE:
+		retval = sst_pause_stream(mad_ops->stream_id);
+		break;
+	case SST_SND_RESUME:
+		retval = sst_resume_stream(mad_ops->stream_id);
+		break;
+	default:
+		pr_err(" wrong control_ops reported\n");
+	}
+	if (retval)
+		pr_err("%s(): op: %d, retval: %d\n",
+				__func__, mad_ops->control_op, retval);
+	kfree(mad_ops);
+	return;
+}
+
+/* Power the SST device on (state == true) or release it (state == false)
+ * on behalf of the platform layer.
+ */
+static int sst_power_control(bool state)
+{
+	pr_debug("%s for %d", __func__, state);
+
+	/* should we do ref count here, or rely on pcm handle?? */
+	return state ? intel_sst_check_device()
+		     : sst_pm_runtime_put(sst_drv_ctx);
+}
+/*
+ * sst_open_pcm_stream - Open PCM interface
+ *
+ * @str_param: parameters of pcm stream
+ *
+ * This function is called by MID sound card driver to open
+ * a new pcm interface. On success the runtime-pm reference taken by
+ * intel_sst_check_device() is kept until the stream is closed; on
+ * failure it is released here.
+ */
+static int sst_open_pcm_stream(struct snd_sst_params *str_param)
+{
+	int retval;
+
+	if (!str_param)
+		return -EINVAL;
+
+	pr_debug("%s: doing rtpm_get\n", __func__);
+
+	retval = intel_sst_check_device();
+
+	if (retval)
+		return retval;
+	retval = sst_get_stream(str_param);
+	if (retval > 0) {
+		sst_drv_ctx->stream_cnt++;
+	} else {
+		pr_err("sst_get_stream returned err %d\n", retval);
+		sst_pm_runtime_put(sst_drv_ctx);
+	}
+
+	return retval;
+}
+
+/* Open a compressed-offload stream and register the platform callbacks
+ * (fragment-elapsed and drain notification) on the stream.
+ */
+static int sst_cdev_open(struct snd_sst_params *str_params,
+		struct sst_compress_cb *cb)
+{
+	int str_id, retval;
+	struct stream_info *stream;
+
+	pr_debug("%s: doing rtpm_get\n", __func__);
+
+	retval = intel_sst_check_device();
+	if (retval)
+		return retval;
+
+	str_id = sst_get_stream(str_params);
+	if (str_id > 0) {
+		pr_debug("stream allocated in sst_cdev_open %d\n", str_id);
+		stream = &sst_drv_ctx->streams[str_id];
+		stream->compr_cb = cb->compr_cb;
+		stream->compr_cb_param = cb->param;
+		stream->drain_notify = cb->drain_notify;
+		stream->drain_cb_param = cb->drain_cb_param;
+	} else {
+		pr_err("stream encountered error during alloc %d\n", str_id);
+		str_id = -EINVAL;
+		sst_pm_runtime_put(sst_drv_ctx);
+	}
+	return str_id;
+}
+
+/* Close a compressed-offload stream: free the FW stream, detach the
+ * callbacks and drop the runtime-pm reference taken at open.
+ */
+static int sst_cdev_close(unsigned int str_id)
+{
+	int retval;
+	struct stream_info *stream;
+
+	pr_debug("%s: Entry\n", __func__);
+	stream = get_stream_info(str_id);
+	if (!stream) {
+		pr_err("stream info is NULL for str %d!!!\n", str_id);
+		return -EINVAL;
+	}
+
+	if (stream->status == STREAM_RESET) {
+		/* silently fail here as we have cleaned the stream */
+		pr_debug("stream in reset state...\n");
+		stream->status = STREAM_UN_INIT;
+
+		retval = 0;
+		goto put;
+	}
+
+	retval = sst_free_stream(str_id);
+put:
+	stream->compr_cb_param = NULL;
+	stream->compr_cb = NULL;
+
+	/* The free_stream will return a error if there is no stream to free,
+	(i.e. the alloc failure case). And in this case the open does a put in
+	the error scenario, so skip in this case.
+	In the close we need to handle put in the success scenario and
+	the timeout error(EBUSY) scenario. */
+	if (!retval || (retval == -EBUSY))
+		sst_pm_runtime_put(sst_drv_ctx);
+	else
+		pr_err("%s: free stream returned err %d\n", __func__, retval);
+
+	pr_debug("%s: End\n", __func__);
+	return retval;
+
+}
+
+/* Acknowledge @bytes copied into the ring buffer for stream @str_id:
+ * update the cumulative byte count and write it back into the shared
+ * timestamp structure in the mailbox so the FW sees the new write pointer.
+ */
+static int sst_cdev_ack(unsigned int str_id, unsigned long bytes)
+{
+	struct stream_info *stream;
+	struct snd_sst_tstamp fw_tstamp = {0,};
+	int offset;
+	void __iomem *addr;
+
+	pr_debug("sst: ackfor %d\n", str_id);
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+
+	/* update bytes sent */
+	stream->cumm_bytes += bytes;
+	pr_debug("bytes copied %d inc by %ld\n", stream->cumm_bytes, bytes);
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+			+(str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+
+	fw_tstamp.bytes_copied = stream->cumm_bytes;
+	pr_debug("bytes sent to fw %llu inc by %ld\n", fw_tstamp.bytes_copied,
+			bytes);
+
+	addr =  ((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)) +
+			(str_id * sizeof(fw_tstamp));
+	offset =  offsetof(struct snd_sst_tstamp, bytes_copied);
+	sst_shim_write(addr, offset, fw_tstamp.bytes_copied);
+	return 0;
+
+}
+
+/* Send compressed-stream metadata to the FW. Uses the Merrifield
+ * (64-bit header + dsp header) message layout unless the driver is in
+ * legacy 32-bit ops mode. The message is posted synchronously.
+ */
+static int sst_cdev_set_metadata(unsigned int str_id,
+		struct snd_compr_metadata *metadata)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("set metadata for stream %d\n", str_id);
+
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+
+	if (sst_create_ipc_msg(&msg, 1))
+		return -ENOMEM;
+
+	if (!sst_drv_ctx->use_32bit_ops) {
+		pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+		pr_debug("pvt id = %d\n", pvt_id);
+		pr_debug("pipe id = %d\n", str_info->pipe_id);
+		sst_fill_header_mrfld(&msg->mrfld_header,
+			IPC_CMD, str_info->task_id, 1, pvt_id);
+
+		len = sizeof(*metadata) + sizeof(dsp_hdr);
+		msg->mrfld_header.p.header_low_payload = len;
+		sst_fill_header_dsp(&dsp_hdr, IPC_IA_SET_STREAM_PARAMS_MRFLD,
+				str_info->pipe_id, sizeof(*metadata));
+		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+		memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+				metadata, sizeof(*metadata));
+	} else {
+		sst_fill_header(&msg->header, IPC_IA_SET_STREAM_PARAMS,
+				1, str_id);
+		msg->header.part.data = sizeof(u32) + sizeof(*metadata);
+		memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+		memcpy(msg->mailbox_data + sizeof(u32),
+				metadata, sizeof(*metadata));
+	}
+
+	sst_drv_ctx->ops->sync_post_message(msg);
+	return retval;
+}
+
+/*
+ * sst_cdev_control - dispatch trigger commands for a compressed stream
+ *
+ * @cmd: ALSA/compress trigger command
+ * @str_id: stream id the command applies to
+ *
+ * Returns 0 when the DSP is uninitialised (nothing to do), otherwise the
+ * status of the matching stream operation, or -EINVAL for unknown @cmd.
+ */
+static int sst_cdev_control(unsigned int cmd, unsigned int str_id)
+{
+	pr_debug("received cmd %d on stream %d\n", cmd, str_id);
+
+	if (sst_drv_ctx->sst_state == SST_UN_INIT)
+		return 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		return sst_pause_stream(str_id);
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		return sst_resume_stream(str_id);
+	case SNDRV_PCM_TRIGGER_START: {
+		struct stream_info *str_info;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		/* record previous state so a failed start can be diagnosed */
+		str_info->prev = str_info->status;
+		str_info->status = STREAM_RUNNING;
+		return sst_start_stream(str_id);
+	}
+	case SNDRV_PCM_TRIGGER_STOP:
+		return sst_drop_stream(str_id);
+	case SND_COMPR_TRIGGER_DRAIN:
+		return sst_drain_stream(str_id, false);
+	case SND_COMPR_TRIGGER_NEXT_TRACK:
+		return sst_next_track();
+	case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
+		return sst_drain_stream(str_id, true);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_cdev_tstamp - fill a compress timestamp from the firmware mailbox
+ *
+ * @str_id: stream id
+ * @tstamp: out parameter receiving the position/rate information
+ *
+ * Validates @str_id BEFORE reading the mailbox so an invalid id cannot
+ * index past the firmware timestamp area.
+ */
+static int sst_cdev_tstamp(unsigned int str_id, struct snd_compr_tstamp *tstamp)
+{
+	struct snd_sst_tstamp fw_tstamp = {0,};
+	struct stream_info *stream;
+
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+		+(str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+
+	pr_debug("rb_counter %llu in bytes\n", fw_tstamp.ring_buffer_counter);
+
+	tstamp->copied_total = fw_tstamp.ring_buffer_counter;
+	tstamp->pcm_frames = fw_tstamp.frames_decoded;
+	/* hardware counter is in bytes; convert to frames using channel
+	 * count and 24-bit sample size */
+	tstamp->pcm_io_frames = div_u64(fw_tstamp.hardware_counter,
+			(u64)((stream->num_ch) * SST_GET_BYTES_PER_SAMPLE(24)));
+	tstamp->sampling_rate = fw_tstamp.sampling_frequency;
+	pr_debug("PCM  = %u\n", tstamp->pcm_io_frames);
+	pr_debug("Pointer Query on strid = %d  copied_total %d, decodec %d\n",
+		str_id, tstamp->copied_total, tstamp->pcm_frames);
+	pr_debug("rendered %d\n", tstamp->pcm_io_frames);
+	return 0;
+}
+
+/* Report the DSP's compressed-stream capabilities (codec list plus
+ * fragment size/count limits). Always succeeds. */
+static int sst_cdev_caps(struct snd_compr_caps *caps)
+{
+	caps->codecs[0] = SND_AUDIOCODEC_MP3;
+	caps->codecs[1] = SND_AUDIOCODEC_AAC;
+	caps->num_codecs = NUM_CODEC;
+	caps->min_fragments = MIN_FRAGMENT;
+	caps->max_fragments = MAX_FRAGMENT;
+	caps->min_fragment_size = MIN_FRAGMENT_SIZE;	/* 50KB */
+	caps->max_fragment_size = MAX_FRAGMENT_SIZE;	/* 1024KB */
+	return 0;
+}
+
+/* Fill per-codec capability descriptors: MP3 uses descriptor[0],
+ * AAC uses descriptor[1]. Unknown codecs are rejected. */
+static int sst_cdev_codec_caps(struct snd_compr_codec_caps *codec)
+{
+	switch (codec->codec) {
+	case SND_AUDIOCODEC_MP3:
+		codec->num_descriptors = 2;
+		codec->descriptor[0].max_ch = 2;
+		codec->descriptor[0].sample_rates = SNDRV_PCM_RATE_8000_48000;
+		codec->descriptor[0].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[0].bit_rate[1] = 192;
+		codec->descriptor[0].num_bitrates = 2;
+		codec->descriptor[0].profiles = 0;
+		codec->descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO;
+		codec->descriptor[0].formats = 0;
+		return 0;
+	case SND_AUDIOCODEC_AAC:
+		codec->num_descriptors = 2;
+		codec->descriptor[1].max_ch = 2;
+		codec->descriptor[1].sample_rates = SNDRV_PCM_RATE_8000_48000;
+		codec->descriptor[1].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[1].bit_rate[1] = 192;
+		codec->descriptor[1].num_bitrates = 2;
+		codec->descriptor[1].profiles = 0;
+		codec->descriptor[1].modes = 0;
+		codec->descriptor[1].formats =
+			(SND_AUDIOSTREAMFORMAT_MP4ADTS |
+				SND_AUDIOSTREAMFORMAT_RAW);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Firmware notification: one compressed fragment has been consumed.
+ * Forwards the event to the registered compress callback, if any.
+ * NOTE(review): str_id is not range-checked here — assumed valid because
+ * it originates from firmware; confirm against the IPC handler. */
+void sst_cdev_fragment_elapsed(int str_id)
+{
+	struct stream_info *str;
+
+	pr_debug("fragment elapsed from firmware for str_id %d\n", str_id);
+	str = &sst_drv_ctx->streams[str_id];
+	if (!str->compr_cb)
+		return;
+	str->compr_cb(str->compr_cb_param);
+}
+
+/*
+ * sst_close_pcm_stream - Close PCM interface
+ *
+ * @str_id: stream id to be closed
+ *
+ * This function is called by MID sound card driver to close
+ * an existing pcm interface.  Frees the firmware stream context,
+ * detaches the substream state and balances the runtime-PM reference
+ * taken at open.  Always returns 0 for a valid @str_id.
+ */
+static int sst_close_pcm_stream(unsigned int str_id)
+{
+	struct stream_info *stream;
+	int retval = 0;
+
+	pr_debug("%s: Entry\n", __func__);
+	stream = get_stream_info(str_id);
+	if (!stream) {
+		pr_err("stream info is NULL for str %d!!!\n", str_id);
+		return -EINVAL;
+	}
+
+	if (stream->status == STREAM_RESET) {
+		/* silently fail here as we have cleaned the stream */
+		pr_debug("stream in reset state...\n");
+
+		retval = 0;
+		goto put;
+	}
+
+	retval = free_stream_context(str_id);
+put:
+	/* detach substream state even if the firmware free failed */
+	stream->pcm_substream = NULL;
+	stream->status = STREAM_UN_INIT;
+	stream->period_elapsed = NULL;
+	sst_drv_ctx->stream_cnt--;
+
+	/* The free_stream will return a error if there is no stream to free,
+	(i.e. the alloc failure case). And in this case the open does a put in
+	the error scenario, so skip in this case.
+	In the close we need to handle put in the success scenario and
+	the timeout error(EBUSY) scenario. */
+	if (!retval || (retval == -EBUSY))
+		sst_pm_runtime_put(sst_drv_ctx);
+	else
+		pr_err("%s: free stream returned err %d\n", __func__, retval);
+
+	pr_debug("%s: Exit\n", __func__);
+	/* NOTE(review): errors from free_stream_context are logged but not
+	 * propagated — callers appear to ignore close failures; confirm
+	 * before changing this to `return retval` */
+	return 0;
+}
+
+/* Build a header-only IPC message for @ipc/@str_id and post it
+ * synchronously, returning the post status (-ENOMEM on alloc failure). */
+int sst_send_sync_msg(int ipc, int str_id)
+{
+	struct ipc_post *msg;
+
+	msg = NULL;
+	if (sst_create_ipc_msg(&msg, false))
+		return -ENOMEM;
+
+	sst_fill_header(&msg->header, ipc, 0, str_id);
+	return sst_drv_ctx->ops->sync_post_message(msg);
+}
+
+/*
+ * sst_calc_tstamp - derive buffer pointer and delay from firmware counters
+ *
+ * @info: out parameter receiving buffer_ptr (in frames) and pcm_delay
+ * @substream: the ALSA substream (provides runtime format/channels)
+ * @fw_tstamp: byte counters read from the firmware mailbox
+ *
+ * For playback the delay is data queued but not yet rendered
+ * (ring - hardware); for capture it is the opposite direction.
+ */
+static inline int sst_calc_tstamp(struct pcm_stream_info *info,
+		struct snd_pcm_substream *substream,
+		struct snd_sst_tstamp *fw_tstamp)
+{
+	size_t delay_bytes, delay_frames;
+	size_t buffer_sz;
+	u32 pointer_bytes, pointer_samples;
+
+	pr_debug("mrfld ring_buffer_counter %llu in bytes\n",
+			fw_tstamp->ring_buffer_counter);
+	pr_debug("mrfld hardware_counter %llu in bytes\n",
+			 fw_tstamp->hardware_counter);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		delay_bytes = (size_t) (fw_tstamp->ring_buffer_counter -
+					fw_tstamp->hardware_counter);
+	else
+		delay_bytes = (size_t) (fw_tstamp->hardware_counter -
+					fw_tstamp->ring_buffer_counter);
+	delay_frames = bytes_to_frames(substream->runtime, delay_bytes);
+	buffer_sz = snd_pcm_lib_buffer_bytes(substream);
+	/* ring counter is monotonically increasing; reduce modulo the
+	 * buffer size to get the position within the ring */
+	div_u64_rem(fw_tstamp->ring_buffer_counter, buffer_sz, &pointer_bytes);
+	pointer_samples = bytes_to_samples(substream->runtime, pointer_bytes);
+
+	pr_debug("pcm delay %zu in bytes\n", delay_bytes);
+
+	/* samples -> frames: divide by the channel count */
+	info->buffer_ptr = pointer_samples / substream->runtime->channels;
+
+	info->pcm_delay = delay_frames / substream->runtime->channels;
+	pr_debug("buffer ptr %llu pcm_delay rep: %llu\n",
+			info->buffer_ptr, info->pcm_delay);
+	return 0;
+}
+
+/* Read the firmware timestamp record for info->str_id from the mailbox
+ * and convert it into buffer pointer / delay via sst_calc_tstamp().
+ * Returns -EINVAL for a bad id or a stream with no attached substream. */
+static int sst_read_timestamp(struct pcm_stream_info *info)
+{
+	struct stream_info *stream;
+	struct snd_pcm_substream *substream;
+	struct snd_sst_tstamp fw_tstamp;
+	unsigned int str_id = info->str_id;
+
+	stream = get_stream_info(str_id);
+	if (!stream || !stream->pcm_substream)
+		return -EINVAL;
+
+	substream = stream->pcm_substream;
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+			+ (str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+	return sst_calc_tstamp(info, substream, &fw_tstamp);
+}
+
+/*
+ * sst_device_control - Set Control params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument (stream id or pcm_stream_info, per @cmd)
+ *
+ * This function is called by MID sound card driver to set
+ * SST/Sound card controls for an opened stream.
+ * This is registered with MID driver.  Returns 0 when the DSP is
+ * uninitialised; otherwise the per-command status.
+ */
+static int sst_device_control(int cmd, void *arg)
+{
+	int retval = 0, str_id = 0;
+
+	if (sst_drv_ctx->sst_state == SST_UN_INIT)
+		return 0;
+
+	switch (cmd) {
+	case SST_SND_PAUSE:
+	case SST_SND_RESUME: {
+		/* deferred to a workqueue: pause/resume may sleep */
+		struct mad_ops_wq *work = kzalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work)
+			return -ENOMEM;
+		INIT_WORK(&work->wq, sst_process_mad_ops);
+		work->control_op = cmd;
+		work->stream_id = *(int *)arg;
+		queue_work(sst_drv_ctx->mad_wq, &work->wq);
+		break;
+	}
+	case SST_SND_START: {
+		struct stream_info *str_info;
+		str_id = *(int *)arg;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		str_info->prev = str_info->status;
+		str_info->status = STREAM_RUNNING;
+		sst_start_stream(str_id);
+		break;
+	}
+	case SST_SND_DROP: {
+		struct stream_info *str_info;
+		int ipc;
+		str_id = *(int *)arg;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		ipc = IPC_IA_DROP_STREAM;
+		str_info->prev = STREAM_UN_INIT;
+		str_info->status = STREAM_INIT;
+		if (sst_drv_ctx->use_32bit_ops)
+			retval = sst_send_sync_msg(ipc, str_id);
+		else
+			retval = sst_drop_stream(str_id);
+		break;
+	}
+	case SST_SND_STREAM_INIT: {
+		struct pcm_stream_info *str_info;
+		struct stream_info *stream;
+
+		pr_debug("stream init called\n");
+		str_info = (struct pcm_stream_info *)arg;
+		str_id = str_info->str_id;
+		stream = get_stream_info(str_id);
+		if (!stream) {
+			retval = -EINVAL;
+			break;
+		}
+		/* attach substream and period callback for this stream */
+		pr_debug("setting the period ptrs\n");
+		stream->pcm_substream = str_info->mad_substream;
+		stream->period_elapsed = str_info->period_elapsed;
+		stream->sfreq = str_info->sfreq;
+		stream->prev = stream->status;
+		stream->status = STREAM_INIT;
+		pr_debug("pcm_substream %p, period_elapsed %p, sfreq %d, status %d\n",
+			stream->pcm_substream, stream->period_elapsed, stream->sfreq, stream->status);
+		break;
+	}
+
+	case SST_SND_BUFFER_POINTER: {
+		struct pcm_stream_info *stream_info;
+
+		stream_info = (struct pcm_stream_info *)arg;
+		retval = sst_read_timestamp(stream_info);
+		pr_debug("pointer %llu, delay %llu\n",
+			stream_info->buffer_ptr, stream_info->pcm_delay);
+		break;
+	}
+	default:
+		/* Illegal case */
+		pr_warn("illegal req\n");
+		return -EINVAL;
+	}
+
+	return retval;
+}
+
+/*
+ * sst_copy_runtime_param - copy runtime params from src to dst
+ * structure.
+ *
+ * @dst: destination runtime structure
+ * @src: source runtime structure
+ *
+ * This helper function is called to copy the runtime parameter
+ * structure.  The existing-allocation check is done BEFORE any field
+ * of @dst is touched, so @dst is left unmodified on failure.
+ */
+static int sst_copy_runtime_param(struct snd_sst_runtime_params *dst,
+			struct snd_sst_runtime_params *src)
+{
+	if (dst->addr) {
+		pr_err("mem allocated in prev setting, use the same memory\n");
+		return -EINVAL;
+	}
+	dst->type = src->type;
+	dst->str_id = src->str_id;
+	dst->size = src->size;
+	dst->addr = kzalloc(dst->size, GFP_KERNEL);
+	if (!dst->addr)
+		return -ENOMEM;
+	memcpy(dst->addr, src->addr, dst->size);
+	return 0;
+}
+/*
+ * sst_set_generic_params - Set generic params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument (type depends on @cmd)
+ *
+ * This function is called by MID sound card driver to configure
+ * SST runtime params.  Some commands power up the device via
+ * intel_sst_check_device() and must balance it with
+ * sst_pm_runtime_put() on every exit path.
+ */
+static int sst_set_generic_params(enum sst_controls cmd, void *arg)
+{
+	int ret_val = 0;
+	pr_debug("Enter:%s, cmd:%d\n", __func__, cmd);
+
+	if (NULL == arg)
+		return -EINVAL;
+
+	switch (cmd) {
+	case SST_SET_RUNTIME_PARAMS: {
+		/* arg is a snd_sst_runtime_params; copied into driver state */
+		struct snd_sst_runtime_params *src;
+		struct snd_sst_runtime_params *dst;
+
+		src = (struct snd_sst_runtime_params *)arg;
+		dst = &(sst_drv_ctx->runtime_param.param);
+		ret_val = sst_copy_runtime_param(dst, src);
+		break;
+		}
+	case SST_SET_ALGO_PARAMS: {
+		/* arg is the input-mixer selection; cached under the mixer
+		 * lock, then pushed to the LPE */
+		unsigned int device_input_mixer = *((unsigned int *)arg);
+		pr_debug("LPE mixer algo param set %x\n", device_input_mixer);
+		mutex_lock(&sst_drv_ctx->mixer_ctrl_lock);
+		sst_drv_ctx->device_input_mixer = device_input_mixer;
+		mutex_unlock(&sst_drv_ctx->mixer_ctrl_lock);
+		ret_val = sst_send_lpe_mixer_algo_params();
+		break;
+	}
+	case SST_SET_BYTE_STREAM: {
+		/* powers the device up for the send, then puts it */
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_byte_stream_mrfld(arg);
+		sst_pm_runtime_put(sst_drv_ctx);
+		break;
+	}
+	case SST_GET_PROBE_BYTE_STREAM: {
+		/* copy cached probe bytes back to the caller, if present */
+		struct snd_sst_probe_bytes *prb_bytes = (struct snd_sst_probe_bytes *)arg;
+
+		if (sst_drv_ctx->probe_bytes) {
+			prb_bytes->len = sst_drv_ctx->probe_bytes->len;
+			memcpy(prb_bytes->bytes, &sst_drv_ctx->probe_bytes->bytes, prb_bytes->len);
+		}
+		break;
+	}
+	case SST_SET_PROBE_BYTE_STREAM: {
+		/* cache the probe bytes, then send them to the firmware.
+		 * NOTE(review): no put here after check_device — presumably
+		 * sst_send_probe_bytes balances it; confirm */
+		struct snd_sst_probe_bytes *prb_bytes = (struct snd_sst_probe_bytes *)arg;
+
+		if (sst_drv_ctx->probe_bytes) {
+			sst_drv_ctx->probe_bytes->len = prb_bytes->len;
+			memcpy(&sst_drv_ctx->probe_bytes->bytes, prb_bytes->bytes, prb_bytes->len);
+		}
+
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_probe_bytes(sst_drv_ctx);
+		break;
+	}
+	case SST_SET_VTSV_INFO: {
+		/* power up, push voice-trigger data, power down */
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_vtsv_data_to_fw(sst_drv_ctx);
+		if (ret_val)
+			pr_err("vtsv data send failed\n");
+		sst_pm_runtime_put(sst_drv_ctx);
+		break;
+	}
+	default:
+		pr_err("Invalid cmd request:%d\n", cmd);
+		ret_val = -EINVAL;
+	}
+	return ret_val;
+}
+
+/* PCM operations exposed to the platform driver via sst_dsp_device */
+static struct sst_ops pcm_ops = {
+	.open = sst_open_pcm_stream,
+	.device_control = sst_device_control,
+	.set_generic_params = sst_set_generic_params,
+	.close = sst_close_pcm_stream,
+	.power = sst_power_control,
+};
+
+/* Compressed-stream operations exposed to the platform driver */
+static struct compress_sst_ops compr_ops = {
+	.open = sst_cdev_open,
+	.close = sst_cdev_close,
+	.control = sst_cdev_control,
+	.tstamp = sst_cdev_tstamp,
+	.ack = sst_cdev_ack,
+	.get_caps = sst_cdev_caps,
+	.get_codec_caps = sst_cdev_codec_caps,
+	.set_metadata = sst_cdev_set_metadata,
+};
+
+
+/* DSP device descriptor registered with the platform driver;
+ * .dev is filled in by register_sst() */
+static struct sst_device sst_dsp_device = {
+	.name = "Intel(R) SST LPE",
+	.dev = NULL,
+	.ops = &pcm_ops,
+	.compr_ops = &compr_ops,
+};
+
+/*
+ * register_sst - function to register DSP
+ *
+ * @dev: device to attach to the DSP descriptor
+ *
+ * This function registers the DSP with the platform driver and returns
+ * the registration status.
+ */
+int register_sst(struct device *dev)
+{
+	int ret;
+
+	sst_dsp_device.dev = dev;
+	ret = sst_register_dsp(&sst_dsp_device);
+	if (ret)
+		pr_err("Unable to register DSP with platform driver\n");
+	return ret;
+}
+
+/* Unregister the DSP from the platform driver.
+ * @dev is unused; kept for signature symmetry with register_sst(). */
+int unregister_sst(struct device *dev)
+{
+	return sst_unregister_dsp(&sst_dsp_device);
+}
--- /dev/null
+/*
+ * sst_dsp.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file contains all dsp controlling functions like firmware download,
+ * setting/resetting dsp cores, etc
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/dmaengine.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/elf.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+/* On 64-bit builds use the writel-based copy: the SST MMIO window does
+ * not support 64-bit moveq accesses (see memcpy32_toio below) */
+#ifndef CONFIG_X86_64
+#define MEMCPY_TOIO memcpy_toio
+#else
+#define MEMCPY_TOIO memcpy32_toio
+#endif
+
+/**
+ * Add here all the static libraries to be downloaded at bootup
+ */
+static struct sst_module_info sst_modules_mrfld[] = {};
+
+/* Baytrail: codec decoder libraries loaded on demand */
+static struct sst_module_info sst_modules_byt[] = {
+	{"mp3_dec", SST_CODEC_TYPE_MP3, 0, SST_LIB_NOT_FOUND},
+	{"aac_dec", SST_CODEC_TYPE_AAC, 0, SST_LIB_NOT_FOUND},
+};
+
+/**
+ * memcpy32_toio: Copy using writel commands
+ *
+ * @dst: MMIO destination
+ * @src: source buffer
+ * @count: number of bytes (assumed a multiple of 4 — any trailing
+ *         bytes are NOT copied; TODO confirm all callers pass aligned sizes)
+ *
+ * This is needed because the hardware does not support
+ * 64-bit moveq instructions while writing to PCI MMIO
+ */
+void memcpy32_toio(void *dst, const void *src, int count)
+{
+	/* hoist the word count and keep the loop index unsigned to avoid
+	 * a signed/unsigned comparison against sizeof() */
+	size_t i, words = count / sizeof(u32);
+	const u32 *src_32 = src;
+	u32 *dst_32 = dst;
+
+	for (i = 0; i < words; i++)
+		writel(*src_32++, dst_32++);
+}
+
+/**
+ * intel_sst_reset_dsp_mfld - Resetting SST DSP
+ *
+ * Puts the Medfield DSP into reset by setting the stall/reset bits in
+ * the config status register, serialised by csr_lock.
+ */
+int intel_sst_reset_dsp_mfld(void)
+{
+	union config_status_reg reg;
+
+	pr_debug("Resetting the DSP in medfield\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	reg.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	reg.full |= 0x382;
+	reg.part.run_stall = 0x1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, reg.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+	return 0;
+}
+
+/**
+ * sst_start_mfld - Start the SST DSP processor
+ *
+ * This starts the DSP on Medfield platforms.  The CSR is updated in
+ * three separate read-modify-write steps; the ordering (clear bypass,
+ * then strobe, then release stall/reset) is intentional — do not merge.
+ */
+int sst_start_mfld(void)
+{
+	union config_status_reg csr;
+
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	/* step 1: take the DSP out of bypass */
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.bypass = 0;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	/* step 2: assert the Medfield start strobe */
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.mfld_strb = 1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	/* step 3: release run-stall and reset to let the core run */
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.run_stall = 0;
+	csr.part.sst_reset = 0;
+	pr_debug("Starting the DSP_medfld %x\n", csr.full);
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	pr_debug("Starting the DSP_medfld\n");
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+	return 0;
+}
+/**
+ * intel_sst_reset_dsp_mrfld - Resetting SST DSP
+ *
+ * This resets DSP in case of MRFLD platforms: first assert the low
+ * three CSR bits (reset/stall), then clear bit 0.  The intermediate
+ * read-backs are intentional — do not collapse the sequence.
+ */
+int intel_sst_reset_dsp_mrfld(void)
+{
+	union config_status_reg_mrfld csr;
+
+	pr_debug("sst: Resetting the DSP in mrfld\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+	pr_debug("value:0x%llx\n", csr.full);
+
+	/* assert reset/stall bits */
+	csr.full |= 0x7;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+	pr_debug("value:0x%llx\n", csr.full);
+
+	/* release bit 0 while keeping the core stalled */
+	csr.full &= ~(0x1);
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+	return 0;
+}
+
+/**
+ * sst_start_mrfld - Start the SST DSP processor
+ *
+ * This starts the DSP on MERRIFIELD platforms: assert the low CSR bits,
+ * enable snoop, then release bits 0 and 2 to let the core run.
+ */
+int sst_start_mrfld(void)
+{
+	union config_status_reg_mrfld csr;
+
+	/* leftover "LALALALA" debug junk removed from the log message */
+	pr_debug("sst: Starting the DSP in mrfld\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.full |= 0x7;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.part.xt_snoop = 1;
+	csr.full &= ~(0x5);
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("sst: Starting the DSP_merrifield:%llx\n", csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+	return 0;
+}
+
+/**
+ * intel_sst_set_bypass_mfld - Sets/clears the bypass bits
+ *
+ * @set: true to set the full bypass mask, false to clear bypass only
+ */
+void intel_sst_set_bypass_mfld(bool set)
+{
+	union config_status_reg reg;
+
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	reg.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	if (set)
+		reg.full |= 0x380;
+	else
+		reg.part.bypass = 0;
+	pr_debug("SetupByPass set %d Val 0x%x\n", set, reg.full);
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, reg.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+}
+/* Compute the DMA destination for an ELF segment: the LPE's own view when
+ * lpe_viewpt_rqd, otherwise translated into the IA view.  All arguments
+ * are parenthesized so expression arguments expand safely. */
+#define SST_CALC_DMA_DSTN(lpe_viewpt_rqd, ia_viewpt_addr, elf_paddr, \
+			lpe_viewpt_addr) ((lpe_viewpt_rqd) ? \
+		(elf_paddr) : ((ia_viewpt_addr) + (elf_paddr) - (lpe_viewpt_addr)))
+
+/*
+ * sst_fill_dstn - resolve an ELF segment's destination inside SST memory
+ *
+ * @sst: driver context providing the IRAM/DRAM/DDR mappings
+ * @info: address ranges describing IRAM, DRAM and IMR windows
+ * @pr: ELF program header of the segment (may be modified, see below)
+ * @dstn: out, virtual destination address
+ * @dstn_phys: out, physical/DMA destination address
+ * @mem_type: out, 1 for IRAM/DRAM, 0 for IMR (DDR)
+ *
+ * Returns -EINVAL when the segment address falls in none of the windows.
+ * NOTE: the #ifdef'd IRAM branch pairs with the bare `else if` after
+ * #endif — keep the preprocessor structure intact when editing.
+ */
+static int sst_fill_dstn(struct intel_sst_drv *sst, struct sst_info info,
+		Elf32_Phdr *pr, void **dstn, unsigned int *dstn_phys, int *mem_type)
+{
+#ifdef MRFLD_WORD_WA
+	/* work arnd-since only 4 byte align copying is only allowed for ICCM */
+	if ((pr->p_paddr >= info.iram_start) && (pr->p_paddr < info.iram_end)) {
+		size_t data_size = pr->p_filesz % SST_ICCM_BOUNDARY;
+
+		/* rounds p_filesz up IN PLACE in the ELF header copy */
+		if (data_size)
+			pr->p_filesz += 4 - data_size;
+		*dstn = sst->iram + (pr->p_paddr - info.iram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->iram_base, pr->p_paddr, info.iram_start);
+		*mem_type = 1;
+	}
+#else
+	if ((pr->p_paddr >= info.iram_start) &&
+			(pr->p_paddr < info.iram_end)) {
+
+		*dstn = sst->iram + (pr->p_paddr - info.iram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->iram_base, pr->p_paddr, info.iram_start);
+		*mem_type = 1;
+	}
+#endif
+	else if ((pr->p_paddr >= info.dram_start) &&
+		 (pr->p_paddr < info.dram_end)) {
+
+		*dstn = sst->dram + (pr->p_paddr - info.dram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->dram_base, pr->p_paddr, info.dram_start);
+		*mem_type = 1;
+	} else if ((pr->p_paddr >= info.imr_start) &&
+		   (pr->p_paddr < info.imr_end)) {
+
+		/* IMR segments live in DDR; no view translation needed */
+		*dstn = sst->ddr + (pr->p_paddr - info.imr_start);
+		*dstn_phys = sst->ddr_base + pr->p_paddr - info.imr_start;
+		*mem_type = 0;
+	} else {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Populate @info with the IRAM/DRAM/IMR address windows: platform
+ * overrides win when their *_use flag is set, otherwise the probed
+ * base/end addresses are used (IMR falls back to relocated DDR). */
+static void sst_fill_info(struct intel_sst_drv *sst,
+			struct sst_info *info)
+{
+	if (!sst->info.iram_use) {
+		info->iram_start = sst->iram_base;
+		info->iram_end = sst->iram_end;
+	} else {
+		info->iram_start = sst->info.iram_start;
+		info->iram_end = sst->info.iram_end;
+	}
+
+	if (!sst->info.dram_use) {
+		info->dram_start = sst->dram_base;
+		info->dram_end = sst->dram_end;
+	} else {
+		info->dram_start = sst->info.dram_start;
+		info->dram_end = sst->info.dram_end;
+	}
+
+	if (!sst->info.imr_use) {
+		info->imr_start = relocate_imr_addr_mrfld(sst->ddr_base);
+		info->imr_end = relocate_imr_addr_mrfld(sst->ddr_end);
+	} else {
+		info->imr_start = sst->info.imr_start;
+		info->imr_end = sst->info.imr_end;
+	}
+
+	info->lpe_viewpt_rqd = sst->info.lpe_viewpt_rqd;
+	info->dma_max_len = sst->info.dma_max_len;
+	pr_debug("%s: dma_max_len 0x%x", __func__, info->dma_max_len);
+}
+
+/*
+ * sst_validate_elf - sanity-check an ELF firmware image
+ *
+ * @sst_bin: firmware blob to validate
+ * @dynamic: when true, additionally require an ET_DYN image
+ *
+ * Uses memcmp against the standard ELF magic instead of byte-by-byte
+ * comparison.  Returns 0 on success, -EINVAL otherwise.
+ */
+static inline int sst_validate_elf(const struct firmware *sst_bin, bool dynamic)
+{
+	Elf32_Ehdr *elf;
+
+	BUG_ON(!sst_bin);
+
+	pr_debug("IN %s\n", __func__);
+
+	elf = (Elf32_Ehdr *)sst_bin->data;
+
+	/* ELFMAG/SELFMAG come from <linux/elf.h>, already included */
+	if (memcmp(elf->e_ident, ELFMAG, SELFMAG) != 0) {
+		pr_debug("ELF Header Not found!%zu\n", sst_bin->size);
+		return -EINVAL;
+	}
+
+	if (dynamic) {
+		if (elf->e_type != ET_DYN) {
+			pr_err("Not a dynamic loadable library\n");
+			return -EINVAL;
+		}
+	}
+	pr_debug("Valid ELF Header...%zu\n", sst_bin->size);
+	return 0;
+}
+
+/**
+ * sst_validate_fw_image - validates the firmware signature
+ *
+ * @sst_fw_in_mem : pointer to audio FW
+ * @size : size of the firmware
+ * @module : out, set to the first FW module past the header
+ * @num_modules : out, number of modules declared by the header
+ *
+ * Checks the header signature and that the declared file size matches
+ * the actual blob size.  Returns 0 on success, -EINVAL otherwise.
+ */
+static int sst_validate_fw_image(const void *sst_fw_in_mem, unsigned long size,
+		struct fw_module_header **module, u32 *num_modules)
+{
+	struct fw_header *header = (struct fw_header *)sst_fw_in_mem;
+
+	pr_debug("%s\n", __func__);
+
+	pr_debug("header sign=%s size=%x modules=%x fmt=%x size=%zx\n",
+			header->signature, header->file_size, header->modules,
+			header->file_format, sizeof(*header));
+
+	/* verify FW */
+	if (strncmp(header->signature, SST_FW_SIGN, 4) != 0 ||
+	    size != header->file_size + sizeof(*header)) {
+		/* Invalid FW signature */
+		pr_err("InvalidFW sign/filesize mismatch\n");
+		return -EINVAL;
+	}
+
+	*num_modules = header->modules;
+	*module = (void *)sst_fw_in_mem + sizeof(*header);
+	return 0;
+}
+
+/**
+ * sst_validate_library - validates the library signature
+ *
+ * @fw_lib : pointer to FW library
+ * @slot : pointer to the lib slot info
+ * @entry_point : out param, which contains the module entry point
+ *
+ * Called before downloading a codec/postprocessing library.  Verifies
+ * there is exactly one module and that its accumulated IRAM/DRAM block
+ * sizes fit the slot.
+ */
+static int sst_validate_library(const struct firmware *fw_lib,
+		struct lib_slot_info *slot,
+		u32 *entry_point)
+{
+	struct fw_header *header = (struct fw_header *)fw_lib->data;
+	struct fw_module_header *module;
+	struct fw_block_info *block;
+	unsigned int blk, iram_bytes = 0, dram_bytes = 0;
+	int ret = 0;
+
+	if (header->modules != 1) {
+		pr_err("Module no mismatch found\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	module = (void *)fw_lib->data + sizeof(*header);
+	*entry_point = module->entry_point;
+	pr_debug("Module entry point 0x%x\n", *entry_point);
+	pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
+			module->signature, module->mod_size,
+			module->blocks, module->type);
+
+	/* walk the block list and accumulate the IRAM/DRAM footprint */
+	block = (void *)module + sizeof(*module);
+	for (blk = 0; blk < module->blocks; blk++) {
+		if (block->type == SST_IRAM) {
+			iram_bytes += block->size;
+		} else if (block->type == SST_DRAM) {
+			dram_bytes += block->size;
+		} else {
+			pr_err("Invalid block type for 0x%x\n", blk);
+			ret = -EINVAL;
+			goto exit;
+		}
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+	if (iram_bytes > slot->iram_size || dram_bytes > slot->dram_size) {
+		pr_err("library exceeds size allocated\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	pr_debug("Library is safe for download...\n");
+	pr_debug("iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
+			iram_bytes, dram_bytes, slot->iram_size, slot->dram_size);
+exit:
+	return ret;
+}
+
+/* DMA channel filter: accept only channels belonging to the DMAC that
+ * can reach the DSP RAMs (MID_DMAC1). */
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct sst_dma *dma = param;
+
+	return chan->device->dev == dma->dev;
+}
+
+/*
+ * sst_get_elf_sg_len - count scatter-gather entries needed for an ELF image
+ *
+ * @sst: driver context (unused here, kept for signature compatibility)
+ * @elf: ELF header (provides e_phnum)
+ * @pr: program header table
+ * @info: memory window ranges and dma_max_len
+ *
+ * For every non-empty PT_LOAD segment landing in the IRAM, DRAM or IMR
+ * windows, one descriptor per dma_max_len chunk is needed (the three
+ * previously duplicated branches are folded into one).
+ */
+static unsigned int
+sst_get_elf_sg_len(struct intel_sst_drv *sst, Elf32_Ehdr *elf, Elf32_Phdr *pr,
+		struct sst_info info)
+{
+	unsigned int i, count = 0;
+
+	pr_debug("in %s: dma_max_len 0x%x\n", __func__, info.dma_max_len);
+
+	for (i = 0; i < elf->e_phnum; i++) {
+		Elf32_Addr paddr = pr[i].p_paddr;
+		Elf32_Word filesz = pr[i].p_filesz;
+
+		if (pr[i].p_type != PT_LOAD || !filesz)
+			continue;
+
+		if ((paddr >= info.iram_start && paddr < info.iram_end) ||
+		    (paddr >= info.dram_start && paddr < info.dram_end) ||
+		    (paddr >= info.imr_start && paddr < info.imr_end)) {
+			count += filesz / info.dma_max_len;
+			if (filesz % info.dma_max_len)
+				count++;
+		}
+	}
+
+	pr_debug("gotcha count %d\n", count);
+	return count;
+}
+
+/* Allocate and initialise a matching pair of source/destination
+ * scatterlist tables of @len entries.  Frees the source table if the
+ * destination allocation fails. */
+static int
+sst_init_dma_sg_list(struct intel_sst_drv *sst, unsigned int len,
+		struct scatterlist **src, struct scatterlist **dstn)
+{
+	struct scatterlist *s, *d;
+
+	s = kzalloc(len * sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+	sg_init_table(s, len);
+
+	d = kzalloc(len * sizeof(*d), GFP_KERNEL);
+	if (!d) {
+		kfree(s);
+		return -ENOMEM;
+	}
+	sg_init_table(d, len);
+
+	*src = s;
+	*dstn = d;
+	return 0;
+}
+
+/*
+ * sst_alloc_dma_chan - locate the platform DMAC and request a channel
+ *
+ * @dma: per-driver DMA state; dev/ch/slave are filled in on success
+ *
+ * Resolves the DMA controller by PCI id (CLV/MRFLD) or ACPI HID (BYT),
+ * requests a MEMCPY channel through chan_filter() and applies the
+ * mem-to-mem slave configuration.  Returns 0, -ENODEV or -EIO.
+ */
+static int sst_alloc_dma_chan(struct sst_dma *dma)
+{
+	dma_cap_mask_t mask;
+	struct intel_mid_dma_slave *slave = &dma->slave;
+	int retval;
+	struct pci_dev *dmac = NULL;
+	const char *hid;
+
+	pr_debug("%s\n", __func__);
+	dma->dev = NULL;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		dmac = pci_get_device(PCI_VENDOR_ID_INTEL,
+				      PCI_DMAC_CLV_ID, NULL);
+	else if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID)
+		dmac = pci_get_device(PCI_VENDOR_ID_INTEL,
+				      PCI_DMAC_MRFLD_ID, NULL);
+	else if (sst_drv_ctx->pci_id == SST_BYT_PCI_ID) {
+		/* ORDER MATTERS: "LPE0F281" (8 chars) must be matched
+		 * before the 7-char prefix "LPE0F28", which would also
+		 * accept it */
+		hid = sst_drv_ctx->hid;
+		if (!strncmp(hid, "LPE0F281", 8))
+			dma->dev = intel_mid_get_acpi_dma("DMA0F28");
+		else if (!strncmp(hid, "80860F28", 8))
+			dma->dev = intel_mid_get_acpi_dma("ADMA0F28");
+		else if (!strncmp(hid, "808622A8", 8))
+			dma->dev = intel_mid_get_acpi_dma("ADMA22A8");
+		else if (!strncmp(hid, "LPE0F28", 7))
+			dma->dev = intel_mid_get_acpi_dma("DMA0F28");
+	}
+
+	if (!dmac && !dma->dev) {
+		pr_err("Can't find DMAC\n");
+		return -ENODEV;
+	}
+	if (dmac)
+		dma->dev = &dmac->dev;
+
+	dma->ch = dma_request_channel(mask, chan_filter, dma);
+	if (!dma->ch) {
+		pr_err("unable to request dma channel\n");
+		return -EIO;
+	}
+
+	/* mem-to-mem transfer, 4-byte bus width, burst of 16 */
+	slave->dma_slave.direction = DMA_MEM_TO_MEM;
+	slave->hs_mode = 0;
+	slave->cfg_mode = LNW_DMA_MEM_TO_MEM;
+	slave->dma_slave.src_addr_width = slave->dma_slave.dst_addr_width =
+						DMA_SLAVE_BUSWIDTH_4_BYTES;
+	slave->dma_slave.src_maxburst = slave->dma_slave.dst_maxburst =
+							LNW_DMA_MSIZE_16;
+
+	retval = dmaengine_slave_config(dma->ch, &slave->dma_slave);
+	if (retval) {
+		pr_err("unable to set slave config, err %d\n", retval);
+		dma_release_channel(dma->ch);
+		return -EIO;
+	}
+	return retval;
+}
+
+/*
+ * DMA completion callback: wake the block waiting on FW download.
+ * Fixed: the original assigned @arg to the GLOBAL sst_drv_ctx instead
+ * of declaring a local, silently clobbering driver-wide state from a
+ * completion context.
+ */
+static void sst_dma_transfer_complete(void *arg)
+{
+	struct intel_sst_drv *sst = (struct intel_sst_drv *)arg;
+
+	pr_debug(" sst_dma_transfer_complete\n");
+	sst_wake_up_block(sst, 0, FW_DWNL_ID, FW_DWNL_ID, NULL, 0);
+}
+
+/*
+ * sst_dma_wait_for_completion - submit the prepared descriptor and wait
+ *
+ * @sst: driver context whose ->desc was prepared by the caller
+ *
+ * Uses the @sst parameter consistently (the original mixed it with the
+ * global sst_drv_ctx).  Falls back to dma_wait_for_async_tx() if the
+ * block wait times out.  Returns 0 or the wait status / -ENOMEM.
+ */
+static inline int sst_dma_wait_for_completion(struct intel_sst_drv *sst)
+{
+	int ret = 0;
+	struct sst_block *block;
+
+	/* call prep and wait */
+	sst->desc->callback = sst_dma_transfer_complete;
+	sst->desc->callback_param = sst;
+
+	block = sst_create_block(sst, FW_DWNL_ID, FW_DWNL_ID);
+	if (block == NULL)
+		return -ENOMEM;
+
+	sst->desc->tx_submit(sst->desc);
+	ret = sst_wait_timeout(sst, block);
+	if (ret)
+		dma_wait_for_async_tx(sst->desc);
+	sst_free_block(sst, block);
+	return ret;
+}
+
+/*
+ * sst_dma_firmware - DMA the firmware scatter-gather list into SST memory
+ *
+ * @dma: channel/state acquired by sst_alloc_dma_chan()
+ * @sg_list: matched source/destination scatterlists and their length
+ *
+ * Uses a single prep_dma_sg transfer when LLI mode is available,
+ * otherwise issues one memcpy descriptor per entry and waits for each.
+ * Returns 0 or -EFAULT / the wait status on failure.
+ */
+static int sst_dma_firmware(struct sst_dma *dma, struct sst_sg_list *sg_list)
+{
+	int retval = 0;
+	enum dma_ctrl_flags flag = DMA_CTRL_ACK;
+	struct scatterlist *sg_src_list, *sg_dst_list;
+	int length;
+	pr_debug("%s: use_lli %d\n", __func__, sst_drv_ctx->use_lli);
+
+	sg_src_list = sg_list->src;
+	sg_dst_list = sg_list->dst;
+	length = sg_list->list_len;
+
+	/* BY default PIMR is unsmasked
+	 * FW gets unmaksed dma intr too, so mask it for FW to execute on mrfld
+	 */
+	if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID ||
+	    sst_drv_ctx->pci_id == SST_BYT_PCI_ID)
+		sst_shim_write(sst_drv_ctx->shim, SST_PIMR, 0xFFFF0034);
+
+	if (sst_drv_ctx->use_lli) {
+		/* one linked-list transfer covers the whole image */
+		sst_drv_ctx->desc = dma->ch->device->device_prep_dma_sg(dma->ch,
+					sg_dst_list, length,
+					sg_src_list, length, flag);
+		if (!sst_drv_ctx->desc)
+			return -EFAULT;
+		retval = sst_dma_wait_for_completion(sst_drv_ctx);
+		if (retval)
+			pr_err("sst_dma_firmware..timeout!\n");
+	} else {
+		struct scatterlist *sg;
+		dma_addr_t src_addr, dstn_addr;
+		int i = 0;
+
+		/* dma single block mode: one descriptor per sg entry,
+		 * advancing the destination list in lock-step */
+		for_each_sg(sg_src_list, sg, length, i) {
+			pr_debug("dma desc %d, length %d\n", i, sg->length);
+			src_addr = sg_phys(sg);
+			dstn_addr = sg_phys(sg_dst_list);
+			if (sg_dst_list)
+				sg_dst_list = sg_next(sg_dst_list);
+			sst_drv_ctx->desc = dma->ch->device->device_prep_dma_memcpy(
+					dma->ch, dstn_addr, src_addr, sg->length, flag);
+			if (!sst_drv_ctx->desc)
+				return -EFAULT;
+			retval = sst_dma_wait_for_completion(sst_drv_ctx);
+			if (retval)
+				pr_err("sst_dma_firmware..timeout!\n");
+
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * sst_fill_sglist - Fill the sg list
+ *
+ * @from: src address of the fw
+ * @to: virtual address of IRAM/DRAM
+ * @block_size: size of the block
+ * @sg_src: source scatterlist pointer
+ * @sg_dstn: destination scatterlist pointer
+ * @fw_sg_list: Pointer to the sg_list
+ * @dma_max_len: maximum len of the DMA block
+ *
+ * Parses modules that need to be placed in SST IRAM and DRAM
+ * and stores them in a sg list for transfer
+ * Returns 0 on success or a negative error code if list creation fails.
+ */
+static int sst_fill_sglist(unsigned long from, unsigned long to,
+ u32 block_size, struct scatterlist **sg_src, struct scatterlist **sg_dstn,
+ struct sst_sg_list *fw_sg_list, u32 dma_max_len)
+{
+ u32 offset = 0;
+ int len = 0;
+ unsigned long dstn, src;
+
+ pr_debug("%s entry", __func__);
+ if (!sg_src || !sg_dstn)
+ return -EINVAL;
+
+ do {
+ dstn = (unsigned long) (to + offset);
+ src = (unsigned long) (from + offset);
+
+ /* split blocks to dma_max_len */
+
+ len = block_size - offset;
+ pr_debug("DMA blk src %lx,dstn %lx,len %d,offset %d, size %d\n",
+ src, dstn, len, offset, block_size);
+ if (len > dma_max_len) {
+ pr_debug("block size exceeds %d\n", dma_max_len);
+ len = dma_max_len;
+ offset += len;
+ } else {
+ pr_debug("Node length less that %d\n", dma_max_len);
+ offset = 0;
+ }
+
+ if (!(*sg_src) || !(*sg_dstn))
+ return -ENOMEM;
+
+ sg_set_page(*sg_src, virt_to_page((void *) src), len,
+ offset_in_page((void *) src));
+ sg_set_page(*sg_dstn, virt_to_page((void *) dstn), len,
+ offset_in_page((void *) dstn));
+
+ *sg_src = sg_next(*sg_src);
+ *sg_dstn = sg_next(*sg_dstn);
+
+ /* TODO: is sg_idx required? */
+ if (sst_drv_ctx->info.use_elf == true)
+ fw_sg_list->sg_idx++;
+ } while (offset > 0);
+
+ return 0;
+}
+
+static int sst_parse_elf_module_dma(struct intel_sst_drv *sst, const void *fw,
+ struct sst_info info, Elf32_Phdr *pr,
+ struct scatterlist **sg_src, struct scatterlist **sg_dstn,
+ struct sst_sg_list *fw_sg_list)
+{
+ unsigned long dstn, src;
+ unsigned int dstn_phys;
+ int ret_val = 0;
+ int mem_type;
+
+ ret_val = sst_fill_dstn(sst, info, pr, (void *)&dstn, &dstn_phys, &mem_type);
+ if (ret_val)
+ return ret_val;
+
+ dstn = (unsigned long) phys_to_virt(dstn_phys);
+ src = (unsigned long) (fw + pr->p_offset);
+
+ ret_val = sst_fill_sglist(src, dstn, pr->p_filesz,
+ sg_src, sg_dstn, fw_sg_list, sst->info.dma_max_len);
+
+ return ret_val;
+}
+
+static int
+sst_parse_elf_fw_dma(struct intel_sst_drv *sst, const void *fw_in_mem,
+ struct sst_sg_list *fw_sg_list)
+{
+ int i = 0, ret = 0;
+ Elf32_Ehdr *elf;
+ Elf32_Phdr *pr;
+ struct sst_info info;
+ struct scatterlist *sg_src = NULL, *sg_dst = NULL;
+ unsigned int sg_len;
+
+ BUG_ON(!fw_in_mem);
+
+ elf = (Elf32_Ehdr *)fw_in_mem;
+ pr = (Elf32_Phdr *) (fw_in_mem + elf->e_phoff);
+ pr_debug("%s entry\n", __func__);
+
+ sst_fill_info(sst, &info);
+
+ sg_len = sst_get_elf_sg_len(sst, elf, pr, info);
+ if (sg_len == 0) {
+ pr_err("we got NULL sz ELF, abort\n");
+ return -EIO;
+ }
+
+ if (sst_init_dma_sg_list(sst, sg_len, &sg_src, &sg_dst)) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ fw_sg_list->src = sg_src;
+ fw_sg_list->dst = sg_dst;
+ fw_sg_list->list_len = sg_len;
+ fw_sg_list->sg_idx = 0;
+
+ while (i < elf->e_phnum) {
+ if ((pr[i].p_type == PT_LOAD) && (pr[i].p_filesz)) {
+ ret = sst_parse_elf_module_dma(sst, fw_in_mem, info,
+ &pr[i], &sg_src, &sg_dst, fw_sg_list);
+ if (ret)
+ goto err;
+ }
+ i++;
+ }
+ return 0;
+err:
+ kfree(fw_sg_list->src);
+ kfree(fw_sg_list->dst);
+err1:
+ fw_sg_list->src = NULL;
+ fw_sg_list->dst = NULL;
+ fw_sg_list->list_len = 0;
+ fw_sg_list->sg_idx = 0;
+
+ return ret;
+}
+
+/**
+ * sst_parse_module_dma - Parse audio FW modules and populate the dma list
+ *
+ * @sst_ctx : sst driver context
+ * @module : FW module header
+ * @sg_list : Pointer to the sg_list to be populated
+ * Counts the required length for the scatter-gather list
+ * and creates a scatter-gather list of that length.
+ * returns error or 0 if module sizes are proper
+ */
+static int sst_parse_module_dma(struct intel_sst_drv *sst_ctx,
+ struct fw_module_header *module,
+ struct sst_sg_list *sg_list)
+{
+ struct fw_block_info *block;
+ u32 count;
+ unsigned long ram, src;
+ int retval, sg_len = 0;
+ struct scatterlist *sg_src, *sg_dst;
+
+ pr_debug("module sign %s size %x blocks %x type %x\n",
+ module->signature, module->mod_size,
+ module->blocks, module->type);
+ pr_debug("module entrypoint 0x%x\n", module->entry_point);
+
+ block = (void *)module + sizeof(*module);
+
+ for (count = 0; count < module->blocks; count++) {
+ sg_len += (block->size) / sst_drv_ctx->info.dma_max_len;
+
+ if (block->size % sst_drv_ctx->info.dma_max_len)
+ sg_len = sg_len + 1;
+ block = (void *)block + sizeof(*block) + block->size;
+ }
+
+ if (sst_init_dma_sg_list(sst_ctx, sg_len, &sg_src, &sg_dst)) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ sg_list->src = sg_src;
+ sg_list->dst = sg_dst;
+ sg_list->list_len = sg_len;
+
+ block = (void *)module + sizeof(*module);
+
+ for (count = 0; count < module->blocks; count++) {
+ if (block->size <= 0) {
+ pr_err("block size invalid\n");
+ retval = -EINVAL;
+ goto err;
+ }
+ switch (block->type) {
+ case SST_IRAM:
+ ram = sst_ctx->iram_base;
+ break;
+ case SST_DRAM:
+ ram = sst_ctx->dram_base;
+ break;
+ default:
+ pr_err("wrong ram type0x%x in block0x%x\n",
+ block->type, count);
+ retval = -EINVAL;
+ goto err;
+ }
+
+ /*converting from physical to virtual because
+ scattergather list works on virtual pointers*/
+ ram = (unsigned long) phys_to_virt(ram);
+ ram = (unsigned long)(ram + block->ram_offset);
+ src = (unsigned long) (void *)block + sizeof(*block);
+
+ retval = sst_fill_sglist(src, ram,
+ block->size, &sg_src, &sg_dst,
+ sg_list, sst_ctx->info.dma_max_len);
+ if (retval)
+ goto err;
+
+ block = (void *)block + sizeof(*block) + block->size;
+ }
+ return 0;
+err:
+ kfree(sg_list->src);
+ kfree(sg_list->dst);
+err1:
+ sg_list->src = NULL;
+ sg_list->dst = NULL;
+ sg_list->list_len = 0;
+
+ return retval;
+}
+
+/**
+ * sst_parse_fw_dma - parse the firmware image & populate the list for dma
+ *
+ * @sst_fw_in_mem : pointer to audio fw
+ * @size : size of the firmware
+ * @fw_list : pointer to sst_sg_list to be populated
+ * This function parses the FW image and saves the parsed image in the list
+ * for dma
+ */
+static int sst_parse_fw_dma(const void *sst_fw_in_mem, unsigned long size,
+ struct sst_sg_list *fw_list)
+{
+ struct fw_module_header *module;
+ u32 count, num_modules;
+ int ret_val;
+
+ ret_val = sst_validate_fw_image(sst_fw_in_mem, size,
+ &module, &num_modules);
+ if (ret_val)
+ return ret_val;
+
+ for (count = 0; count < num_modules; count++) {
+ /* module */
+ ret_val = sst_parse_module_dma(sst_drv_ctx, module, fw_list);
+ if (ret_val)
+ return ret_val;
+ module = (void *)module + sizeof(*module) + module->mod_size ;
+ }
+
+ return 0;
+}
+
+static void sst_dma_free_resources(struct sst_dma *dma)
+{
+ pr_debug("entry:%s\n", __func__);
+
+ dma_release_channel(dma->ch);
+}
+
+void sst_fill_config(struct intel_sst_drv *sst_ctx, unsigned int offset)
+{
+ struct sst_fill_config sst_config;
+
+ if (!(sst_ctx->pdata->bdata && sst_ctx->pdata->pdata))
+ return;
+
+ sst_config.sign = SST_CONFIG_SSP_SIGN;
+ memcpy(&sst_config.sst_bdata, sst_ctx->pdata->bdata, sizeof(struct sst_board_config_data));
+ memcpy(&sst_config.sst_pdata, sst_ctx->pdata->pdata, sizeof(struct sst_platform_config_data));
+ sst_config.shim_phy_add = sst_ctx->shim_phy_add;
+ sst_config.mailbox_add = sst_ctx->mailbox_add;
+ MEMCPY_TOIO(sst_ctx->dram + offset, &sst_config, sizeof(sst_config));
+
+}
+
+/**
+ * sst_do_dma - function allocs and initiates the DMA
+ *
+ * @sg_list: Pointer to dma list on which the dma needs to be initiated
+ *
+ * Triggers the DMA
+ */
+static int sst_do_dma(struct sst_sg_list *sg_list)
+{
+ int ret_val;
+
+ /* get a dmac channel */
+ ret_val = sst_alloc_dma_chan(&sst_drv_ctx->dma);
+ if (ret_val)
+ return ret_val;
+
+ /* allocate desc for transfer and submit */
+ ret_val = sst_dma_firmware(&sst_drv_ctx->dma, sg_list);
+
+ sst_dma_free_resources(&sst_drv_ctx->dma);
+
+ return ret_val;
+}
+
+/*
+ * sst_fill_memcpy_list - Fill the memcpy list
+ *
+ * @memcpy_list: List to be filled
+ * @destn: Destination addr to be filled in the list
+ * @src: Source addr to be filled in the list
+ * @size: Size to be filled in the list
+ *
+ * Adds the node to the list after required fields
+ * are populated in the node
+ */
+
+static int sst_fill_memcpy_list(struct list_head *memcpy_list,
+ void *destn, const void *src, u32 size, bool is_io)
+{
+ struct sst_memcpy_list *listnode;
+
+ listnode = kzalloc(sizeof(*listnode), GFP_KERNEL);
+ if (listnode == NULL)
+ return -ENOMEM;
+ listnode->dstn = destn;
+ listnode->src = src;
+ listnode->size = size;
+ listnode->is_io = is_io;
+ list_add_tail(&listnode->memcpylist, memcpy_list);
+
+ return 0;
+}
+
+static int sst_parse_elf_module_memcpy(struct intel_sst_drv *sst,
+ const void *fw, struct sst_info info, Elf32_Phdr *pr,
+ struct list_head *memcpy_list)
+{
+ void *dstn;
+ unsigned int dstn_phys;
+ int ret_val = 0;
+ int mem_type;
+
+ ret_val = sst_fill_dstn(sst, info, pr, &dstn, &dstn_phys, &mem_type);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = sst_fill_memcpy_list(memcpy_list, dstn,
+ (void *)fw + pr->p_offset, pr->p_filesz, mem_type);
+ if (ret_val)
+ return ret_val;
+
+ return 0;
+}
+
+static int
+sst_parse_elf_fw_memcpy(struct intel_sst_drv *sst, const void *fw_in_mem,
+ struct list_head *memcpy_list)
+{
+ int i = 0;
+
+ Elf32_Ehdr *elf;
+ Elf32_Phdr *pr;
+ struct sst_info info;
+
+ BUG_ON(!fw_in_mem);
+
+ elf = (Elf32_Ehdr *)fw_in_mem;
+ pr = (Elf32_Phdr *) (fw_in_mem + elf->e_phoff);
+ pr_debug("%s entry\n", __func__);
+
+ sst_fill_info(sst, &info);
+
+ while (i < elf->e_phnum) {
+ if (pr[i].p_type == PT_LOAD)
+ sst_parse_elf_module_memcpy(sst, fw_in_mem, info,
+ &pr[i], memcpy_list);
+ i++;
+ }
+ return 0;
+}
+
+/**
+ * sst_parse_module_memcpy - Parse audio FW modules and populate the memcpy list
+ *
+ * @module : FW module header
+ * @memcpy_list : Pointer to the list to be populated
+ * Creates the memcpy list with one node per block to be copied
+ * returns error or 0 if module sizes are proper
+ */
+static int sst_parse_module_memcpy(struct fw_module_header *module,
+ struct list_head *memcpy_list)
+{
+ struct fw_block_info *block;
+ u32 count;
+ int ret_val = 0;
+ void __iomem *ram_iomem;
+
+ pr_debug("module sign %s size %x blocks %x type %x\n",
+ module->signature, module->mod_size,
+ module->blocks, module->type);
+ pr_debug("module entrypoint 0x%x\n", module->entry_point);
+
+ block = (void *)module + sizeof(*module);
+
+ for (count = 0; count < module->blocks; count++) {
+ if (block->size <= 0) {
+ pr_err("block size invalid\n");
+ return -EINVAL;
+ }
+ switch (block->type) {
+ case SST_IRAM:
+ ram_iomem = sst_drv_ctx->iram;
+ break;
+ case SST_DRAM:
+ ram_iomem = sst_drv_ctx->dram;
+ break;
+ default:
+ pr_err("wrong ram type0x%x in block0x%x\n",
+ block->type, count);
+ return -EINVAL;
+ }
+
+ ret_val = sst_fill_memcpy_list(memcpy_list,
+ ram_iomem + block->ram_offset,
+ (void *)block + sizeof(*block), block->size, 1);
+ if (ret_val)
+ return ret_val;
+
+ block = (void *)block + sizeof(*block) + block->size;
+ }
+ return 0;
+}
+
+/**
+ * sst_parse_fw_memcpy - parse the firmware image & populate the list for memcpy
+ *
+ * @sst_fw_in_mem : pointer to audio fw
+ * @size : size of the firmware
+ * @fw_list : pointer to list_head to be populated
+ * This function parses the FW image and saves the parsed image in the list
+ * for memcpy
+ */
+static int sst_parse_fw_memcpy(const void *sst_fw_in_mem, unsigned long size,
+ struct list_head *fw_list)
+{
+ struct fw_module_header *module;
+ u32 count, num_modules;
+ int ret_val;
+
+ ret_val = sst_validate_fw_image(sst_fw_in_mem, size,
+ &module, &num_modules);
+ if (ret_val)
+ return ret_val;
+
+ for (count = 0; count < num_modules; count++) {
+ /* module */
+ ret_val = sst_parse_module_memcpy(module, fw_list);
+ if (ret_val)
+ return ret_val;
+ module = (void *)module + sizeof(*module) + module->mod_size ;
+ }
+
+ return 0;
+}
+
+/**
+ * sst_do_memcpy - function initiates the memcpy
+ *
+ * @memcpy_list: Pointer to memcpy list on which the memcpy needs to be initiated
+ *
+ * Triggers the memcpy
+ */
+static void sst_do_memcpy(struct list_head *memcpy_list)
+{
+ struct sst_memcpy_list *listnode;
+
+ list_for_each_entry(listnode, memcpy_list, memcpylist) {
+ if (listnode->is_io == true)
+ MEMCPY_TOIO((void __iomem *)listnode->dstn, listnode->src,
+ listnode->size);
+ else
+ memcpy(listnode->dstn, listnode->src, listnode->size);
+ }
+}
+
+static void sst_memcpy_free_lib_resources(void)
+{
+ struct sst_memcpy_list *listnode, *tmplistnode;
+
+ pr_debug("entry:%s\n", __func__);
+
+ /*Free the list*/
+ if (!list_empty(&sst_drv_ctx->libmemcpy_list)) {
+ list_for_each_entry_safe(listnode, tmplistnode,
+ &sst_drv_ctx->libmemcpy_list, memcpylist) {
+ list_del(&listnode->memcpylist);
+ kfree(listnode);
+ }
+ }
+}
+
+void sst_memcpy_free_resources(void)
+{
+ struct sst_memcpy_list *listnode, *tmplistnode;
+
+ pr_debug("entry:%s\n", __func__);
+
+ /*Free the list*/
+ if (!list_empty(&sst_drv_ctx->memcpy_list)) {
+ list_for_each_entry_safe(listnode, tmplistnode,
+ &sst_drv_ctx->memcpy_list, memcpylist) {
+ list_del(&listnode->memcpylist);
+ kfree(listnode);
+ }
+ }
+ sst_memcpy_free_lib_resources();
+}
+
+void sst_firmware_load_cb(const struct firmware *fw, void *context)
+{
+ struct intel_sst_drv *ctx = context;
+ int ret = 0;
+
+ pr_debug("In %s\n", __func__);
+
+ if (fw == NULL) {
+ pr_err("request fw failed\n");
+ goto out;
+ }
+
+ if (sst_drv_ctx->sst_state != SST_UN_INIT ||
+ ctx->fw_in_mem != NULL)
+ goto exit;
+
+ pr_debug("Request Fw completed\n");
+ trace_sst_fw_download("End of FW request", ctx->sst_state);
+
+ if (ctx->info.use_elf == true)
+ ret = sst_validate_elf(fw, false);
+
+ if (ret != 0) {
+ pr_err("FW image invalid...\n");
+ goto out;
+ }
+
+ ctx->fw_in_mem = kzalloc(fw->size, GFP_KERNEL);
+ if (!ctx->fw_in_mem) {
+ pr_err("%s unable to allocate memory\n", __func__);
+ goto out;
+ }
+
+ pr_debug("copied fw to %p", ctx->fw_in_mem);
+ pr_debug("phys: %lx", (unsigned long)virt_to_phys(ctx->fw_in_mem));
+ memcpy(ctx->fw_in_mem, fw->data, fw->size);
+
+ trace_sst_fw_download("Start FW parsing", ctx->sst_state);
+ if (ctx->use_dma) {
+ if (ctx->info.use_elf == true)
+ ret = sst_parse_elf_fw_dma(ctx, ctx->fw_in_mem,
+ &ctx->fw_sg_list);
+ else
+ ret = sst_parse_fw_dma(ctx->fw_in_mem, fw->size,
+ &ctx->fw_sg_list);
+ } else {
+ if (ctx->info.use_elf == true)
+ ret = sst_parse_elf_fw_memcpy(ctx, ctx->fw_in_mem,
+ &ctx->memcpy_list);
+ else
+ ret = sst_parse_fw_memcpy(ctx->fw_in_mem, fw->size,
+ &ctx->memcpy_list);
+ }
+ trace_sst_fw_download("End FW parsing", ctx->sst_state);
+ if (ret) {
+ kfree(ctx->fw_in_mem);
+ ctx->fw_in_mem = NULL;
+ goto out;
+ }
+
+ /* If static module download(download at boot time) is supported,
+ * set the flag to indicate lib download is to be done
+ */
+ if (ctx->pdata->lib_info)
+ if (ctx->pdata->lib_info->mod_ddr_dnld)
+ ctx->lib_dwnld_reqd = true;
+
+ sst_set_fw_state_locked(sst_drv_ctx, SST_FW_LIB_LOAD);
+ goto exit;
+out:
+ sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+exit:
+ if (fw != NULL)
+ release_firmware(fw);
+}
+
+/*
+ * sst_request_fw - requests audio fw from kernel and saves a copy
+ *
+ * This function requests the SST FW from the kernel, parses it and
+ * saves a copy in the driver context
+ */
+static int sst_request_fw(struct intel_sst_drv *sst)
+{
+ int retval = 0;
+ char name[20];
+ const struct firmware *fw;
+
+ snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
+ sst->pci_id, ".bin");
+ pr_debug("Requesting FW %s now...\n", name);
+
+ retval = request_firmware(&fw, name, sst->dev);
+ if (fw == NULL) {
+ pr_err("fw is returning as null\n");
+ return -EINVAL;
+ }
+ if (retval) {
+ pr_err("request fw failed %d\n", retval);
+ return retval;
+ }
+ trace_sst_fw_download("End of FW request", sst->sst_state);
+ if (sst->info.use_elf == true)
+ retval = sst_validate_elf(fw, false);
+ if (retval != 0) {
+ pr_err("FW image invalid...\n");
+ goto end_release;
+ }
+ sst->fw_in_mem = kzalloc(fw->size, GFP_KERNEL);
+ if (!sst->fw_in_mem) {
+ pr_err("%s unable to allocate memory\n", __func__);
+ retval = -ENOMEM;
+ goto end_release;
+ }
+ pr_debug("copied fw to %p", sst->fw_in_mem);
+ pr_debug("phys: %lx", (unsigned long)virt_to_phys(sst->fw_in_mem));
+ memcpy(sst->fw_in_mem, fw->data, fw->size);
+ trace_sst_fw_download("Start FW parsing", sst->sst_state);
+ if (sst->use_dma) {
+ if (sst->info.use_elf == true)
+ retval = sst_parse_elf_fw_dma(sst, sst->fw_in_mem,
+ &sst->fw_sg_list);
+ else
+ retval = sst_parse_fw_dma(sst->fw_in_mem, fw->size,
+ &sst->fw_sg_list);
+ } else {
+ if (sst->info.use_elf == true)
+ retval = sst_parse_elf_fw_memcpy(sst, sst->fw_in_mem,
+ &sst->memcpy_list);
+ else
+ retval = sst_parse_fw_memcpy(sst->fw_in_mem, fw->size,
+ &sst->memcpy_list);
+ }
+ trace_sst_fw_download("End FW parsing", sst->sst_state);
+ if (retval) {
+ kfree(sst->fw_in_mem);
+ sst->fw_in_mem = NULL;
+ }
+
+ /* If static module download(download at boot time) is supported,
+ * set the flag to indicate lib download is to be done
+ */
+ if (sst->pdata->lib_info)
+ if (sst->pdata->lib_info->mod_ddr_dnld)
+ sst->lib_dwnld_reqd = true;
+end_release:
+ release_firmware(fw);
+ return retval;
+}
+
+static inline void print_lib_info(struct snd_sst_lib_download_info *resp)
+{
+ pr_debug("codec Type %d Ver %d Built %s: %s\n",
+ resp->dload_lib.lib_info.lib_type,
+ resp->dload_lib.lib_info.lib_version,
+ resp->dload_lib.lib_info.b_date,
+ resp->dload_lib.lib_info.b_time);
+}
+
+/* sst_download_library - This function is called when any
+ codec/post processing library needs to be downloaded */
+static int sst_download_library(const struct firmware *fw_lib,
+ struct snd_sst_lib_download_info *lib)
+{
+ int ret_val = 0;
+
+ /* send IPC message and wait */
+ u8 pvt_id;
+ struct ipc_post *msg = NULL;
+ union config_status_reg csr;
+ struct snd_sst_str_type str_type = {0};
+ int retval = 0;
+ void *codec_fw;
+ struct sst_block *block;
+
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ ret_val = sst_create_block_and_ipc_msg(&msg, true, sst_drv_ctx, &block,
+ IPC_IA_PREP_LIB_DNLD, pvt_id);
+ if (ret_val) {
+ pr_err("library download failed\n");
+ return ret_val;
+ }
+
+ sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, pvt_id);
+ msg->header.part.data = sizeof(u32) + sizeof(str_type);
+ str_type.codec_type = lib->dload_lib.lib_info.lib_type;
+ /*str_type.pvt_id = pvt_id;*/
+ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+ memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ retval = sst_wait_timeout(sst_drv_ctx, block);
+ if (block->data) {
+ struct snd_sst_str_type *str_type =
+ (struct snd_sst_str_type *)block->data;
+ if (str_type->result) {
+ /* error */
+ pr_err("Prep codec downloaded failed %d\n",
+ str_type->result);
+ retval = -EIO;
+ goto free_block;
+ }
+ kfree(block->data);
+ } else if (retval != 0) {
+ retval = -EIO;
+ goto free_block;
+ }
+ pr_debug("FW responded, ready for download now...\n");
+ codec_fw = kzalloc(fw_lib->size, GFP_KERNEL);
+ if (!codec_fw) {
+ memset(lib, 0, sizeof(*lib));
+ retval = -ENOMEM;
+ goto send_ipc;
+ }
+ memcpy(codec_fw, fw_lib->data, fw_lib->size);
+
+ if (sst_drv_ctx->use_dma)
+ retval = sst_parse_fw_dma(codec_fw, fw_lib->size,
+ &sst_drv_ctx->library_list);
+ else
+ retval = sst_parse_fw_memcpy(codec_fw, fw_lib->size,
+ &sst_drv_ctx->libmemcpy_list);
+
+ if (retval) {
+ memset(lib, 0, sizeof(*lib));
+ goto send_ipc;
+ }
+
+ /* downloading on success */
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ sst_drv_ctx->sst_state = SST_FW_LOADED;
+ mutex_lock(&sst_drv_ctx->csr_lock);
+ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
+ csr.part.run_stall = 1;
+ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+ csr.part.bypass = 0x7;
+ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+ mutex_unlock(&sst_drv_ctx->csr_lock);
+
+ if (sst_drv_ctx->use_dma) {
+ ret_val = sst_do_dma(&sst_drv_ctx->library_list);
+ if (ret_val) {
+ pr_err("sst_do_dma failed, abort\n");
+ memset(lib, 0, sizeof(*lib));
+ }
+ } else
+ sst_do_memcpy(&sst_drv_ctx->libmemcpy_list);
+ /* set the FW to running again */
+ mutex_lock(&sst_drv_ctx->csr_lock);
+ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+ csr.part.bypass = 0x0;
+ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+ csr.part.run_stall = 0;
+ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+ mutex_unlock(&sst_drv_ctx->csr_lock);
+send_ipc:
+ /* send download complete and wait */
+ if (sst_create_ipc_msg(&msg, true)) {
+ retval = -ENOMEM;
+ goto free_resources;
+ }
+
+ block->condition = false;
+ block->msg_id = IPC_IA_LIB_DNLD_CMPLT;
+ sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, pvt_id);
+ msg->header.part.data = sizeof(u32) + sizeof(*lib);
+ lib->pvt_id = pvt_id;
+ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+ memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ pr_debug("Waiting for FW response Download complete\n");
+ retval = sst_wait_timeout(sst_drv_ctx, block);
+ sst_drv_ctx->sst_state = SST_FW_RUNNING;
+ if (block->data) {
+ struct snd_sst_lib_download_info *resp = block->data;
+ retval = resp->result;
+ if (retval) {
+ pr_err("err in lib dload %x\n", resp->result);
+ goto free_resources;
+ } else {
+ pr_debug("Codec download complete...\n");
+ print_lib_info(resp);
+ }
+ } else if (retval) {
+ /* error */
+ retval = -EIO;
+ goto free_resources;
+ }
+
+ pr_debug("FW success on Download complete\n");
+
+free_resources:
+ if (sst_drv_ctx->use_dma) {
+ kfree(sst_drv_ctx->library_list.src);
+ kfree(sst_drv_ctx->library_list.dst);
+ sst_drv_ctx->library_list.list_len = 0;
+ }
+
+ kfree(codec_fw);
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+free_block:
+ sst_free_block(sst_drv_ctx, block);
+ return retval;
+}
+
+/*
+ * Writing the DDR physical base to DCCM offset
+ * so that FW can use it to setup TLB
+ */
+static void sst_dccm_config_write(void __iomem *dram_base, unsigned int ddr_base)
+{
+ void __iomem *addr;
+ u32 bss_reset = 0;
+
+ addr = (void __iomem *)(dram_base + MRFLD_FW_DDR_BASE_OFFSET);
+ MEMCPY_TOIO(addr, (void *)&ddr_base, sizeof(u32));
+ bss_reset |= (1 << MRFLD_FW_BSS_RESET_BIT);
+ addr = (void __iomem *)(dram_base + MRFLD_FW_FEATURE_BASE_OFFSET);
+ MEMCPY_TOIO(addr, &bss_reset, sizeof(u32));
+ pr_debug("%s: config written to DCCM\n", __func__);
+}
+
+void sst_post_download_mrfld(struct intel_sst_drv *ctx)
+{
+ sst_dccm_config_write(ctx->dram, ctx->ddr_base);
+ /* For mrfld, download all libraries the first time fw is
+ * downloaded */
+ pr_debug("%s: lib_dwnld = %u\n", __func__, ctx->lib_dwnld_reqd);
+ if (ctx->lib_dwnld_reqd) {
+ sst_load_all_modules_elf(ctx, sst_modules_mrfld, ARRAY_SIZE(sst_modules_mrfld));
+ ctx->lib_dwnld_reqd = false;
+ }
+}
+
+void sst_post_download_ctp(struct intel_sst_drv *ctx)
+{
+ sst_fill_config(ctx, 0);
+}
+
+void sst_post_download_byt(struct intel_sst_drv *ctx)
+{
+ sst_dccm_config_write(ctx->dram, ctx->ddr_base);
+ sst_fill_config(ctx, 2 * sizeof(u32));
+
+ pr_debug("%s: lib_dwnld = %u\n", __func__, ctx->lib_dwnld_reqd);
+ if (ctx->lib_dwnld_reqd) {
+ sst_load_all_modules_elf(ctx, sst_modules_byt,
+ ARRAY_SIZE(sst_modules_byt));
+ ctx->lib_dwnld_reqd = false;
+ }
+}
+
+static void sst_init_lib_mem_mgr(struct intel_sst_drv *ctx)
+{
+ struct sst_mem_mgr *mgr = &ctx->lib_mem_mgr;
+ const struct sst_lib_dnld_info *lib_info = ctx->pdata->lib_info;
+
+ memset(mgr, 0, sizeof(*mgr));
+ mgr->current_base = lib_info->mod_base + lib_info->mod_table_offset
+ + lib_info->mod_table_size;
+ mgr->avail = lib_info->mod_end - mgr->current_base + 1;
+
+ pr_debug("current base = 0x%lx , avail = 0x%x\n",
+ (unsigned long)mgr->current_base, mgr->avail);
+}
+
+/**
+ * sst_load_fw - function to load FW into DSP
+ *
+ *
+ * Transfers the FW to DSP using dma/memcpy
+ */
+int sst_load_fw(void)
+{
+ int ret_val = 0;
+ struct sst_block *block;
+
+ pr_debug("sst_load_fw\n");
+
+ if ((sst_drv_ctx->sst_state != SST_START_INIT &&
+ sst_drv_ctx->sst_state != SST_FW_LIB_LOAD) ||
+ sst_drv_ctx->sst_state == SST_SHUTDOWN)
+ return -EAGAIN;
+
+ if (!sst_drv_ctx->fw_in_mem) {
+ if (sst_drv_ctx->sst_state != SST_START_INIT) {
+ /* even wake*/
+ pr_err("sst : wait for FW to be downloaded\n");
+ return -EBUSY;
+ } else {
+ trace_sst_fw_download("Req FW sent in check device",
+ sst_drv_ctx->sst_state);
+ pr_debug("sst: FW not in memory retry to download\n");
+ ret_val = sst_request_fw(sst_drv_ctx);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ BUG_ON(!sst_drv_ctx->fw_in_mem);
+ block = sst_create_block(sst_drv_ctx, 0, FW_DWNL_ID);
+ if (block == NULL)
+ return -ENOMEM;
+
+ /* Prevent C-states beyond C6 */
+ pm_qos_update_request(sst_drv_ctx->qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
+
+ ret_val = sst_drv_ctx->ops->reset();
+ if (ret_val)
+ goto restore;
+
+ trace_sst_fw_download("Start FW copy", sst_drv_ctx->sst_state);
+ if (sst_drv_ctx->use_dma) {
+ ret_val = sst_do_dma(&sst_drv_ctx->fw_sg_list);
+ if (ret_val) {
+ pr_err("sst_do_dma failed, abort\n");
+ goto restore;
+ }
+ } else {
+ sst_do_memcpy(&sst_drv_ctx->memcpy_list);
+ }
+
+ trace_sst_fw_download("Post download for Lib start",
+ sst_drv_ctx->sst_state);
+ /* Write the DRAM/DCCM config before enabling FW */
+ if (sst_drv_ctx->ops->post_download)
+ sst_drv_ctx->ops->post_download(sst_drv_ctx);
+ trace_sst_fw_download("Post download for Lib end",
+ sst_drv_ctx->sst_state);
+ sst_drv_ctx->sst_state = SST_FW_LOADED;
+
+ /* bring sst out of reset */
+ ret_val = sst_drv_ctx->ops->start();
+ if (ret_val)
+ goto restore;
+ trace_sst_fw_download("DSP reset done",
+ sst_drv_ctx->sst_state);
+
+ ret_val = sst_wait_timeout(sst_drv_ctx, block);
+ if (ret_val) {
+ pr_err("fw download failed %d\n" , ret_val);
+ /* assume FW d/l failed due to timeout*/
+ ret_val = -EBUSY;
+
+ }
+
+restore:
+ /* Re-enable Deeper C-states beyond C6 */
+ pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+ sst_free_block(sst_drv_ctx, block);
+
+ return ret_val;
+}
+
+/**
+ * sst_load_library - function to load a codec library into DSP
+ *
+ * @lib: Pointer to the lib download structure
+ * @ops: Contains the stream ops
+ * This function is called when FW requests for a particular library download
+ * This function prepares & downloads the library
+ */
+int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
+{
+ char buf[20];
+ const char *type, *dir;
+ int len = 0, error = 0;
+ u32 entry_point;
+ const struct firmware *fw_lib;
+ struct snd_sst_lib_download_info dload_info = {{{0},},};
+
+ memset(buf, 0, sizeof(buf));
+
+ pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
+ lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
+ pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
+ lib->lib_info.lib_version, lib->lib_info.lib_name,
+ lib->lib_info.lib_caps, lib->lib_info.media_type);
+
+ pr_debug("IRAM Size 0x%x, offset 0x%x\n",
+ lib->slot_info.iram_size, lib->slot_info.iram_offset);
+ pr_debug("DRAM Size 0x%x, offset 0x%x\n",
+ lib->slot_info.dram_size, lib->slot_info.dram_offset);
+
+ switch (lib->lib_info.lib_type) {
+ case SST_CODEC_TYPE_MP3:
+ type = "mp3_";
+ break;
+ case SST_CODEC_TYPE_AAC:
+ type = "aac_";
+ break;
+ case SST_CODEC_TYPE_AACP:
+ type = "aac_v1_";
+ break;
+ case SST_CODEC_TYPE_eAACP:
+ type = "aac_v2_";
+ break;
+ case SST_CODEC_TYPE_WMA9:
+ type = "wma9_";
+ break;
+ default:
+ pr_err("Invalid codec type\n");
+ error = -EINVAL;
+ goto wake;
+ }
+
+ if (ops == STREAM_OPS_CAPTURE)
+ dir = "enc_";
+ else
+ dir = "dec_";
+ len = strlen(type) + strlen(dir);
+ strncpy(buf, type, sizeof(buf)-1);
+ strncpy(buf + strlen(type), dir, sizeof(buf)-strlen(type)-1);
+ len += snprintf(buf + len, sizeof(buf) - len, "%d",
+ lib->slot_info.slot_num);
+ len += snprintf(buf + len, sizeof(buf) - len, ".bin");
+
+ pr_debug("Requesting %s\n", buf);
+
+ error = request_firmware(&fw_lib, buf, sst_drv_ctx->dev);
+ if (fw_lib == NULL) {
+ pr_err("fw_lib pointer is returning null\n");
+ return -EINVAL;
+ }
+ if (error) {
+ pr_err("library load failed %d\n", error);
+ goto wake;
+ }
+ error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
+ if (error)
+ goto wake_free;
+
+ lib->mod_entry_pt = entry_point;
+ memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
+ /* Prevent C-states beyond C6 */
+ pm_qos_update_request(sst_drv_ctx->qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
+ error = sst_download_library(fw_lib, &dload_info);
+ /* Re-enable Deeper C-states beyond C6 */
+ pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+ if (error)
+ goto wake_free;
+
+ /* lib is downloaded and init send alloc again */
+ pr_debug("Library is downloaded now...\n");
+wake_free:
+ /* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
+ release_firmware(fw_lib);
+wake:
+ return error;
+}
+
+/* In relocatable elf file, there can be relocatable variables and functions.
+ * Variables are kept in Global Address Offset Table (GOT) and functions in
+ * Procedural Linkage Table (PLT). In current codec binaries only relocatable
+ * variables are seen. So we use the GOT table.
+ */
+static int sst_find_got_table(Elf32_Shdr *shdr, int nsec, char *in_elf,
+ Elf32_Rela **got, unsigned int *cnt)
+{
+ int i = 0;
+ while (i < nsec) {
+ if (shdr[i].sh_type == SHT_RELA) {
+ *got = (Elf32_Rela *)(in_elf + shdr[i].sh_offset);
+ *cnt = shdr[i].sh_size / sizeof(Elf32_Rela);
+ break;
+ }
+ i++;
+ }
+ if (i == nsec)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* For each entry in the GOT table, find the unrelocated offset. Then
+ * add the relocation base to the offset and write back the new address to the
+ * original variable location.
+ */
+static int sst_relocate_got_entries(Elf32_Rela *table, unsigned int size,
+ char *in_elf, int elf_size, u32 rel_base)
+{
+ int i;
+ Elf32_Rela *entry;
+ Elf32_Addr *target_addr, unreloc_addr;
+
+ for (i = 0; i < size; i++) {
+ entry = &table[i];
+ if (ELF32_R_SYM(entry->r_info) != 0) {
+ return -EINVAL;
+ } else {
+ if (entry->r_offset > elf_size) {
+ pr_err("GOT table target addr out of range\n");
+ return -EINVAL;
+ }
+ target_addr = (Elf32_Addr *)(in_elf + entry->r_offset);
+ unreloc_addr = *target_addr + entry->r_addend;
+ if (unreloc_addr > elf_size) {
+ pr_err("GOT table entry invalid\n");
+ continue;
+ }
+ *target_addr = unreloc_addr + rel_base;
+ }
+ }
+ return 0;
+}
+
+/* Relocate an in-memory ELF image so it can run at physical base
+ * @rel_base: patch the entry point (returned through @entry_pt), all GOT
+ * entries and every PT_LOAD program header, all in place.
+ * Returns 0 on success or a negative error code.
+ */
+static int sst_relocate_elf(char *in_elf, int elf_size, phys_addr_t rel_base,
+ Elf32_Addr *entry_pt)
+{
+ int retval = 0;
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)in_elf;
+ Elf32_Shdr *shdr = (Elf32_Shdr *) (in_elf + ehdr->e_shoff);
+ Elf32_Phdr *phdr = (Elf32_Phdr *) (in_elf + ehdr->e_phoff);
+ int i, num_sec;
+ Elf32_Rela *rel_table = NULL;
+ unsigned int rela_cnt = 0;
+ u32 rbase;
+
+ /* the DSP addresses are 32 bit, so the base must fit in a u32 */
+ BUG_ON(rel_base > (u32)(-1));
+ rbase = (u32) (rel_base & (u32)(~0));
+
+ /* relocate the entry_pt */
+ *entry_pt = (Elf32_Addr)(ehdr->e_entry + rbase);
+ num_sec = ehdr->e_shnum;
+
+ /* Find the relocation(GOT) table through the section header */
+ retval = sst_find_got_table(shdr, num_sec, in_elf,
+ &rel_table, &rela_cnt);
+ if (retval < 0)
+ return retval;
+
+ /* Relocate all the entries in the GOT */
+ retval = sst_relocate_got_entries(rel_table, rela_cnt, in_elf,
+ elf_size, rbase);
+ if (retval < 0)
+ return retval;
+
+ pr_debug("GOT entries relocated\n");
+
+ /* Update the program headers in the ELF */
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ if (phdr[i].p_type == PT_LOAD) {
+ phdr[i].p_vaddr += rbase;
+ phdr[i].p_paddr += rbase;
+ }
+ }
+ pr_debug("program header entries updated\n");
+
+ return retval;
+}
+
+#define ALIGN_256 0x100
+
+/* Reserve the next chunk of library memory from @mgr. @size is rounded
+ * up to a 256-byte boundary. Returns 0 with the chunk base stored in
+ * *lib_base, or -ENOMEM when the remaining pool is too small. */
+int sst_get_next_lib_mem(struct sst_mem_mgr *mgr, int size,
+ unsigned long *lib_base)
+{
+ int retval = 0;
+
+ pr_debug("library orig size = 0x%x", size);
+ /* round the request up to the next ALIGN_256 boundary */
+ if (size % ALIGN_256)
+ size += (ALIGN_256 - (size % ALIGN_256));
+ if (size > mgr->avail)
+ return -ENOMEM;
+
+ *lib_base = mgr->current_base;
+ mgr->current_base += size;
+ mgr->avail -= size;
+ mgr->count++;
+ pr_debug("library base = 0x%lx", *lib_base);
+ pr_debug("library aligned size = 0x%x", size);
+ pr_debug("lib count = %d\n", mgr->count);
+ return retval;
+
+}
+
+/* Push a relocated library image to the DSP, via DMA when the platform
+ * supports it, otherwise via memcpy. Returns 0 on success or a negative
+ * error code from the parse/transfer step. */
+static int sst_download_lib_elf(struct intel_sst_drv *sst, const void *lib,
+ int size)
+{
+ int retval = 0;
+
+ pr_debug("In %s\n", __func__);
+
+ if (sst->use_dma) {
+ retval = sst_parse_elf_fw_dma(sst, lib,
+ &sst->library_list);
+ if (retval)
+ goto free_dma_res;
+ retval = sst_do_dma(&sst->library_list);
+ if (retval)
+ pr_err("sst_do_dma failed, abort\n");
+/* the descriptor lists are freed on both the success and failure paths */
+free_dma_res:
+ kfree(sst->library_list.src);
+ kfree(sst->library_list.dst);
+ sst->library_list.list_len = 0;
+ } else {
+ retval = sst_parse_elf_fw_memcpy(sst, lib,
+ &sst->libmemcpy_list);
+ if (retval)
+ return retval;
+ sst_do_memcpy(&sst->libmemcpy_list);
+ sst_memcpy_free_lib_resources();
+ }
+ pr_debug("download lib complete");
+ return retval;
+}
+
+/* Publish the (module id, entry point) pair of every successfully
+ * downloaded module to the firmware module table at @ddr_base, where the
+ * DSP firmware picks them up. */
+static void sst_fill_fw_module_table(struct sst_module_info *mod_list,
+ int list_size, unsigned long ddr_base)
+{
+ int i;
+ u32 *write_ptr = (u32 *)ddr_base;
+
+ pr_debug("In %s\n", __func__);
+
+ for (i = 0; i < list_size; i++) {
+ if (mod_list[i].status == SST_LIB_DOWNLOADED) {
+ pr_debug("status dnwld for %d\n", i);
+ pr_debug("module id %d\n", mod_list[i].id);
+ pr_debug("entry pt 0x%x\n", mod_list[i].entry_pt);
+
+ *write_ptr++ = mod_list[i].id;
+ *write_ptr++ = mod_list[i].entry_pt;
+ }
+ }
+}
+
+/* Request the library firmware image named "<module>_<pci_id>.bin" from
+ * user space. On success *fw_lib holds the firmware (caller must release
+ * it) and the module is marked SST_LIB_FOUND. */
+static int sst_request_lib_elf(struct sst_module_info *mod_entry,
+ const struct firmware **fw_lib, int pci_id, struct device *dev)
+{
+ char name[25];
+ int retval = 0;
+
+ snprintf(name, sizeof(name), "%s%s%04x%s", mod_entry->name,
+ "_", pci_id, ".bin");
+ pr_debug("Requesting %s\n", name);
+
+ retval = request_firmware(fw_lib, name, dev);
+ if (retval) {
+ pr_err("%s library load failed %d\n", name, retval);
+ return retval;
+ }
+ pr_debug("got lib\n");
+ mod_entry->status = SST_LIB_FOUND;
+ return 0;
+}
+
+/* Copy the library image into a writable scratch buffer and reserve DDR
+ * space for it.
+ *
+ * On success *out_elf holds the copy (caller frees) and *lib_start the
+ * reserved DDR base. On failure the firmware is released and a negative
+ * error code is returned.
+ */
+static int sst_allocate_lib_mem(const struct firmware *lib, int size,
+ struct sst_mem_mgr *mem_mgr, char **out_elf, unsigned long *lib_start)
+{
+ int retval = 0;
+
+ *out_elf = kzalloc(size, GFP_KERNEL);
+ if (!*out_elf) {
+ /* retval was 0 here, so don't print it as an error code */
+ pr_err("cannot alloc mem for elf copy\n");
+ retval = -ENOMEM;
+ goto mem_error;
+ }
+
+ memcpy(*out_elf, lib->data, size);
+ retval = sst_get_next_lib_mem(mem_mgr, size, lib_start);
+ if (retval < 0) {
+ pr_err("cannot alloc ddr mem for lib: %d\n", retval);
+ kfree(*out_elf);
+ goto mem_error;
+ }
+ return 0;
+
+mem_error:
+ release_firmware(lib);
+ /* propagate the real failure instead of a hard-coded -ENOMEM */
+ return retval;
+}
+
+/* Load, validate, relocate and download every firmware library module in
+ * @mod_table. A module that fails any step is skipped (its status stays
+ * unchanged) and the remaining modules are still processed. Finally the
+ * module table is written to DDR for the firmware.
+ */
+int sst_load_all_modules_elf(struct intel_sst_drv *ctx, struct sst_module_info *mod_table,
+ int num_modules)
+{
+ int retval = 0;
+ int i;
+ const struct firmware *fw_lib;
+ struct sst_module_info *mod = NULL;
+ char *out_elf;
+ unsigned int lib_size = 0;
+ unsigned int mod_table_offset = ctx->pdata->lib_info->mod_table_offset;
+ unsigned long lib_base;
+
+ pr_debug("In %s", __func__);
+
+ sst_init_lib_mem_mgr(ctx);
+
+ for (i = 0; i < num_modules; i++) {
+ mod = &mod_table[i];
+ trace_sst_lib_download("Start of Request Lib", mod->name);
+ retval = sst_request_lib_elf(mod, &fw_lib,
+ ctx->pci_id, ctx->dev);
+ if (retval < 0)
+ continue;
+ lib_size = fw_lib->size;
+
+ trace_sst_lib_download("End of Request Lib", mod->name);
+ retval = sst_validate_elf(fw_lib, true);
+ if (retval < 0) {
+ pr_err("library is not valid elf %d\n", retval);
+ release_firmware(fw_lib);
+ continue;
+ }
+ pr_debug("elf validated\n");
+ retval = sst_allocate_lib_mem(fw_lib, lib_size,
+ &ctx->lib_mem_mgr, &out_elf, &lib_base);
+ if (retval < 0) {
+ pr_err("lib mem allocation failed: %d\n", retval);
+ continue;
+ }
+ pr_debug("lib space allocated\n");
+
+ /* relocate in place */
+ retval = sst_relocate_elf(out_elf, lib_size,
+ lib_base, &mod->entry_pt);
+ if (retval < 0) {
+ pr_err("lib elf relocation failed: %d\n", retval);
+ release_firmware(fw_lib);
+ kfree(out_elf);
+ continue;
+ }
+ pr_debug("relocation done\n");
+ release_firmware(fw_lib);
+ trace_sst_lib_download("Start of download Lib", mod->name);
+ /* write to ddr imr region,use memcpy method */
+ retval = sst_download_lib_elf(ctx, out_elf, lib_size);
+ trace_sst_lib_download("End of download Lib", mod->name);
+ kfree(out_elf);
+ if (retval) {
+ pr_err("lib download failed: %d\n", retval);
+ continue;
+ }
+ /* advertise only modules that actually reached the DSP, so
+ * the module table below never lists a failed download */
+ mod->status = SST_LIB_DOWNLOADED;
+ }
+
+ /* write module table to DDR */
+ sst_fill_fw_module_table(mod_table, num_modules,
+ (unsigned long)(ctx->ddr + mod_table_offset));
+ return retval;
+}
--- /dev/null
+/*
+ * sst_ipc.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corporation
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file defines all ipc functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+/* Format @len bytes at @from into @buf as space-separated hex for
+ * tracing. A run of trailing zero bytes is elided: the output stops
+ * after the last non-zero byte with "..." appended.
+ * NOTE(review): @buf is assumed large enough (~3 chars per input byte
+ * plus terminator) — callers must size it accordingly; verify at call
+ * sites. */
+void sst_dump_to_buffer(const void *from, size_t len, char *buf)
+{
+ int i, end;
+ const unsigned char *cmd = from;
+
+ if (len == 0) {
+ buf[0] = '\0';
+ return;
+ }
+
+ /* find the index just past the last non-zero byte */
+ for (end = len - 1; end >= 0; end--)
+ if (cmd[end])
+ break;
+ end++;
+
+ buf += snprintf(buf, 3, "%02x", cmd[0]);
+ for (i = 1; i < len; i++) {
+ buf += snprintf(buf, 4, " %02x", cmd[i]);
+ if (i == end && end != len - 1) {
+ /* remaining bytes are all zero — compress them */
+ sprintf(buf, "...");
+ break;
+ }
+ }
+}
+
+/* Allocate a wait block keyed by (msg_id, drv_id) and queue it on the
+ * driver's block list. The IPC reply path completes it through
+ * sst_wake_up_block(); release it with sst_free_block().
+ * Returns NULL on allocation failure. */
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+ u32 msg_id, u32 drv_id)
+{
+ struct sst_block *msg = NULL;
+
+ pr_debug("in %s\n", __func__);
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ pr_err("kzalloc block failed\n");
+ return NULL;
+ }
+ msg->condition = false;
+ msg->on = true;
+ msg->msg_id = msg_id;
+ msg->drv_id = drv_id;
+ spin_lock_bh(&ctx->block_lock);
+ list_add_tail(&msg->node, &ctx->block_list);
+ spin_unlock_bh(&ctx->block_lock);
+
+ return msg;
+}
+
+/* Complete the wait block matching (ipc, drv_id): record the result and
+ * optional reply payload, set the condition and wake the wait queue.
+ * Ownership of @data passes to the block (it is freed later in
+ * sst_free_block()). Returns 0 if a matching block was found, -EINVAL
+ * otherwise — e.g. when the reply was for a short message nobody waits
+ * on. */
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+ u32 drv_id, u32 ipc, void *data, u32 size)
+{
+ struct sst_block *block = NULL;
+
+ pr_debug("in %s\n", __func__);
+ spin_lock_bh(&ctx->block_lock);
+ list_for_each_entry(block, &ctx->block_list, node) {
+ pr_debug("Block ipc %d, drv_id %d\n", block->msg_id,
+ block->drv_id);
+ if (block->msg_id == ipc && block->drv_id == drv_id) {
+ pr_debug("free up the block\n");
+ block->ret_code = result;
+ block->data = data;
+ block->size = size;
+ block->condition = true;
+ spin_unlock_bh(&ctx->block_lock);
+ wake_up(&ctx->wait_queue);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&ctx->block_lock);
+ pr_debug("Block not found or a response is received for a short message for ipc %d, drv_id %d\n",
+ ipc, drv_id);
+ return -EINVAL;
+}
+
+/* Remove @freed from the driver's block list and release it together
+ * with any reply payload attached by sst_wake_up_block().
+ * Returns 0 on success, -EINVAL when the block is not on the list. */
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
+{
+ struct sst_block *block = NULL, *__block;
+
+ pr_debug("in %s\n", __func__);
+ spin_lock_bh(&ctx->block_lock);
+ list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
+ if (block == freed) {
+ list_del(&freed->node);
+ kfree(freed->data);
+ freed->data = NULL;
+ kfree(freed);
+ spin_unlock_bh(&ctx->block_lock);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&ctx->block_lock);
+ return -EINVAL;
+}
+
+/*
+ * sst_send_runtime_param - send runtime param to SST
+ *
+ * this function sends the runtime parameter to sst dsp engine
+ */
+static int sst_send_runtime_param(struct snd_sst_runtime_params *params)
+{
+ struct ipc_post *msg = NULL;
+ int ret_val;
+
+ pr_debug("Enter:%s\n", __func__);
+ ret_val = sst_create_ipc_msg(&msg, true);
+ if (ret_val)
+ return ret_val;
+ sst_fill_header(&msg->header, IPC_IA_SET_RUNTIME_PARAMS, 1,
+ params->str_id);
+ msg->header.part.data = sizeof(u32) + sizeof(*params) - sizeof(params->addr)
+ + params->size;
+ /* mailbox layout: header word, the params struct minus its trailing
+ * addr pointer, then params->size bytes copied from params->addr */
+ memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+ memcpy(msg->mailbox_data + sizeof(u32), params, sizeof(*params)
+ - sizeof(params->addr));
+ /* driver doesn't need to send address, so overwrite addr with data */
+ memcpy(msg->mailbox_data + sizeof(u32) + sizeof(*params)
+ - sizeof(params->addr),
+ params->addr, params->size);
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ return 0;
+}
+
+/* Worker that posts the next queued IPC message to the MRFLD IPCX
+ * register. If the DSP still owns the register (busy bit set) the
+ * message stays on the queue to be posted later. Once written to the
+ * shim, the message descriptor and mailbox buffer are freed. */
+void sst_post_message_mrfld(struct work_struct *work)
+{
+ struct ipc_post *msg;
+ union ipc_header_mrfld header;
+ unsigned long irq_flags;
+
+ pr_debug("Enter:%s\n", __func__);
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ /* check list */
+ if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
+ /* queue is empty, nothing to send */
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ pr_debug("Empty msg queue... NO Action\n");
+ return;
+ }
+
+ /* check busy bit */
+ header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+ if (header.p.header_high.part.busy) {
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ pr_debug("Busy not free... post later\n");
+ return;
+ }
+ /* copy msg from list */
+ msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
+ struct ipc_post, node);
+ list_del(&msg->node);
+ pr_debug("sst: size: = %x\n", msg->mrfld_header.p.header_low_payload);
+ /* large messages carry their payload through the mailbox */
+ if (msg->mrfld_header.p.header_high.part.large)
+ memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+ msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+
+ trace_sst_ipc("POST ->", msg->mrfld_header.p.header_high.full,
+ msg->mrfld_header.p.header_low_payload,
+ msg->mrfld_header.p.header_high.part.drv_id);
+ trace_sst_ipc_mailbox(msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+ sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ pr_debug("sst: Post message: header = %x\n",
+ msg->mrfld_header.p.header_high.full);
+ kfree(msg->mailbox_data);
+ kfree(msg);
+ return;
+}
+
+/**
+* sst_post_message - Posts message to SST
+*
+* @work: Pointer to work structure
+*
+* This function is called by any component in driver which
+* wants to send an IPC message. This will post message only if
+* busy bit is free
+*/
+void sst_post_message_mfld(struct work_struct *work)
+{
+ struct ipc_post *msg;
+ union ipc_header header;
+ unsigned long irq_flags;
+
+ pr_debug("Enter:%s\n", __func__);
+
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ /* check list */
+ if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
+ /* queue is empty, nothing to send */
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ pr_debug("Empty msg queue... NO Action\n");
+ return;
+ }
+
+ /* check busy bit */
+ header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+ if (header.part.busy) {
+ /* message stays queued; it will be posted on a later kick */
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ pr_debug("Busy not free... Post later\n");
+ return;
+ }
+ /* copy msg from list */
+ msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
+ struct ipc_post, node);
+ list_del(&msg->node);
+ pr_debug("size: = %x\n", msg->header.part.data);
+ /* large messages carry their payload through the mailbox */
+ if (msg->header.part.large)
+ memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+ msg->mailbox_data, msg->header.part.data);
+
+ sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx, msg->header.full);
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ pr_debug("Posted message: header = %x\n", msg->header.full);
+
+ kfree(msg->mailbox_data);
+ kfree(msg);
+ return;
+}
+
+/* Post an IPC message synchronously on MRFLD: instead of queueing,
+ * busy-wait (up to 10 x 500us) for the DSP to release the IPCX busy bit
+ * and then write the message directly. Consumes @msg in all cases.
+ * Returns 0 on success or -EBUSY if the DSP never freed the register. */
+int sst_sync_post_message_mrfld(struct ipc_post *msg)
+{
+ union ipc_header_mrfld header;
+ unsigned int loop_count = 0;
+ int retval = 0;
+ unsigned long irq_flags;
+
+ pr_debug("Enter:%s\n", __func__);
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+
+ /* check busy bit */
+ header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+ while (header.p.header_high.part.busy) {
+ if (loop_count > 10) {
+ pr_err("sst: Busy wait failed, cant send this msg\n");
+ retval = -EBUSY;
+ goto out;
+ }
+ udelay(500);
+ loop_count++;
+ header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+ }
+ pr_debug("sst: Post message: header = %x\n",
+ msg->mrfld_header.p.header_high.full);
+ pr_debug("sst: size = 0x%x\n", msg->mrfld_header.p.header_low_payload);
+ if (msg->mrfld_header.p.header_high.part.large)
+ memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+ msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+
+ trace_sst_ipc("POST ->", msg->mrfld_header.p.header_high.full,
+ msg->mrfld_header.p.header_low_payload,
+ msg->mrfld_header.p.header_high.part.drv_id);
+ trace_sst_ipc_mailbox(msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+ sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);
+
+out:
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ kfree(msg->mailbox_data);
+ kfree(msg);
+ return retval;
+}
+
+/* use this for trigger ops to post syncronous msgs
+ *
+ * MFLD variant: busy-waits (up to 10 x 500us) for the IPCX busy bit to
+ * clear, then writes the message directly. Consumes @msg in all cases.
+ * Returns 0 on success or -EBUSY if the register never became free.
+ */
+int sst_sync_post_message_mfld(struct ipc_post *msg)
+{
+ union ipc_header header;
+ unsigned int loop_count = 0;
+ int retval = 0;
+ unsigned long irq_flags;
+
+ pr_debug("Enter:%s\n", __func__);
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+
+ /* check busy bit */
+ header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+ while (header.part.busy) {
+ if (loop_count > 10) {
+ pr_err("busy wait failed, cant send this msg\n");
+ retval = -EBUSY;
+ goto out;
+ }
+ udelay(500);
+ loop_count++;
+ header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+ }
+ pr_debug("sst: Post message: header = %x\n", msg->header.full);
+ if (msg->header.part.large)
+ memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+ msg->mailbox_data, msg->header.part.data);
+ sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx, msg->header.full);
+
+out:
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ kfree(msg->mailbox_data);
+ kfree(msg);
+
+ return retval;
+}
+
+/*
+ * sst_clear_interrupt - clear the SST FW interrupt
+ *
+ * This function clears the interrupt register after the interrupt
+ * bottom half is complete allowing next interrupt to arrive
+ */
+void intel_sst_clear_intr_mfld(void)
+{
+ union interrupt_reg isr;
+ union interrupt_reg imr;
+ union ipc_header clear_ipc;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ imr.full = sst_shim_read(sst_drv_ctx->shim, SST_IMRX);
+ isr.full = sst_shim_read(sst_drv_ctx->shim, SST_ISRX);
+ /* write 1 to clear */;
+ isr.part.busy_interrupt = 1;
+ sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
+ /* Set IA done bit */
+ clear_ipc.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcd);
+ clear_ipc.part.busy = 0;
+ clear_ipc.part.done = 1;
+ /* ack the message with a success status so the FW can continue */
+ clear_ipc.part.data = IPC_ACK_SUCCESS;
+ sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcd, clear_ipc.full);
+ /* un mask busy interrupt */
+ imr.part.busy_interrupt = 0;
+ imr.part.done_interrupt = 0;
+ sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+}
+
+
+/* MRFLD variant of the FW interrupt clear: acknowledge the busy
+ * interrupt in ISRX, set the IA done bit in IPCD with a success status,
+ * and unmask the busy interrupt so the next one can arrive. */
+void intel_sst_clear_intr_mrfld(void)
+{
+ union interrupt_reg_mrfld isr;
+ union interrupt_reg_mrfld imr;
+ union ipc_header_mrfld clear_ipc;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
+ isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);
+
+ /* write 1 to clear */
+ isr.part.busy_interrupt = 1;
+ sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);
+
+ /* Set IA done bit */
+ clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);
+
+ clear_ipc.p.header_high.part.busy = 0;
+ clear_ipc.p.header_high.part.done = 1;
+ clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
+ sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
+ /* un mask busy interrupt */
+ imr.part.busy_interrupt = 0;
+ sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+}
+
+
+/*
+ * process_fw_init - process the FW init msg
+ *
+ * @msg: IPC message mailbox data from FW
+ *
+ * This function processes the FW init msg from FW
+ * marks FW state and prints debug info of loaded FW
+ */
+static void process_fw_init(void *msg)
+{
+ struct ipc_header_fw_init *init =
+ (struct ipc_header_fw_init *)msg;
+ int retval = 0;
+
+ pr_debug("*** FW Init msg came***\n");
+ if (init->result) {
+ /* a non-zero result means the FW failed to initialize */
+ sst_drv_ctx->sst_state = SST_ERROR;
+ pr_debug("FW Init failed, Error %x\n", init->result);
+ pr_err("FW Init failed, Error %x\n", init->result);
+ retval = init->result;
+ goto ret;
+ }
+ pr_info("FW Version %02x.%02x.%02x.%02x\n",
+ init->fw_version.type, init->fw_version.major,
+ init->fw_version.minor, init->fw_version.build);
+ pr_info("Build date %s Time %s\n",
+ init->build_info.date, init->build_info.time);
+
+ /* If there any runtime parameter to set, send it */
+ if (sst_drv_ctx->runtime_param.param.addr)
+ sst_send_runtime_param(&(sst_drv_ctx->runtime_param.param));
+
+ret:
+ /* unblock whoever is waiting on the FW download to complete */
+ sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0 , NULL, 0);
+}
+/**
+* sst_process_message_mfld - Processes message from SST
+*
+* @work: Pointer to work structure
+*
+* This function is scheduled by ISR
+* It take a msg from process_queue and does action based on msg
+*/
+void sst_process_message_mfld(struct ipc_post *msg)
+{
+ int str_id;
+ struct stream_info *stream;
+
+ str_id = msg->header.part.str_id;
+ pr_debug("IPC process for %x\n", msg->header.full);
+ /* based on msg in list call respective handler */
+ switch (msg->header.part.msg_id) {
+ case IPC_SST_PERIOD_ELAPSED:
+ if (sst_validate_strid(str_id)) {
+ pr_err("stream id %d invalid\n", str_id);
+ break;
+ }
+ /* notify the PCM layer that a period of data was consumed */
+ stream = &sst_drv_ctx->streams[str_id];
+ if (stream->period_elapsed)
+ stream->period_elapsed(stream->pcm_substream);
+ break;
+ case IPC_SST_BUF_UNDER_RUN:
+ case IPC_SST_BUF_OVER_RUN:
+ if (sst_validate_strid(str_id)) {
+ pr_err("stream id %d invalid\n", str_id);
+ break;
+ }
+ pr_err("Buffer under/overrun for %d\n",
+ msg->header.part.str_id);
+ pr_err("Got Underrun & not to send data...ignore\n");
+ break;
+
+ case IPC_SST_FRAGMENT_ELPASED: {
+ pr_debug("IPC_SST_FRAGMENT_ELPASED for %d", str_id);
+ sst_cdev_fragment_elapsed(str_id);
+ break;
+ }
+
+ case IPC_IA_PRINT_STRING:
+ pr_debug("been asked to print something by fw\n");
+ /* TBD */
+ break;
+
+ case IPC_IA_FW_INIT_CMPLT: {
+ /* send next data to FW */
+ process_fw_init(msg->mailbox_data);
+ break;
+ }
+
+ case IPC_SST_STREAM_PROCESS_FATAL_ERR:
+ if (sst_validate_strid(str_id)) {
+ pr_err("stream id %d invalid\n", str_id);
+ break;
+ }
+ pr_err("codec fatal error %x stream %d...\n",
+ msg->header.full, msg->header.part.str_id);
+ pr_err("Dropping the stream\n");
+ sst_drop_stream(msg->header.part.str_id);
+ break;
+ default:
+ /* Illegal case */
+ pr_err("Unhandled msg %x header %x\n",
+ msg->header.part.msg_id, msg->header.full);
+ }
+ return;
+}
+
+/**
+* sst_process_message - Processes message from SST
+*
+* @work: Pointer to work structure
+*
+* This function is scheduled by ISR
+* It take a msg from process_queue and does action based on msg
+*/
+
+/* NOTE(review): on MRFLD this handler only logs the message; the actual
+ * async/reply handling appears to live in sst_process_reply_mrfld() —
+ * confirm against the ISR dispatch. */
+void sst_process_message_mrfld(struct ipc_post *msg)
+{
+ int str_id;
+
+ str_id = msg->mrfld_header.p.header_high.part.drv_id;
+
+ pr_debug("IPC process message header %x payload %x\n",
+ msg->mrfld_header.p.header_high.full,
+ msg->mrfld_header.p.header_low_payload);
+
+ return;
+}
+
+#define VTSV_MAX_NUM_RESULTS 6
+#define VTSV_SIZE_PER_RESULT 7 /* 7 16 bit words */
+/* Max 6 results each of size 7 words + 1 num results word */
+#define VTSV_MAX_TOTAL_RESULT_SIZE \
+ (VTSV_MAX_NUM_RESULTS*VTSV_SIZE_PER_RESULT + 1)
+/* Each data word in the result is sent as a string in the format:
+DATAn=d, where n is the data word index varying from 0 to
+ VTSV_MAX_TOTAL_RESULT_SIZE-1
+d = string representation of data in decimal format;
+ unsigned 16bit data needs max 5 chars
+So total data string size = 4("DATA")+2("n")+1("=")
+ +5("d")+1(null)+5(reserved) = 18 */
+#define VTSV_DATA_STRING_SIZE 18
+
+/* Send a VTSV detection result to user space as a KOBJ_CHANGE uevent.
+ * The payload is an array of 16-bit words exported as DATAn=<value>
+ * environment strings, preceded by the event type and result size.
+ * Returns 0 on success or a negative error code.
+ */
+static int send_vtsv_result_event(void *data, int size)
+{
+ char *envp[VTSV_MAX_TOTAL_RESULT_SIZE+3];
+ char res_size[30];
+ char ev_type[30];
+ char result[VTSV_MAX_TOTAL_RESULT_SIZE][VTSV_DATA_STRING_SIZE];
+ int offset = 0;
+ u16 *tmp;
+ int i;
+ int ret;
+
+ if (!data) {
+ pr_err("Data pointer Null into %s\n", __func__);
+ return -EINVAL;
+ }
+ size = size / (sizeof(u16)); /* Number of 16 bit data words*/
+ if (size > VTSV_MAX_TOTAL_RESULT_SIZE) {
+ pr_err("VTSV result size exceeds expected value, no uevent sent\n");
+ return -EINVAL;
+ }
+
+ /* each snprintf is bounded by the size of its own destination
+ * buffer (the two sizeof arguments were previously swapped) */
+ snprintf(ev_type, sizeof(ev_type), "EVENT_TYPE=SST_VTSV");
+ envp[offset++] = ev_type;
+ snprintf(res_size, sizeof(res_size), "VTSV_RESULT_SIZE=%u", size);
+ envp[offset++] = res_size;
+ tmp = (u16 *)(data);
+ for (i = 0; i < size; i++) {
+ /* Driver assumes all data to be u16; The VTSV service
+ layer will type cast to u16 or s16 as appropriate for
+ a given data word*/
+ snprintf(result[i], VTSV_DATA_STRING_SIZE,
+ "DATA%u=%u", i, *tmp++);
+ envp[offset++] = result[i];
+ }
+ envp[offset] = NULL;
+ ret = kobject_uevent_env(&sst_drv_ctx->dev->kobj, KOBJ_CHANGE, envp);
+ if (ret)
+ pr_err("VTSV event send failed: ret = %d\n", ret);
+ return ret;
+}
+
+/* Dispatch an unsolicited (async) message from the firmware: period
+ * elapsed, drain complete, async error, VTSV detection, FW init
+ * complete or buffer under-run. The mailbox data starts with an
+ * ipc_dsp_hdr followed by the message-specific payload. */
+static void process_fw_async_msg(struct ipc_post *msg)
+{
+ u32 msg_id;
+ int str_id;
+ int res_size, ret;
+ u32 data_size, i;
+ void *data_offset;
+ struct stream_info *stream;
+ union ipc_header_high msg_high;
+ u32 msg_low, pipe_id;
+
+ msg_high = msg->mrfld_header.p.header_high;
+ msg_low = msg->mrfld_header.p.header_low_payload;
+ msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
+ /* payload follows the dsp header in the mailbox */
+ data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
+ data_size = msg_low - (sizeof(struct ipc_dsp_hdr));
+
+ switch (msg_id) {
+ case IPC_SST_PERIOD_ELAPSED_MRFLD:
+ pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+ str_id = get_stream_id_mrfld(pipe_id);
+ if (str_id > 0) {
+ pr_debug("Period elapsed rcvd for pipe id 0x%x\n", pipe_id);
+ stream = &sst_drv_ctx->streams[str_id];
+ if (stream->period_elapsed)
+ stream->period_elapsed(stream->pcm_substream);
+ if (stream->compr_cb)
+ stream->compr_cb(stream->compr_cb_param);
+ }
+ break;
+
+ case IPC_IA_DRAIN_STREAM_MRFLD:
+ pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+ str_id = get_stream_id_mrfld(pipe_id);
+ if (str_id > 0) {
+ stream = &sst_drv_ctx->streams[str_id];
+ if (stream->drain_notify)
+ stream->drain_notify(stream->drain_cb_param);
+ }
+ break;
+
+ case IPC_IA_FW_ASYNC_ERR_MRFLD:
+ pr_err("FW sent async error msg:\n");
+ for (i = 0; i < (data_size/4); i++)
+ pr_err("0x%x\n", (*((unsigned int *)data_offset + i)));
+ break;
+
+ case IPC_IA_VTSV_DETECTED:
+ res_size = data_size;
+ ret = send_vtsv_result_event(data_offset, res_size);
+ if (ret)
+ pr_err("VTSV uevent send failed: %d\n", ret);
+ else
+ pr_debug("VTSV uevent sent\n");
+ break;
+
+ case IPC_IA_FW_INIT_CMPLT_MRFLD:
+ process_fw_init(data_offset);
+ break;
+
+ case IPC_IA_BUF_UNDER_RUN_MRFLD:
+ pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+ str_id = get_stream_id_mrfld(pipe_id);
+ if (str_id > 0)
+ pr_err("Buffer under-run for pipe:%#x str_id:%d\n",
+ pipe_id, str_id);
+ break;
+
+ default:
+ pr_err("Unrecognized async msg from FW msg_id %#x\n", msg_id);
+ }
+}
+
+/* Process a reply/notification from the firmware on MRFLD. Messages
+ * tagged with the async driver id are dispatched as unsolicited events;
+ * everything else completes the wait block of the IPC that triggered
+ * the reply. */
+void sst_process_reply_mrfld(struct ipc_post *msg)
+{
+ unsigned int drv_id;
+ void *data;
+ union ipc_header_high msg_high;
+ u32 msg_low;
+
+ msg_high = msg->mrfld_header.p.header_high;
+ msg_low = msg->mrfld_header.p.header_low_payload;
+
+ pr_debug("IPC process message header %x payload %x\n",
+ msg->mrfld_header.p.header_high.full,
+ msg->mrfld_header.p.header_low_payload);
+
+ drv_id = msg_high.part.drv_id;
+
+ /* Check for async messages */
+ if (drv_id == SST_ASYNC_DRV_ID) {
+ /* FW sent async large message */
+ process_fw_async_msg(msg);
+ goto end;
+ }
+
+ /* FW sent short error response for an IPC */
+ if (msg_high.part.result && drv_id && !msg_high.part.large) {
+ /* 32-bit FW error code in msg_low */
+ pr_err("FW sent error response 0x%x", msg_low);
+ sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+ msg_high.part.drv_id,
+ msg_high.part.msg_id, NULL, 0);
+ goto end;
+ }
+
+ /* Process all valid responses */
+ /* if it is a large message, the payload contains the size to
+ * copy from mailbox */
+ if (msg_high.part.large) {
+ data = kzalloc(msg_low, GFP_KERNEL);
+ if (!data)
+ goto end;
+ memcpy(data, (void *) msg->mailbox_data, msg_low);
+ /* the block takes ownership of data; free it only when no
+ * waiter matched */
+ if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+ msg_high.part.drv_id,
+ msg_high.part.msg_id, data, msg_low))
+ kfree(data);
+ } else {
+ sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+ msg_high.part.drv_id,
+ msg_high.part.msg_id, NULL, 0);
+ }
+
+end:
+ return;
+}
+
+/**
+* sst_process_reply - Processes reply message from SST
+*
+* @work: Pointer to work structure
+*
+* This function is scheduled by ISR
+* It take a reply msg from response_queue and
+* does action based on msg
+*/
+void sst_process_reply_mfld(struct ipc_post *msg)
+{
+ void *data;
+ int str_id;
+ struct stream_info *stream;
+
+
+ str_id = msg->header.part.str_id;
+
+ pr_debug("sst: IPC process reply for %x\n", msg->header.full);
+
+ /* handle drain notify first */
+ if (msg->header.part.msg_id == IPC_IA_DRAIN_STREAM) {
+ pr_debug("drain message notify\n");
+ if (str_id > 0) {
+ stream = &sst_drv_ctx->streams[str_id];
+ if (stream->drain_notify)
+ stream->drain_notify(stream->drain_cb_param);
+ }
+ return;
+ }
+
+
+ /* short replies carry the status directly in the header data field */
+ if (!msg->header.part.large) {
+ if (!msg->header.part.data)
+ pr_debug("Success\n");
+ else
+ pr_err("Error from firmware: %d\n", msg->header.part.data);
+ sst_wake_up_block(sst_drv_ctx, msg->header.part.data,
+ str_id, msg->header.part.msg_id, NULL, 0);
+ } else {
+ pr_debug("Allocating %d\n", msg->header.part.data);
+ data = kzalloc(msg->header.part.data, GFP_KERNEL);
+ if (!data) {
+ pr_err("sst: mem alloc failed\n");
+ return;
+ }
+
+ memcpy(data, (void *)msg->mailbox_data, msg->header.part.data);
+ /* the block takes ownership of data; free it only when no
+ * waiter matched */
+ if (sst_wake_up_block(sst_drv_ctx, 0, str_id,
+ msg->header.part.msg_id, data,
+ msg->header.part.data))
+ kfree(data);
+ }
+ return;
+}
--- /dev/null
+/*
+ * sst_pvt.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file contains all private functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kobject.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <sound/asound.h>
+#include <sound/pcm.h>
+#include <sound/compress_offload.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define SST_EXCE_DUMP_BASE 0xFFFF2c00
+#define SST_EXCE_DUMP_WORD 4
+#define SST_EXCE_DUMP_LEN 32
+#define SST_EXCE_DUMP_SIZE ((SST_EXCE_DUMP_LEN)*(SST_EXCE_DUMP_WORD))
+#define SST_EXCE_DUMP_OFFSET 0xA00
+/*
+ * sst_wait_interruptible - wait on event
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * This function waits without a timeout (and is interruptable) for a
+ * given block event
+ */
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+ struct sst_block *block)
+{
+ int retval = 0;
+
+ if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
+ block->condition)) {
+ /* event wake */
+ if (block->ret_code < 0) {
+ /* NOTE(review): the block's negative error code is
+ * collapsed to -EBUSY here; the original value is
+ * only logged */
+ pr_err("stream failed %d\n", block->ret_code);
+ retval = -EBUSY;
+ } else {
+ pr_debug("event up\n");
+ retval = 0;
+ }
+ } else {
+ pr_err("signal interrupted\n");
+ retval = -EINTR;
+ }
+ return retval;
+
+}
+
+/* Read the shim register at @addr, using the access width the platform
+ * requires: 32-bit on CLV, 64-bit on MRFLD/BYT. Unknown PCI ids yield
+ * 0 (same as the original switch with no default). */
+unsigned long long read_shim_data(struct intel_sst_drv *sst, int addr)
+{
+ unsigned long long value = 0;
+
+ if (sst->pci_id == SST_CLV_PCI_ID)
+ value = sst_shim_read(sst->shim, addr);
+ else if (sst->pci_id == SST_MRFLD_PCI_ID ||
+ sst->pci_id == SST_BYT_PCI_ID)
+ value = sst_shim_read64(sst->shim, addr);
+
+ return value;
+}
+
+/* Write @data to the shim register at @addr, using the access width the
+ * platform requires: 32-bit on CLV, 64-bit on MRFLD/BYT. Writes to
+ * unknown PCI ids are dropped (same as the original switch). */
+void write_shim_data(struct intel_sst_drv *sst, int addr,
+ unsigned long long data)
+{
+ if (sst->pci_id == SST_CLV_PCI_ID) {
+ sst_shim_write(sst->shim, addr, (u32) data);
+ return;
+ }
+
+ if (sst->pci_id == SST_MRFLD_PCI_ID ||
+ sst->pci_id == SST_BYT_PCI_ID)
+ sst_shim_write64(sst->shim, addr, (u64) data);
+}
+
+
+/* Log a snapshot of all audio shim registers. Taken under the IPC spin
+ * lock so the dumped values are mutually consistent. */
+void dump_sst_shim(struct intel_sst_drv *sst)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+ pr_err("audio shim registers:\n"
+ "CSR: %.8llx\n"
+ "PISR: %.8llx\n"
+ "PIMR: %.8llx\n"
+ "ISRX: %.8llx\n"
+ "ISRD: %.8llx\n"
+ "IMRX: %.8llx\n"
+ "IMRD: %.8llx\n"
+ "IPCX: %.8llx\n"
+ "IPCD: %.8llx\n"
+ "ISRSC: %.8llx\n"
+ "ISRLPESC: %.8llx\n"
+ "IMRSC: %.8llx\n"
+ "IMRLPESC: %.8llx\n"
+ "IPCSC: %.8llx\n"
+ "IPCLPESC: %.8llx\n"
+ "CLKCTL: %.8llx\n"
+ "CSR2: %.8llx\n",
+ read_shim_data(sst, SST_CSR),
+ read_shim_data(sst, SST_PISR),
+ read_shim_data(sst, SST_PIMR),
+ read_shim_data(sst, SST_ISRX),
+ read_shim_data(sst, SST_ISRD),
+ read_shim_data(sst, SST_IMRX),
+ read_shim_data(sst, SST_IMRD),
+ read_shim_data(sst, sst->ipc_reg.ipcx),
+ read_shim_data(sst, sst->ipc_reg.ipcd),
+ read_shim_data(sst, SST_ISRSC),
+ read_shim_data(sst, SST_ISRLPESC),
+ read_shim_data(sst, SST_IMRSC),
+ read_shim_data(sst, SST_IMRLPESC),
+ read_shim_data(sst, SST_IPCSC),
+ read_shim_data(sst, SST_IPCLPESC),
+ read_shim_data(sst, SST_CLKCTL),
+ read_shim_data(sst, SST_CSR2));
+ spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+}
+
+/* Reset the IPC/interrupt related shim registers and restore CSR to its
+ * power-on default so the DSP can be restarted cleanly. */
+void reset_sst_shim(struct intel_sst_drv *sst)
+{
+ union config_status_reg_mrfld csr;
+
+ pr_err("Resetting few Shim registers\n");
+ write_shim_data(sst, sst->ipc_reg.ipcx, 0x0);
+ write_shim_data(sst, sst->ipc_reg.ipcd, 0x0);
+ write_shim_data(sst, SST_ISRX, 0x0);
+ write_shim_data(sst, SST_ISRD, 0x0);
+ write_shim_data(sst, SST_IPCSC, 0x0);
+ write_shim_data(sst, SST_IPCLPESC, 0x0);
+ write_shim_data(sst, SST_ISRSC, 0x0);
+ write_shim_data(sst, SST_ISRLPESC, 0x0);
+ write_shim_data(sst, SST_PISR, 0x0);
+
+ /* Reset the CSR value to the default value. i.e 0x1e40001*/
+ /* use the context passed in — not the global sst_drv_ctx — for
+ * consistency with the register writes above */
+ csr.full = sst_shim_read64(sst->shim, SST_CSR);
+ csr.part.xt_snoop = 0;
+ csr.full &= ~(0xf);
+ csr.full |= 0x01;
+ sst_shim_write64(sst->shim, SST_CSR, csr.full);
+}
+
+/*
+ * dump_sst_crash_area - log the firmware exception dump from DSP SRAM
+ *
+ * Maps the fixed SRAM window where the firmware stores exception details
+ * (signature, EXCCAUSE, EXCVADDR, plus additional debug words) and logs
+ * it at error level.
+ *
+ * Fix: ioremap_nocache() can fail; bail out instead of dereferencing a
+ * NULL __iomem pointer.
+ */
+static void dump_sst_crash_area(void)
+{
+ void __iomem *fw_dump_area;
+ u32 dump_word;
+ u8 i;
+
+ /* dump the firmware SRAM where the exception details are stored */
+ fw_dump_area = ioremap_nocache(SST_EXCE_DUMP_BASE, SST_EXCE_DUMP_SIZE);
+ if (!fw_dump_area) {
+ pr_err("Unable to map the FW exception dump area\n");
+ return;
+ }
+
+ pr_err("Firmware exception dump begins:\n");
+ pr_err("Exception start signature:%#x\n", readl(fw_dump_area + SST_EXCE_DUMP_WORD));
+ pr_err("EXCCAUSE:\t\t\t%#x\n", readl(fw_dump_area + SST_EXCE_DUMP_WORD*2));
+ pr_err("EXCVADDR:\t\t\t%#x\n", readl(fw_dump_area + (SST_EXCE_DUMP_WORD*3)));
+ pr_err("Firmware additional data:\n");
+
+ /* dump remaining FW debug data */
+ for (i = 1; i < (SST_EXCE_DUMP_LEN-4+1); i++) {
+ dump_word = readl(fw_dump_area + (SST_EXCE_DUMP_WORD*3)
+ + (i*SST_EXCE_DUMP_WORD));
+ pr_err("Data[%d]=%#x\n", i, dump_word);
+ }
+ iounmap(fw_dump_area);
+ pr_err("Firmware exception dump ends\n");
+}
+
+/**
+ * dump_ram_area - dump the DSP IRAM or DRAM into a local buffer
+ *
+ * @sst : pointer to driver context
+ * @dump_buf : pointer to the struct holding the pre-allocated iram/dram
+ *             dump buffers
+ * @type : SST_IRAM to dump IRAM; any other value dumps DRAM
+ *
+ * Copies the selected RAM contents from MMIO space into the matching
+ * dump buffer (used on the recovery path before the DSP is reset).
+ */
+static void dump_ram_area(struct intel_sst_drv *sst,
+ struct sst_dump_buf *dump_buf, enum sst_ram_type type)
+{
+ if (type == SST_IRAM) {
+ pr_err("Iram dumped in buffer\n");
+ memcpy_fromio(dump_buf->iram_buf.buf, sst->iram,
+ dump_buf->iram_buf.size);
+ } else {
+ pr_err("Dram dumped in buffer\n");
+ memcpy_fromio(dump_buf->dram_buf.buf, sst->dram,
+ dump_buf->dram_buf.size);
+ }
+}
+
+/*FIXME Disabling IRAM/DRAM dump for timeout issues */
+/*
+ * sst_stream_recovery - force every active stream into RESET state
+ * @sst: driver context
+ *
+ * Stops the ALSA PCM or compressed substream attached to each stream
+ * that is initialized, then marks it STREAM_RESET so userspace must set
+ * it up again once the DSP has been recovered.
+ *
+ * Fix: use the @sst context passed in instead of the sst_drv_ctx global
+ * when fetching the stream_info, matching the rest of the function.
+ */
+static void sst_stream_recovery(struct intel_sst_drv *sst)
+{
+ struct stream_info *str_info;
+ u8 i;
+ for (i = 1; i <= sst->info.max_streams; i++) {
+ pr_err("Audio: Stream %d, state %d\n", i, sst->streams[i].status);
+ if (sst->streams[i].status != STREAM_UN_INIT) {
+ str_info = &sst->streams[i];
+ if (str_info->pcm_substream)
+ snd_pcm_stop(str_info->pcm_substream, SNDRV_PCM_STATE_SETUP);
+ else if (str_info->compr_cb_param)
+ snd_compr_stop(str_info->compr_cb_param);
+ sst->streams[i].status = STREAM_RESET;
+ }
+ }
+}
+
+/*
+ * sst_dump_ipc_dispatch_lists - log and drain the pending IPC tx list
+ * @sst: driver context
+ *
+ * Logs the header of every message still queued for dispatch, then
+ * frees it.  Runs under ipc_spin_lock; kfree() is safe in atomic
+ * context.
+ */
+static void sst_dump_ipc_dispatch_lists(struct intel_sst_drv *sst)
+{
+ struct ipc_post *msg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sst->ipc_spin_lock, flags);
+ if (list_empty(&sst->ipc_dispatch_list))
+ pr_err("ipc dispatch list is Empty\n");
+
+ while (!list_empty(&sst->ipc_dispatch_list)) {
+ msg = list_first_entry(&sst->ipc_dispatch_list,
+ struct ipc_post, node);
+ pr_err("ipc-dispatch:pending msg header %#x\n", msg->header.full);
+ list_del(&msg->node);
+ kfree(msg->mailbox_data);
+ kfree(msg);
+ }
+ spin_unlock_irqrestore(&sst->ipc_spin_lock, flags);
+}
+
+/*
+ * sst_dump_rx_lists - log and drain the pending IPC rx list
+ * @sst: driver context
+ *
+ * Logs the header of every message still sitting on the rx list, then
+ * frees it.  Runs under rx_msg_lock.
+ */
+static void sst_dump_rx_lists(struct intel_sst_drv *sst)
+{
+ struct ipc_post *msg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sst->rx_msg_lock, flags);
+ if (list_empty(&sst->rx_list))
+ pr_err("rx msg list is empty\n");
+
+ while (!list_empty(&sst->rx_list)) {
+ msg = list_first_entry(&sst->rx_list, struct ipc_post, node);
+ pr_err("rx: pending msg header %#x\n", msg->header.full);
+ list_del(&msg->node);
+ kfree(msg->mailbox_data);
+ kfree(msg);
+ }
+ spin_unlock_irqrestore(&sst->rx_msg_lock, flags);
+}
+
+/*
+ * dump_buffer_fromio - hex-dump an MMIO region, four dwords per line
+ * @from: __iomem base to read from
+ * @num_dwords: number of 32-bit words to dump; must be a multiple of 4
+ */
+static void dump_buffer_fromio(void __iomem *from,
+ unsigned int num_dwords)
+{
+ unsigned int i;
+ u32 w0, w1, w2, w3;
+
+ if (num_dwords % 4) {
+ pr_err("%s: num_dwords %d not multiple of 4\n",
+ __func__, num_dwords);
+ return;
+ }
+
+ pr_err("****** Start *******\n");
+ pr_err("Dump %d dwords, from location %p\n", num_dwords, from);
+
+ for (i = 0; i < num_dwords; i += 4) {
+ w0 = ioread32(from + (i + 0) * 4);
+ w1 = ioread32(from + (i + 1) * 4);
+ w2 = ioread32(from + (i + 2) * 4);
+ w3 = ioread32(from + (i + 3) * 4);
+ pr_err("%.8x %.8x %.8x %.8x\n", w0, w1, w2, w3);
+ }
+ pr_err("****** End *********\n\n\n");
+}
+
+/*
+ * sst_stall_lpe_n_wait - assert CSR run-stall on the LPE and settle
+ * @sst: driver context
+ *
+ * Logs the DMA channel-enable registers, sets the runstall bit in the
+ * shim CSR to halt the DSP, waits ~5ms, then logs the DMA registers
+ * again so the before/after state is visible in the recovery trace.
+ */
+static void sst_stall_lpe_n_wait(struct intel_sst_drv *sst)
+{
+ union config_status_reg_mrfld csr;
+ void __iomem *dma_reg0 = sst->debugfs.dma_reg[0];
+ void __iomem *dma_reg1 = sst->debugfs.dma_reg[1];
+ int offset = 0x3A0; /* ChEnReg of DMA */
+
+
+ pr_err("Before stall: DMA_0 Ch_EN %#llx DMA_1 Ch_EN %#llx\n",
+ sst_reg_read64(dma_reg0, offset),
+ sst_reg_read64(dma_reg1, offset));
+
+ /* Stall LPE */
+ csr.full = sst_shim_read64(sst->shim, SST_CSR);
+ csr.part.runstall = 1;
+ sst_shim_write64(sst->shim, SST_CSR, csr.full);
+
+ /* A 5ms delay, before resetting the LPE */
+ usleep_range(5000, 5100);
+
+ pr_err("After stall: DMA_0 Ch_EN %#llx DMA_1 Ch_EN %#llx\n",
+ sst_reg_read64(dma_reg0, offset),
+ sst_reg_read64(dma_reg1, offset));
+}
+
+#if IS_ENABLED(CONFIG_INTEL_SCU_IPC)
+/*
+ * sst_send_scu_reset_ipc - ask the SCU to power-gate and reset the LPE
+ * @sst: driver context
+ *
+ * On failure falls back to reset_sst_shim(); on success dumps the shim
+ * and masks the DMA/SSP interrupt sources in IMRX.
+ */
+static void sst_send_scu_reset_ipc(struct intel_sst_drv *sst)
+{
+ int ret = 0;
+
+ /* Reset and power gate the LPE */
+ ret = intel_scu_ipc_simple_command(IPC_SCU_LPE_RESET, 0);
+ if (ret) {
+ pr_err("Power gating LPE failed %d\n", ret);
+ reset_sst_shim(sst);
+ } else {
+ pr_err("LPE reset via SCU is success!!\n");
+ pr_err("dump after LPE power cycle\n");
+ dump_sst_shim(sst);
+
+ /* Mask the DMA & SSP interrupts */
+ sst_shim_write64(sst->shim, SST_IMRX, 0xFFFF0038);
+ }
+}
+#else
+/* Stub when the SCU IPC driver is not built; recovery then skips the
+ * LPE power cycle entirely. */
+static void sst_send_scu_reset_ipc(struct intel_sst_drv *sst)
+{
+ pr_debug("%s: do nothing, just return\n", __func__);
+}
+#endif
+
+#define SRAM_OFFSET_MRFLD 0xc00
+#define NUM_DWORDS 256
+/*
+ * sst_do_recovery_mrfld - full recovery sequence for Merrifield
+ * @sst: driver context
+ *
+ * Triggered when the firmware stops responding: resets driver/stream
+ * state, stalls the DSP, dumps mailbox/SRAM/IRAM/DRAM for post-mortem,
+ * notifies userspace via a KOBJ_CHANGE uevent, power-cycles the LPE via
+ * SCU, flushes the pending IPC lists and drops the cached FW image so
+ * it is re-downloaded on the next request.
+ *
+ * NOTE(review): the body mixes @sst with the global sst_drv_ctx; they
+ * appear to be the same device context — consider using @sst throughout.
+ */
+void sst_do_recovery_mrfld(struct intel_sst_drv *sst)
+{
+ char iram_event[30], dram_event[30], ddr_imr_event[65], event_type[30];
+ char *envp[5];
+ int env_offset = 0;
+
+ /*
+ * Set the firmware state to uninitialized so it gets re-downloaded
+ * on the next request: the FW not responding for 1 sec is treated
+ * as an unrecoverable FW error.
+ */
+ pr_err("Audio: Intel SST engine encountered an unrecoverable error\n");
+ pr_err("Audio: trying to reset the dsp now\n");
+
+ mutex_lock(&sst->sst_lock);
+ sst->sst_state = SST_UN_INIT;
+ sst_stream_recovery(sst);
+ mutex_unlock(&sst->sst_lock);
+
+ dump_stack();
+ dump_sst_shim(sst);
+
+ /* halt the DSP before reading its memories */
+ sst_stall_lpe_n_wait(sst);
+
+ /* dump mailbox and sram */
+ pr_err("Dumping Mailbox...\n");
+ dump_buffer_fromio(sst->mailbox, NUM_DWORDS);
+ pr_err("Dumping SRAM...\n");
+ dump_buffer_fromio(sst->mailbox + SRAM_OFFSET_MRFLD, NUM_DWORDS);
+
+ /* IRAM/DRAM are only reachable with bypass enabled */
+ if (sst_drv_ctx->ops->set_bypass) {
+
+ sst_drv_ctx->ops->set_bypass(true);
+ dump_ram_area(sst, &(sst->dump_buf), SST_IRAM);
+ dump_ram_area(sst, &(sst->dump_buf), SST_DRAM);
+ sst_drv_ctx->ops->set_bypass(false);
+
+ }
+
+ /* build the uevent environment describing the dump sizes */
+ snprintf(event_type, sizeof(event_type), "EVENT_TYPE=SST_RECOVERY");
+ envp[env_offset++] = event_type;
+ snprintf(iram_event, sizeof(iram_event), "IRAM_DUMP_SIZE=%d",
+ sst->dump_buf.iram_buf.size);
+ envp[env_offset++] = iram_event;
+ snprintf(dram_event, sizeof(dram_event), "DRAM_DUMP_SIZE=%d",
+ sst->dump_buf.dram_buf.size);
+ envp[env_offset++] = dram_event;
+
+ if (sst->ddr != NULL) {
+ snprintf(ddr_imr_event, sizeof(ddr_imr_event),
+ "DDR_IMR_DUMP_SIZE=%d DDR_IMR_ADDRESS=%p", (sst->ddr_end - sst->ddr_base), sst->ddr);
+ envp[env_offset++] = ddr_imr_event;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sst->dev->kobj, KOBJ_CHANGE, envp);
+ pr_err("Recovery Uevent Sent!!\n");
+
+ /* Send IPC to SCU to power gate and reset the LPE */
+ sst_send_scu_reset_ipc(sst);
+
+ /* restart private-id allocation from scratch */
+ pr_err("reset the pvt id from val %d\n", sst_drv_ctx->pvt_id);
+ spin_lock(&sst_drv_ctx->pvt_id_lock);
+ sst_drv_ctx->pvt_id = 0;
+ spin_unlock(&sst_drv_ctx->pvt_id_lock);
+ sst_dump_ipc_dispatch_lists(sst_drv_ctx);
+ sst_dump_rx_lists(sst_drv_ctx);
+
+ /* drop the cached FW image so the next load fetches a fresh copy */
+ if (sst_drv_ctx->fw_in_mem) {
+ pr_err("Clearing the cached FW copy...\n");
+ kfree(sst_drv_ctx->fw_in_mem);
+ sst_drv_ctx->fw_in_mem = NULL;
+ }
+}
+
+/*
+ * sst_do_recovery - lightweight recovery/diagnostic handler (CTP path)
+ * @sst: driver context
+ *
+ * Logs a stack trace and the shim registers, dumps the FW exception
+ * area on Clovertrail when the FW was running, and drains/logs the
+ * pending IPC dispatch list.
+ *
+ * Fix: use the @sst context consistently instead of mixing in the
+ * sst_drv_ctx global.
+ */
+void sst_do_recovery(struct intel_sst_drv *sst)
+{
+ pr_err("Audio: Intel SST engine encountered an unrecoverable error\n");
+
+ dump_stack();
+ dump_sst_shim(sst);
+
+ if (sst->sst_state == SST_FW_RUNNING &&
+ sst->pci_id == SST_CLV_PCI_ID)
+ dump_sst_crash_area();
+
+ sst_dump_ipc_dispatch_lists(sst);
+}
+
+/*
+ * sst_wait_timeout - wait on event for timeout
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * Waits (non-interruptibly) up to SST_BLOCK_TIMEOUT ms for the block's
+ * condition to be signalled by the IPC reply path.  Returns the negated
+ * FW ret_code on wake-up (0 on success) or -EBUSY on timeout.  A timeout
+ * during FW download marks the DSP uninitialized (and power-cycles it on
+ * MRFLD); any other timeout triggers the platform recovery handler.
+ */
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
+{
+ int retval = 0;
+
+ /* NOTE:
+ Observed that FW processes the alloc msg and replies even
+ before the alloc thread has finished execution */
+ pr_debug("sst: waiting for condition %x ipc %d drv_id %d\n",
+ block->condition, block->msg_id, block->drv_id);
+ if (wait_event_timeout(sst_drv_ctx->wait_queue,
+ block->condition,
+ msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
+ /* event wake */
+ pr_debug("sst: Event wake %x\n", block->condition);
+ pr_debug("sst: message ret: %d\n", block->ret_code);
+ /* FW ret_code is a positive error code; negate to the
+ * kernel-style negative convention (0 stays 0) */
+ retval = -block->ret_code;
+ } else {
+ block->on = false;
+ pr_err("sst: Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
+ block->condition, block->msg_id, sst_drv_ctx->sst_state);
+
+ if (sst_drv_ctx->sst_state == SST_FW_LOADED ||
+ sst_drv_ctx->sst_state == SST_START_INIT) {
+ /* timeout mid-download: the FW never came up, so a
+ * stream-level recovery is impossible */
+ pr_err("Can't recover as timedout while downloading the FW\n");
+ pr_err("reseting fw state to unint from %d ...\n", sst_drv_ctx->sst_state);
+ sst_drv_ctx->sst_state = SST_UN_INIT;
+
+ dump_sst_shim(sst_drv_ctx);
+
+ /* Reset & Power Off the LPE only for MRFLD */
+ if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID) {
+ sst_stall_lpe_n_wait(sst_drv_ctx);
+
+ /* Send IPC to SCU to power gate and reset the LPE */
+ sst_send_scu_reset_ipc(sst_drv_ctx);
+ }
+
+ } else {
+ if (sst_drv_ctx->ops->do_recovery)
+ sst_drv_ctx->ops->do_recovery(sst_drv_ctx);
+ }
+
+ retval = -EBUSY;
+ }
+ return retval;
+}
+
+/*
+ * sst_create_ipc_msg - allocate an IPC message
+ *
+ * @arg: receives the newly allocated message on success
+ * @large: when true, a SST_MAILBOX_SIZE payload buffer is attached
+ *
+ * Allocates (atomically) the structures needed to send a large or short
+ * message to the firmware.  Returns 0 on success or -ENOMEM.
+ */
+int sst_create_ipc_msg(struct ipc_post **arg, bool large)
+{
+ struct ipc_post *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg) {
+ pr_err("kzalloc ipc msg failed\n");
+ return -ENOMEM;
+ }
+
+ msg->mailbox_data = NULL;
+ if (large) {
+ msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
+ if (!msg->mailbox_data) {
+ kfree(msg);
+ pr_err("kzalloc mailbox_data failed");
+ return -ENOMEM;
+ }
+ }
+
+ msg->is_large = large;
+ *arg = msg;
+ return 0;
+}
+
+/*
+ * sst_create_block_and_ipc_msg - allocate an IPC message plus wait block
+ * @arg: receives the new message (see sst_create_ipc_msg)
+ * @large: large or short message
+ * @sst_drv_ctx: sst driver context
+ * @block: receives the allocated wait block
+ * @msg_id: IPC message id the block will wait for
+ * @drv_id: stream id or private id
+ *
+ * Returns 0 on success; on block-allocation failure the message is
+ * freed again and -ENOMEM is returned.
+ */
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+ struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+ u32 msg_id, u32 drv_id)
+{
+ int ret;
+
+ ret = sst_create_ipc_msg(arg, large);
+ if (ret)
+ return ret;
+
+ *block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
+ if (!*block) {
+ kfree(*arg);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * sst_clean_stream - reset a stream context after it is freed
+ *
+ * @stream: stream structure
+ *
+ * Marks the stream (and its previous state) as uninitialized and zeroes
+ * the cumulative byte counter; intended to be called from the free path.
+ *
+ * NOTE(review): status/prev are written outside stream->lock while
+ * cumm_bytes is written inside it — confirm this split is intentional.
+ */
+void sst_clean_stream(struct stream_info *stream)
+{
+ stream->status = STREAM_UN_INIT;
+ stream->prev = STREAM_UN_INIT;
+ mutex_lock(&stream->lock);
+ stream->cumm_bytes = 0;
+ mutex_unlock(&stream->lock);
+}
+
--- /dev/null
+/*
+ * sst_stream.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file contains the stream operations of SST driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+/**
+ * sst_alloc_stream_ctp - send an ALLOC_STREAM IPC for a CTP stream
+ *
+ * @params: pointer to a struct snd_sst_params describing the stream
+ *          (ops, codec, device type, stream/alloc params, stream id)
+ * @block: wait block whose drv_id/msg_id are filled in here so the
+ *         caller can wait for the FW reply
+ *
+ * Initializes the local stream context for the id chosen by the caller
+ * and posts an IPC_IA_ALLOC_STREAM message to the firmware.  Returns
+ * the stream id on success, -EBUSY for an invalid id, or -ENOMEM.
+ */
+int sst_alloc_stream_ctp(char *params, struct sst_block *block)
+{
+ struct ipc_post *msg = NULL;
+ struct snd_sst_alloc_params alloc_param;
+ unsigned int pcm_slot = 0x03, num_ch;
+ int str_id;
+ struct snd_sst_params *str_params;
+ struct snd_sst_stream_params *sparams;
+ struct snd_sst_alloc_params_ext *aparams;
+ struct stream_info *str_info;
+ unsigned int stream_ops, device;
+ u8 codec;
+
+ pr_debug("In %s\n", __func__);
+
+ BUG_ON(!params);
+ str_params = (struct snd_sst_params *)params;
+ stream_ops = str_params->ops;
+ codec = str_params->codec;
+ device = str_params->device_type;
+ sparams = &str_params->sparams;
+ aparams = &str_params->aparams;
+ num_ch = sst_get_num_channel(str_params);
+
+ pr_debug("period_size = %d\n", aparams->frag_size);
+ pr_debug("ring_buf_addr = 0x%x\n", aparams->ring_buf_info[0].addr);
+ pr_debug("ring_buf_size = %d\n", aparams->ring_buf_info[0].size);
+ pr_debug("In alloc device_type=%d\n", str_params->device_type);
+ pr_debug("In alloc sg_count =%d\n", aparams->sg_count);
+
+ str_id = str_params->stream_id;
+ if (str_id <= 0)
+ return -EBUSY;
+
+ /*allocate device type context*/
+ sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
+ str_id, stream_ops, pcm_slot);
+ /* send msg to FW to allocate a stream */
+ if (sst_create_ipc_msg(&msg, true))
+ return -ENOMEM;
+
+ alloc_param.str_type.codec_type = codec;
+ alloc_param.str_type.str_type = str_params->stream_type;
+ alloc_param.str_type.operation = stream_ops;
+ alloc_param.str_type.protected_str = 0; /* non drm */
+ alloc_param.str_type.time_slots = pcm_slot;
+ alloc_param.str_type.reserved = 0;
+ alloc_param.str_type.result = 0;
+ memcpy(&alloc_param.stream_params, sparams,
+ sizeof(struct snd_sst_stream_params));
+ memcpy(&alloc_param.alloc_params, aparams,
+ sizeof(struct snd_sst_alloc_params_ext));
+ /* caller waits on this block for the FW's alloc reply */
+ block->drv_id = str_id;
+ block->msg_id = IPC_IA_ALLOC_STREAM;
+ sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
+ msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
+ /* mailbox layout: 32-bit header copy followed by the alloc params */
+ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+ memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
+ sizeof(alloc_param));
+ str_info = &sst_drv_ctx->streams[str_id];
+ str_info->num_ch = num_ch;
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ return str_id;
+}
+
+/*
+ * sst_alloc_stream_mrfld - send an ALLOC_STREAM IPC for a MRFLD stream
+ * @params: pointer to a struct snd_sst_params describing the stream
+ * @block: wait block whose drv_id/msg_id are filled in here
+ *
+ * Builds a snd_sst_alloc_mrfld payload (including a hardcoded channel
+ * map and the per-stream timestamp address) and posts it as an IPC_CMD
+ * message.  Returns the stream id on success or -ENOMEM.
+ */
+int sst_alloc_stream_mrfld(char *params, struct sst_block *block)
+{
+ struct ipc_post *msg = NULL;
+ struct snd_sst_alloc_mrfld alloc_param;
+ struct ipc_dsp_hdr dsp_hdr;
+ struct snd_sst_params *str_params;
+ struct snd_sst_tstamp fw_tstamp;
+ unsigned int str_id, pipe_id, pvt_id, task_id;
+ u32 len = 0;
+ struct stream_info *str_info;
+ int i, num_ch;
+
+ pr_debug("In %s\n", __func__);
+ BUG_ON(!params);
+
+ str_params = (struct snd_sst_params *)params;
+ memset(&alloc_param, 0, sizeof(alloc_param));
+ alloc_param.operation = str_params->ops;
+ alloc_param.codec_type = str_params->codec;
+ alloc_param.sg_count = str_params->aparams.sg_count;
+ alloc_param.ring_buf_info[0].addr = str_params->aparams.ring_buf_info[0].addr;
+ alloc_param.ring_buf_info[0].size = str_params->aparams.ring_buf_info[0].size;
+ alloc_param.frag_size = str_params->aparams.frag_size;
+
+ memcpy(&alloc_param.codec_params, &str_params->sparams,
+ sizeof(struct snd_sst_stream_params));
+
+ /* fill channel map params for multichannel support.
+ * Ideally channel map should be received from upper layers
+ * for multichannel support.
+ * Currently hardcoding as per FW reqm.
+ */
+ num_ch = sst_get_num_channel(str_params);
+ for (i = 0; i < 8; i++) {
+ if (i < num_ch)
+ alloc_param.codec_params.uc.pcm_params.channel_map[i] = i;
+ else
+ alloc_param.codec_params.uc.pcm_params.channel_map[i] = 0xFF;
+ }
+
+ str_id = str_params->stream_id;
+ pipe_id = str_params->device_type;
+ task_id = str_params->task;
+ sst_drv_ctx->streams[str_id].pipe_id = pipe_id;
+ sst_drv_ctx->streams[str_id].task_id = task_id;
+ sst_drv_ctx->streams[str_id].num_ch = num_ch;
+
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ /* point the FW at this stream's timestamp slot; the base differs
+ * depending on whether the LPE viewpoint address is required */
+ if (sst_drv_ctx->info.lpe_viewpt_rqd)
+ alloc_param.ts = sst_drv_ctx->info.mailbox_start +
+ sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+ else
+ alloc_param.ts = sst_drv_ctx->mailbox_add +
+ sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+
+ pr_debug("alloc tstamp location = 0x%x\n", alloc_param.ts);
+ pr_debug("assigned pipe id 0x%x to task %d\n", pipe_id, task_id);
+
+ /*allocate device type context*/
+ sst_init_stream(&sst_drv_ctx->streams[str_id], alloc_param.codec_type,
+ str_id, alloc_param.operation, 0);
+ /* send msg to FW to allocate a stream */
+ if (sst_create_ipc_msg(&msg, true))
+ return -ENOMEM;
+
+ /* replies for MRFLD are matched by private id, not stream id */
+ block->drv_id = pvt_id;
+ block->msg_id = IPC_CMD;
+
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ task_id, 1, pvt_id);
+ pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
+ msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+ len = msg->mrfld_header.p.header_low_payload = sizeof(alloc_param) + sizeof(dsp_hdr);
+ sst_fill_header_dsp(&dsp_hdr, IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param));
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ memcpy(msg->mailbox_data + sizeof(dsp_hdr), &alloc_param,
+ sizeof(alloc_param));
+ trace_sst_stream("ALLOC ->", str_id, pipe_id);
+ str_info = &sst_drv_ctx->streams[str_id];
+ pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
+ pr_debug("response rqd: %x", msg->mrfld_header.p.header_high.part.res_rqd);
+ pr_debug("calling post_message\n");
+ pr_info("Alloc for str %d pipe %#x\n", str_id, pipe_id);
+
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ return str_id;
+}
+
+/**
+* sst_start_stream - Send msg for starting a stream
+* @str_id: stream ID
+*
+* Posts a synchronous START message (MRFLD IPC_CMD or CTP
+* IPC_IA_START_STREAM depending on use_32bit_ops) for an allocated
+* stream.  Returns 0 on success, -EINVAL/-EBADRQC/-ENOMEM on error.
+*
+* NOTE(review): the state check requires status == STREAM_RUNNING
+* before sending START — presumably the caller marks the stream
+* RUNNING first; confirm against the trigger path.
+*/
+int sst_start_stream(int str_id)
+{
+ int retval = 0, pvt_id;
+ u32 len = 0;
+ struct ipc_post *msg = NULL;
+ struct ipc_dsp_hdr dsp_hdr;
+ struct stream_info *str_info;
+
+ pr_debug("sst_start_stream for %d\n", str_id);
+ str_info = get_stream_info(str_id);
+ if (!str_info)
+ return -EINVAL;
+ if (str_info->status != STREAM_RUNNING)
+ return -EBADRQC;
+
+ if (sst_create_ipc_msg(&msg, true))
+ return -ENOMEM;
+
+ if (!sst_drv_ctx->use_32bit_ops) {
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ pr_debug("pvt_id = %d, pipe id = %d, task = %d\n",
+ pvt_id, str_info->pipe_id, str_info->task_id);
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ str_info->task_id, 1, pvt_id);
+
+ /* payload: dsp header + one (zeroed) u16 */
+ len = sizeof(u16) + sizeof(dsp_hdr);
+ msg->mrfld_header.p.header_low_payload = len;
+ sst_fill_header_dsp(&dsp_hdr, IPC_IA_START_STREAM_MRFLD,
+ str_info->pipe_id, sizeof(u16));
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ memset(msg->mailbox_data + sizeof(dsp_hdr), 0, sizeof(u16));
+ trace_sst_stream("START ->", str_id, str_info->pipe_id);
+ pr_info("Start for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+ } else {
+ pr_debug("fill START_STREAM for CTP\n");
+ sst_fill_header(&msg->header, IPC_IA_START_STREAM, 1, str_id);
+ msg->header.part.data = sizeof(u32) + sizeof(u32);
+ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+ memset(msg->mailbox_data + sizeof(u32), 0, sizeof(u32));
+ }
+ sst_drv_ctx->ops->sync_post_message(msg);
+ return retval;
+}
+
+/*
+ * sst_send_byte_stream_mrfld - forward a raw byte-stream command to FW
+ * @sbytes: pointer to a struct snd_sst_bytes_v2 from the platform layer
+ *
+ * Wraps the user-supplied bytes in a MRFLD IPC message.  When
+ * bytes->block is set, waits for the FW reply; for SND_SST_BYTES_GET the
+ * reply payload is copied back into bytes->bytes.
+ *
+ * NOTE(review): bytes->len is user-controlled and copied into the
+ * mailbox without a bound check against SST_MAILBOX_SIZE — see the
+ * FIXME below; verify the platform driver validates it.
+ */
+int sst_send_byte_stream_mrfld(void *sbytes)
+{
+ struct ipc_post *msg = NULL;
+ struct snd_sst_bytes_v2 *bytes = (struct snd_sst_bytes_v2 *) sbytes;
+ u32 length;
+ int pvt_id, ret = 0;
+ struct sst_block *block = NULL;
+
+ pr_debug("%s: type:%u ipc_msg:%u block:%u task_id:%u pipe: %#x length:%#x\n",
+ __func__, bytes->type, bytes->ipc_msg,
+ bytes->block, bytes->task_id,
+ bytes->pipe_id, bytes->len);
+
+ /* need some err check as this is user data, perhpas move this to the
+ * platform driver and pass the struct
+ */
+ if (sst_create_ipc_msg(&msg, true))
+ return -ENOMEM;
+
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ sst_fill_header_mrfld(&msg->mrfld_header, bytes->ipc_msg, bytes->task_id,
+ 1, pvt_id);
+ msg->mrfld_header.p.header_high.part.res_rqd = bytes->block;
+ length = bytes->len;
+ msg->mrfld_header.p.header_low_payload = length;
+ pr_debug("length is %d\n", length);
+ memcpy(msg->mailbox_data, &bytes->bytes, bytes->len);
+ trace_sst_stream("BYTES ->", bytes->type, bytes->pipe_id);
+ if (bytes->block) {
+ block = sst_create_block(sst_drv_ctx, bytes->ipc_msg, pvt_id);
+ if (block == NULL) {
+ kfree(msg);
+ return -ENOMEM;
+ }
+ }
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ pr_debug("msg->mrfld_header.p.header_low_payload:%d", msg->mrfld_header.p.header_low_payload);
+ if (bytes->block) {
+ ret = sst_wait_timeout(sst_drv_ctx, block);
+ if (ret) {
+ pr_err("%s: fw returned err %d\n", __func__, ret);
+ sst_free_block(sst_drv_ctx, block);
+ return ret;
+ }
+ }
+ if (bytes->type == SND_SST_BYTES_GET) {
+ /* copy the reply and send back
+ * we need to update only sz and payload
+ */
+ if (bytes->block) {
+ unsigned char *r = block->data;
+ pr_debug("read back %d bytes", bytes->len);
+ memcpy(bytes->bytes, r, bytes->len);
+ trace_sst_stream("BYTES <-", bytes->type, bytes->pipe_id);
+ }
+ }
+ if (bytes->block)
+ sst_free_block(sst_drv_ctx, block);
+ return 0;
+}
+
+/*
+ * sst_send_probe_bytes - send probe-point parameters to the firmware
+ * @sst: driver context (sst->probe_bytes holds the payload)
+ *
+ * Posts an IPC_IA_DBG_SET_PROBE_PARAMS message carrying the cached
+ * probe bytes and waits (with timeout) for the FW acknowledgment.
+ * Returns 0 on success or a negative error.
+ */
+int sst_send_probe_bytes(struct intel_sst_drv *sst)
+{
+ struct ipc_post *msg = NULL;
+ struct sst_block *block;
+ int ret_val = 0;
+
+ ret_val = sst_create_block_and_ipc_msg(&msg, true, sst,
+ &block, IPC_IA_DBG_SET_PROBE_PARAMS, 0);
+ if (ret_val) {
+ pr_err("Can't allocate block/msg: Probe Byte Stream\n");
+ return ret_val;
+ }
+
+ sst_fill_header(&msg->header, IPC_IA_DBG_SET_PROBE_PARAMS, 1, 0);
+
+ /* mailbox: 32-bit header copy followed by the probe payload */
+ msg->header.part.data = sizeof(u32) + sst->probe_bytes->len;
+ memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+ memcpy(msg->mailbox_data + sizeof(u32), sst->probe_bytes->bytes,
+ sst->probe_bytes->len);
+
+ sst_add_to_dispatch_list_and_post(sst, msg);
+ ret_val = sst_wait_timeout(sst, block);
+ sst_free_block(sst, block);
+ if (ret_val)
+ pr_err("set probe stream param..timeout!\n");
+ return ret_val;
+}
+
+/*
+ * sst_pause_stream - Send msg for a pausing stream
+ * @str_id: stream ID
+ *
+ * Posts a PAUSE IPC (MRFLD or CTP flavor) for a running/initialized
+ * stream and waits for the FW reply.  On success the stream state
+ * moves to STREAM_PAUSED; if the FW reports an invalid stream id the
+ * local context is cleaned up.
+ *
+ * Fix: sst_wait_timeout() returns the NEGATED FW error code, so the
+ * invalid-stream check must compare against -SST_ERR_INVALID_STREAM_ID
+ * (as sst_resume_stream already does); the old positive compare could
+ * never match.
+ */
+int sst_pause_stream(int str_id)
+{
+ int retval = 0, pvt_id, len;
+ struct ipc_post *msg = NULL;
+ struct stream_info *str_info;
+ struct intel_sst_ops *ops;
+ struct sst_block *block;
+ struct ipc_dsp_hdr dsp_hdr;
+
+ pr_debug("SST DBG:sst_pause_stream for %d\n", str_id);
+ str_info = get_stream_info(str_id);
+ if (!str_info)
+ return -EINVAL;
+ ops = sst_drv_ctx->ops;
+ if (str_info->status == STREAM_PAUSED)
+ return 0;
+ if (str_info->status == STREAM_RUNNING ||
+ str_info->status == STREAM_INIT) {
+ if (str_info->prev == STREAM_UN_INIT)
+ return -EBADRQC;
+ if (!sst_drv_ctx->use_32bit_ops) {
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ retval = sst_create_block_and_ipc_msg(&msg, true,
+ sst_drv_ctx, &block, IPC_CMD, pvt_id);
+ if (retval)
+ return retval;
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ str_info->task_id, 1, pvt_id);
+ msg->mrfld_header.p.header_high.part.res_rqd = 1;
+ len = sizeof(dsp_hdr);
+ msg->mrfld_header.p.header_low_payload = len;
+ sst_fill_header_dsp(&dsp_hdr, IPC_IA_PAUSE_STREAM_MRFLD,
+ str_info->pipe_id, 0);
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ trace_sst_stream("PAUSE ->", str_id, str_info->pipe_id);
+ } else {
+ retval = sst_create_block_and_ipc_msg(&msg, false,
+ sst_drv_ctx, &block,
+ IPC_IA_PAUSE_STREAM, str_id);
+ if (retval)
+ return retval;
+ sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM,
+ 0, str_id);
+ }
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ retval = sst_wait_timeout(sst_drv_ctx, block);
+ sst_free_block(sst_drv_ctx, block);
+ if (retval == 0) {
+ str_info->prev = str_info->status;
+ str_info->status = STREAM_PAUSED;
+ } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
+ retval = -EINVAL;
+ mutex_lock(&sst_drv_ctx->stream_lock);
+ sst_clean_stream(str_info);
+ mutex_unlock(&sst_drv_ctx->stream_lock);
+ }
+ } else {
+ retval = -EBADRQC;
+ pr_debug("SST DBG:BADRQC for stream\n ");
+ }
+
+ return retval;
+}
+
+/**
+ * sst_resume_stream - Send msg for resuming stream
+ * @str_id: stream ID
+ *
+ * Posts a RESUME IPC for a paused stream and waits for the FW reply.
+ * On success the state returns to whatever preceded the pause
+ * (RUNNING or INIT); an invalid-stream reply cleans the local context.
+ */
+int sst_resume_stream(int str_id)
+{
+ int retval = 0;
+ struct ipc_post *msg = NULL;
+ struct stream_info *str_info;
+ struct intel_sst_ops *ops;
+ struct sst_block *block = NULL;
+ int pvt_id, len;
+ struct ipc_dsp_hdr dsp_hdr;
+
+ pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
+ str_info = get_stream_info(str_id);
+ if (!str_info)
+ return -EINVAL;
+ ops = sst_drv_ctx->ops;
+ if (str_info->status == STREAM_RUNNING)
+ return 0;
+ if (str_info->status == STREAM_PAUSED) {
+ if (!sst_drv_ctx->use_32bit_ops) {
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ retval = sst_create_block_and_ipc_msg(&msg, true,
+ sst_drv_ctx, &block, IPC_CMD, pvt_id);
+ if (retval)
+ return retval;
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ str_info->task_id, 1, pvt_id);
+ msg->mrfld_header.p.header_high.part.res_rqd = 1;
+ len = sizeof(dsp_hdr);
+ msg->mrfld_header.p.header_low_payload = len;
+ sst_fill_header_dsp(&dsp_hdr,
+ IPC_IA_RESUME_STREAM_MRFLD,
+ str_info->pipe_id, 0);
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ trace_sst_stream("RESUME->", str_id, str_info->pipe_id);
+ } else {
+ retval = sst_create_block_and_ipc_msg(&msg, false,
+ sst_drv_ctx, &block,
+ IPC_IA_RESUME_STREAM, str_id);
+ if (retval)
+ return retval;
+ sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM,
+ 0, str_id);
+ }
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ retval = sst_wait_timeout(sst_drv_ctx, block);
+ sst_free_block(sst_drv_ctx, block);
+ if (!retval) {
+ if (str_info->prev == STREAM_RUNNING)
+ str_info->status = STREAM_RUNNING;
+ else
+ str_info->status = STREAM_INIT;
+ str_info->prev = STREAM_PAUSED;
+ } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
+ retval = -EINVAL;
+ mutex_lock(&sst_drv_ctx->stream_lock);
+ sst_clean_stream(str_info);
+ mutex_unlock(&sst_drv_ctx->stream_lock);
+ }
+ } else {
+ retval = -EBADRQC;
+ pr_err("SST ERR: BADQRC for stream\n");
+ }
+
+ return retval;
+}
+
+
+/**
+ * sst_drop_stream - Send msg for stopping stream
+ * @str_id: stream ID
+ *
+ * Sends a synchronous DROP/STOP message for any stream that is not
+ * uninitialized, resetting the local state to STREAM_INIT and clearing
+ * the cumulative byte counter.  Returns 0 or -EINVAL/-EBADRQC/-ENOMEM.
+ */
+int sst_drop_stream(int str_id)
+{
+ int retval = 0, pvt_id;
+ struct stream_info *str_info;
+ struct ipc_post *msg = NULL;
+ struct ipc_dsp_hdr dsp_hdr;
+
+ pr_debug("SST DBG:sst_drop_stream for %d\n", str_id);
+ str_info = get_stream_info(str_id);
+ if (!str_info)
+ return -EINVAL;
+
+ if (str_info->status != STREAM_UN_INIT) {
+
+ if (sst_drv_ctx->use_32bit_ops == true) {
+ /* CTP path sends a fully synchronous short message */
+ str_info->prev = STREAM_UN_INIT;
+ str_info->status = STREAM_INIT;
+ str_info->cumm_bytes = 0;
+ sst_send_sync_msg(IPC_IA_DROP_STREAM, str_id);
+ } else {
+ if (sst_create_ipc_msg(&msg, true))
+ return -ENOMEM;
+ str_info->prev = STREAM_UN_INIT;
+ str_info->status = STREAM_INIT;
+ str_info->cumm_bytes = 0;
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ str_info->task_id, 1, pvt_id);
+
+ msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr);
+ sst_fill_header_dsp(&dsp_hdr, IPC_IA_DROP_STREAM_MRFLD,
+ str_info->pipe_id, 0);
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ trace_sst_stream("STOP ->", str_id, str_info->pipe_id);
+ pr_info("Stop for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+ /* sync post; presumably ownership of msg passes to the
+ * IPC layer — confirm it is freed there */
+ sst_drv_ctx->ops->sync_post_message(msg);
+ }
+ } else {
+ retval = -EBADRQC;
+ pr_debug("BADQRC for stream, state %x\n", str_info->status);
+ }
+ return retval;
+}
+
+/**
+ * sst_next_track: notify next track
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to
+ * set the next track.  Currently this is a NOP as the FW doesn't
+ * need the notification.
+ */
+int sst_next_track(void)
+{
+ pr_debug("SST DBG: next_track");
+ return 0;
+}
+
+/**
+* sst_drain_stream - Send msg for draining stream
+* @str_id: stream ID
+* @partial_drain: true for a partial (gapless) drain, false for full
+*
+* Posts a DRAIN IPC for a running/init/paused stream.  The drain is
+* non-blocking: completion is delivered via callback, so the wait block
+* is freed immediately after posting.
+*/
+int sst_drain_stream(int str_id, bool partial_drain)
+{
+ int retval = 0, pvt_id, len;
+ struct ipc_post *msg = NULL;
+ struct stream_info *str_info;
+ struct intel_sst_ops *ops;
+ struct sst_block *block = NULL;
+ struct ipc_dsp_hdr dsp_hdr;
+
+ pr_debug("SST DBG:sst_drain_stream for %d\n", str_id);
+ str_info = get_stream_info(str_id);
+ if (!str_info)
+ return -EINVAL;
+ ops = sst_drv_ctx->ops;
+ if (str_info->status != STREAM_RUNNING &&
+ str_info->status != STREAM_INIT &&
+ str_info->status != STREAM_PAUSED) {
+ pr_err("SST ERR: BADQRC for stream = %d\n",
+ str_info->status);
+ return -EBADRQC;
+ }
+
+ if (!sst_drv_ctx->use_32bit_ops) {
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ retval = sst_create_block_and_ipc_msg(&msg, true,
+ sst_drv_ctx, &block, IPC_CMD, pvt_id);
+ if (retval)
+ return retval;
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ str_info->task_id, 1, pvt_id);
+ pr_debug("header:%x\n",
+ (unsigned int)msg->mrfld_header.p.header_high.full);
+ msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+ /* payload: dsp header + one u8 carrying the partial flag */
+ len = sizeof(u8) + sizeof(dsp_hdr);
+ msg->mrfld_header.p.header_low_payload = len;
+ sst_fill_header_dsp(&dsp_hdr, IPC_IA_DRAIN_STREAM_MRFLD,
+ str_info->pipe_id, sizeof(u8));
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+ &partial_drain, sizeof(u8));
+ trace_sst_stream("DRAIN ->", str_id, str_info->pipe_id);
+ } else {
+ retval = sst_create_block_and_ipc_msg(&msg, false,
+ sst_drv_ctx, &block,
+ IPC_IA_DRAIN_STREAM, str_id);
+ if (retval)
+ return retval;
+ sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
+ msg->header.part.data = partial_drain;
+ }
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ /* with new non blocked drain implementation in core we dont need to
+ * wait for respsonse, and need to only invoke callback for drain
+ * complete
+ */
+
+ sst_free_block(sst_drv_ctx, block);
+ return retval;
+}
+
+/**
+ * sst_free_stream - Frees a stream
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to free
+ * a stream.
+ *
+ * Returns 0 on success, -ENODEV if the DSP is uninitialized, -EINVAL
+ * for an unknown stream id, and -EBADRQC if the stream is already
+ * un-initialized.
+ */
+int sst_free_stream(int str_id)
+{
+ int retval = 0;
+ unsigned int pvt_id;
+ struct ipc_post *msg = NULL;
+ struct stream_info *str_info;
+ struct intel_sst_ops *ops;
+ unsigned long irq_flags;
+ struct ipc_dsp_hdr dsp_hdr;
+ struct sst_block *block;
+
+ pr_debug("SST DBG:sst_free_stream for %d\n", str_id);
+
+ /* bail out early if the DSP itself is not initialized */
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ if (sst_drv_ctx->sst_state == SST_UN_INIT) {
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+ str_info = get_stream_info(str_id);
+ if (!str_info)
+ return -EINVAL;
+ ops = sst_drv_ctx->ops;
+
+ /* record the previous state and mark the stream un-init under the
+ * stream lock before issuing the free IPC
+ */
+ mutex_lock(&str_info->lock);
+ if (str_info->status != STREAM_UN_INIT) {
+ str_info->prev = str_info->status;
+ str_info->status = STREAM_UN_INIT;
+ mutex_unlock(&str_info->lock);
+
+ if (!sst_drv_ctx->use_32bit_ops) {
+ /* mrfld-style IPC: free command addressed by pipe id */
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ retval = sst_create_block_and_ipc_msg(&msg, true,
+ sst_drv_ctx, &block, IPC_CMD, pvt_id);
+ if (retval)
+ return retval;
+
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ str_info->task_id, 1, pvt_id);
+ msg->mrfld_header.p.header_low_payload =
+ sizeof(dsp_hdr);
+ sst_fill_header_dsp(&dsp_hdr, IPC_IA_FREE_STREAM_MRFLD,
+ str_info->pipe_id, 0);
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ trace_sst_stream("FREE ->", str_id, str_info->pipe_id);
+ pr_info("Free for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+ } else {
+ retval = sst_create_block_and_ipc_msg(&msg, false,
+ sst_drv_ctx, &block,
+ IPC_IA_FREE_STREAM, str_id);
+ if (retval)
+ return retval;
+ sst_fill_header(&msg->header, IPC_IA_FREE_STREAM,
+ 0, str_id);
+ }
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ /* wake any waiter blocked on a pending drain for this stream */
+ if (!sst_drv_ctx->use_32bit_ops) {
+ /*FIXME: do we need to wake up drain stream here,
+ * how to get the pvt_id and msg_id
+ */
+ } else {
+ sst_wake_up_block(sst_drv_ctx, 0, str_id,
+ IPC_IA_DRAIN_STREAM, NULL, 0);
+ }
+ ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
+ /* wait (with timeout) for the firmware's response */
+ retval = sst_wait_timeout(sst_drv_ctx, block);
+ pr_debug("sst: wait for free returned %d\n", retval);
+ mutex_lock(&sst_drv_ctx->stream_lock);
+ sst_clean_stream(str_info);
+ mutex_unlock(&sst_drv_ctx->stream_lock);
+ pr_debug("SST DBG:Stream freed\n");
+ sst_free_block(sst_drv_ctx, block);
+ } else {
+ mutex_unlock(&str_info->lock);
+ retval = -EBADRQC;
+ pr_debug("SST DBG:BADQRC for stream\n");
+ }
+
+ return retval;
+}
+
+/*
+ * sst_request_vtsv_file - load a VTSV data file and copy it into DSP
+ * library memory
+ * @fname: firmware file name to request
+ * @ctx: driver context
+ * @out_file: in/out cached DSP-side address of the file buffer; a new
+ * buffer is allocated when NULL or too small
+ * @out_size: in/out size of the cached buffer, updated to the new size
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int sst_request_vtsv_file(char *fname, struct intel_sst_drv *ctx,
+ void **out_file, u32 *out_size)
+{
+ int retval = 0;
+ const struct firmware *file;
+ void *ddr_virt_addr;
+ unsigned long file_base;
+
+ if (!ctx->pdata->lib_info) {
+ pr_err("lib_info pointer NULL\n");
+ return -EINVAL;
+ }
+
+ pr_debug("Requesting VTSV file %s now...\n", fname);
+ retval = request_firmware(&file, fname, ctx->dev);
+ /* check the error code before looking at 'file' so the real error
+ * is not masked by -EINVAL
+ */
+ if (retval) {
+ pr_err("request fw failed %d\n", retval);
+ return retval;
+ }
+ if (file == NULL) {
+ pr_err("VTSV file is returning as null\n");
+ return -EINVAL;
+ }
+
+ /* allocate DSP library memory only if nothing is cached yet or the
+ * cached buffer is too small for the new image
+ */
+ if ((*out_file == NULL) || (*out_size < file->size)) {
+ retval = sst_get_next_lib_mem(&ctx->lib_mem_mgr, file->size,
+ &file_base);
+ if (retval) {
+ pr_err("lib mem alloc failed %d\n", retval);
+ release_firmware(file);
+ return retval;
+ }
+ *out_file = (void *)file_base;
+ }
+ /* translate the DSP-side offset into the kernel mapping of DDR */
+ ddr_virt_addr = (unsigned char *)ctx->ddr +
+ (unsigned long)(*out_file - ctx->pdata->lib_info->mod_base);
+ memcpy(ddr_virt_addr, file->data, file->size);
+
+ *out_size = file->size;
+ release_firmware(file);
+ return 0;
+}
+
+/*
+ * sst_format_vtsv_message - build the VTSV update-modules IPC message
+ * @ctx: driver context holding the cached VTSV file addresses/sizes
+ * @msgptr: out, the allocated IPC message
+ * @block: out, the response block associated with the message
+ *
+ * Returns 0 on success or the error from message allocation.
+ */
+int sst_format_vtsv_message(struct intel_sst_drv *ctx,
+ struct ipc_post **msgptr, struct sst_block **block)
+{
+ int retval = 0, pvt_id, len;
+ struct ipc_dsp_hdr dsp_hdr;
+ struct snd_sst_vtsv_info vinfo;
+ struct ipc_post *msg;
+
+ /* the firmware gets 32-bit addresses; anything above 4G is fatal */
+ BUG_ON((unsigned long)(ctx->vcache.file1_in_mem) & 0xffffffff00000000ULL);
+ BUG_ON((unsigned long)(ctx->vcache.file2_in_mem) & 0xffffffff00000000ULL);
+
+ vinfo.vfiles[0].addr = (u32)((unsigned long)ctx->vcache.file1_in_mem
+ & 0xffffffff);
+ vinfo.vfiles[0].size = ctx->vcache.size1;
+ vinfo.vfiles[1].addr = (u32)((unsigned long)ctx->vcache.file2_in_mem
+ & 0xffffffff);
+ vinfo.vfiles[1].size = ctx->vcache.size2;
+
+ /* Create the vtsv message */
+ pvt_id = sst_assign_pvt_id(ctx);
+ retval = sst_create_block_and_ipc_msg(msgptr, true,
+ ctx, block, IPC_CMD, pvt_id);
+ if (retval)
+ return retval;
+ msg = *msgptr;
+ sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+ SST_TASK_ID_AWARE, 1, pvt_id);
+ pr_debug("header:%x\n",
+ (unsigned int)msg->mrfld_header.p.header_high.full);
+ msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+ /* mailbox layout: dsp header followed by the vtsv file info */
+ len = sizeof(vinfo) + sizeof(dsp_hdr);
+ msg->mrfld_header.p.header_low_payload = len;
+ /* NOTE(review): dsp_hdr size field is sizeof(u8) although vinfo is
+ * copied after the header - confirm the firmware's expectation
+ */
+ sst_fill_header_dsp(&dsp_hdr, IPC_IA_VTSV_UPDATE_MODULES,
+ PIPE_VAD_OUT, sizeof(u8));
+ dsp_hdr.mod_id = SST_ALGO_VTSV;
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+ &vinfo, sizeof(vinfo));
+ return 0;
+}
+
+/*
+ * sst_send_vtsv_data_to_fw - download the VTSV data files and tell the
+ * firmware to pick them up via an update-modules IPC.
+ */
+int sst_send_vtsv_data_to_fw(struct intel_sst_drv *ctx)
+{
+ struct ipc_post *ipc_msg = NULL;
+ struct sst_block *blk = NULL;
+ int ret;
+
+ /* Download both the data files */
+ ret = sst_request_vtsv_file("vtsv_net.bin", ctx,
+ &ctx->vcache.file1_in_mem, &ctx->vcache.size1);
+ if (ret) {
+ pr_err("vtsv data file1 request failed %d\n", ret);
+ return ret;
+ }
+
+ ret = sst_request_vtsv_file("vtsv_grammar.bin", ctx,
+ &ctx->vcache.file2_in_mem, &ctx->vcache.size2);
+ if (ret) {
+ pr_err("vtsv data file2 request failed %d\n", ret);
+ return ret;
+ }
+
+ /* build the IPC, post it and wait for the firmware's answer */
+ ret = sst_format_vtsv_message(ctx, &ipc_msg, &blk);
+ if (ret) {
+ pr_err("vtsv msg format failed %d\n", ret);
+ return ret;
+ }
+ sst_add_to_dispatch_list_and_post(ctx, ipc_msg);
+ ret = sst_wait_timeout(ctx, blk);
+ if (ret)
+ pr_err("vtsv msg send to fw failed %d\n", ret);
+
+ sst_free_block(ctx, blk);
+ return ret;
+}
--- /dev/null
+/*
+ * sst_trace.h - Intel SST Driver tracing support
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Authors: Omair Mohammed Abdullah <omair.m.abdullah@linux.intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sst
+
+#if !defined(_TRACE_SST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SST_H
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+/* Logs one IPC message header (high:low words) with its private id */
+TRACE_EVENT(sst_ipc,
+
+ TP_PROTO(const char *msg, u32 header_high, u32 header_low, int pvt_id),
+
+ TP_ARGS(msg, header_high, header_low, pvt_id),
+
+ TP_STRUCT__entry(
+ __string(info_msg, msg)
+ __field(unsigned int, val_l)
+ __field(unsigned int, val_h)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(info_msg, msg);
+ __entry->val_l = header_low;
+ __entry->val_h = header_high;
+ __entry->id = pvt_id;
+ ),
+
+ TP_printk("\t%s\t [%2u] = %#8.8x:%.4x", __get_str(info_msg),
+ (unsigned int)__entry->id,
+ (unsigned int)__entry->val_h, (unsigned int)__entry->val_l)
+
+);
+
+/* Logs stream lifecycle events (e.g. "DRAIN ->", "FREE ->") with the
+ * stream and pipe ids
+ */
+TRACE_EVENT(sst_stream,
+
+ TP_PROTO(const char *msg, int str_id, int pipe_id),
+
+ TP_ARGS(msg, str_id, pipe_id),
+
+ TP_STRUCT__entry(
+ __string(info_msg, msg)
+ __field(unsigned int, str_id)
+ __field(unsigned int, pipe_id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(info_msg, msg);
+ __entry->str_id = str_id;
+ __entry->pipe_id = pipe_id;
+ ),
+
+ TP_printk("\t%s\t str = %2u, pipe = %#x", __get_str(info_msg),
+ (unsigned int)__entry->str_id, (unsigned int)__entry->pipe_id)
+);
+
+/* Dumps the IPC mailbox contents as text; reserves 3 chars per mailbox
+ * byte (NOTE(review): assumes sst_dump_to_buffer emits "xx " per byte -
+ * confirm against its implementation)
+ */
+TRACE_EVENT(sst_ipc_mailbox,
+
+ TP_PROTO(const char *mailbox, int mbox_len),
+
+ TP_ARGS(mailbox, mbox_len),
+
+ TP_STRUCT__entry(
+ __dynamic_array(char, mbox, (3 * mbox_len))
+ ),
+
+ TP_fast_assign(
+ sst_dump_to_buffer(mailbox, mbox_len,
+ __get_dynamic_array(mbox));
+ ),
+
+ TP_printk(" %s", __get_str(mbox))
+
+);
+
+/* Logs library download progress messages with the library name */
+TRACE_EVENT(sst_lib_download,
+
+ TP_PROTO(const char *msg, const char *lib_name),
+
+ TP_ARGS(msg, lib_name),
+
+ TP_STRUCT__entry(
+ __string(info_msg, msg)
+ __string(info_lib_name, lib_name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(info_msg, msg);
+ __assign_str(info_lib_name, lib_name);
+ ),
+
+ TP_printk("\t%s %s", __get_str(info_msg),
+ __get_str(info_lib_name))
+);
+
+/* Logs firmware download progress along with the driver's fw state */
+TRACE_EVENT(sst_fw_download,
+
+ TP_PROTO(const char *msg, int fw_state),
+
+ TP_ARGS(msg, fw_state),
+
+ TP_STRUCT__entry(
+ __string(info_msg, msg)
+ __field(unsigned int, fw_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(info_msg, msg);
+ __entry->fw_state = fw_state;
+ ),
+
+ TP_printk("\t%s\tFW state = %d", __get_str(info_msg),
+ (unsigned int)__entry->fw_state)
+);
+
+#endif /* _TRACE_SST_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE sst_trace
+#include <trace/define_trace.h>
*
*/
-#ifndef __SST_PLATFORMDRV_H__
-#define __SST_PLATFORMDRV_H__
-
-#include "sst_dsp.h"
-
-#define SST_MONO 1
-#define SST_STEREO 2
-#define SST_MAX_CAP 5
-
-#define SST_MIN_RATE 8000
-#define SST_MAX_RATE 48000
-#define SST_MIN_CHANNEL 1
-#define SST_MAX_CHANNEL 5
-#define SST_MAX_BUFFER (800*1024)
-#define SST_MIN_BUFFER (800*1024)
-#define SST_MIN_PERIOD_BYTES 32
-#define SST_MAX_PERIOD_BYTES SST_MAX_BUFFER
-#define SST_MIN_PERIODS 2
-#define SST_MAX_PERIODS (1024*2)
-#define SST_FIFO_SIZE 0
+#ifndef __SST_PLATFORM_H__
+#define __SST_PLATFORM_H__
-struct pcm_stream_info {
- int str_id;
- void *mad_substream;
- void (*period_elapsed) (void *mad_substream);
- unsigned long long buffer_ptr;
- int sfreq;
-};
+#include <sound/soc.h>
-enum sst_drv_status {
- SST_PLATFORM_INIT = 1,
- SST_PLATFORM_STARTED,
- SST_PLATFORM_RUNNING,
- SST_PLATFORM_PAUSED,
- SST_PLATFORM_DROPPED,
-};
+#define SST_MAX_BIN_BYTES 1024
-enum sst_controls {
- SST_SND_ALLOC = 0x00,
- SST_SND_PAUSE = 0x01,
- SST_SND_RESUME = 0x02,
- SST_SND_DROP = 0x03,
- SST_SND_FREE = 0x04,
- SST_SND_BUFFER_POINTER = 0x05,
- SST_SND_STREAM_INIT = 0x06,
- SST_SND_START = 0x07,
- SST_MAX_CONTROLS = 0x07,
-};
-
-enum sst_stream_ops {
- STREAM_OPS_PLAYBACK = 0,
- STREAM_OPS_CAPTURE,
-};
+struct sst_data;
enum sst_audio_device_type {
SND_SST_DEVICE_HEADSET = 1,
SND_SST_DEVICE_COMPRESS,
};
-/* PCM Parameters */
-struct sst_pcm_params {
- u16 codec; /* codec type */
- u8 num_chan; /* 1=Mono, 2=Stereo */
- u8 pcm_wd_sz; /* 16/24 - bit*/
- u32 reserved; /* Bitrate in bits per second */
- u32 sfreq; /* Sampling rate in Hz */
- u32 ring_buffer_size;
- u32 period_count; /* period elapsed in samples*/
- u32 ring_buffer_addr;
+enum snd_sst_input_stream {
+ SST_INPUT_STREAM_NONE = 0x0,
+ SST_INPUT_STREAM_PCM = 0x6,
+ SST_INPUT_STREAM_COMPRESS = 0x8,
+ SST_INPUT_STREAM_MIXED = 0xE,
};
-struct sst_stream_params {
- u32 result;
- u32 stream_id;
- u8 codec;
- u8 ops;
- u8 stream_type;
- u8 device_type;
- struct sst_pcm_params sparams;
+/* direction/kind of a stream as seen by the SST driver */
+enum sst_stream_ops {
+ STREAM_OPS_PLAYBACK = 0, /* Decode */
+ STREAM_OPS_CAPTURE, /* Encode */
+ STREAM_OPS_COMPRESSED_PATH, /* Offload playback/capture */
+
+};
+/* stream device identifiers (values fixed by the firmware interface) */
+enum snd_sst_stream_type {
+ SST_STREAM_DEVICE_HS = 32,
+ SST_STREAM_DEVICE_IHF = 33,
+ SST_STREAM_DEVICE_MIC0 = 34,
+ SST_STREAM_DEVICE_MIC1 = 35,
+};
+
+/* control opcodes (0x1000 range) passed to the SST driver ops, e.g.
+ * via set_generic_params/device_control
+ */
+enum sst_controls {
+ SST_SND_ALLOC = 0x1000,
+ SST_SND_PAUSE = 0x1001,
+ SST_SND_RESUME = 0x1002,
+ SST_SND_DROP = 0x1003,
+ SST_SND_FREE = 0x1004,
+ SST_SND_BUFFER_POINTER = 0x1005,
+ SST_SND_STREAM_INIT = 0x1006,
+ SST_SND_START = 0x1007,
+ SST_SET_RUNTIME_PARAMS = 0x1008,
+ SST_SET_ALGO_PARAMS = 0x1009,
+ SST_SET_BYTE_STREAM = 0x100A,
+ SST_GET_BYTE_STREAM = 0x100B,
+ SST_SET_SSP_CONFIG = 0x100C,
+ SST_SET_PROBE_BYTE_STREAM = 0x100D,
+ SST_GET_PROBE_BYTE_STREAM = 0x100E,
+ SST_SET_VTSV_INFO = 0x100F,
+};
+
+struct pcm_stream_info {
+ int str_id;
+ void *mad_substream;
+ void (*period_elapsed) (void *mad_substream);
+ unsigned long long buffer_ptr;
+ unsigned long long pcm_delay;
+ int sfreq;
};
struct sst_compress_cb {
void *param;
void (*compr_cb)(void *param);
+ void *drain_cb_param;
+ void (*drain_notify)(void *param);
+
};
+struct snd_sst_params;
+
struct compress_sst_ops {
const char *name;
int (*open) (struct snd_sst_params *str_params,
int (*close) (unsigned int str_id);
int (*get_caps) (struct snd_compr_caps *caps);
int (*get_codec_caps) (struct snd_compr_codec_caps *codec);
- int (*set_metadata) (unsigned int str_id,
- struct snd_compr_metadata *mdata);
+ int (*set_metadata) (unsigned int str_id, struct snd_compr_metadata *metadata);
+
+};
+
+enum lpe_param_types_mixer {
+ SST_ALGO_PARAM_MIXER_STREAM_CFG = 0x801,
+};
+struct mad_ops_wq {
+ int stream_id;
+ enum sst_controls control_op;
+ struct work_struct wq;
};
struct sst_ops {
- int (*open) (struct sst_stream_params *str_param);
+ int (*open) (struct snd_sst_params *str_param);
int (*device_control) (int cmd, void *arg);
+ int (*set_generic_params) (enum sst_controls cmd, void *arg);
int (*close) (unsigned int str_id);
+ int (*power) (bool state);
};
struct sst_runtime_stream {
char *name;
struct device *dev;
struct sst_ops *ops;
+ struct platform_device *pdev;
struct compress_sst_ops *compr_ops;
};
--- /dev/null
+/*
+ * sst_platform_pvt.h - Intel MID Platform driver header file
+ *
+ * Copyright (C) 2010 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * Author: Harsha Priya <priya.harsha@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __SST_PLATFORM_PVT_H__
+#define __SST_PLATFORM_PVT_H__
+
+/* TODO rmv this global */
+extern struct sst_device *sst_dsp;
+
+/* channel counts */
+#define SST_MONO 1
+#define SST_STEREO 2
+
+/* PCM rate and channel bounds advertised by the platform driver */
+#define SST_MIN_RATE 8000
+#define SST_MAX_RATE 48000
+#define SST_MIN_CHANNEL 1
+#define SST_MAX_CHANNEL 2
+
+/* buffer/period constraints for the ALSA hardware definition */
+#define SST_MAX_BUFFER 96000 /*500ms@48K,16bit,2ch - CLV*/
+#define SST_MIN_PERIOD_BYTES 1536 /*24ms@16K,16bit,2ch - For VoIP on Mrfld*/
+#define SST_MAX_PERIOD_BYTES 48000 /*250ms@48K,16bit,2ch - CLV*/
+
+#define SST_MIN_PERIODS 2
+#define SST_MAX_PERIODS 50
+#define SST_FIFO_SIZE 0
+#define SST_CODEC_TYPE_PCM 1
+
+/* cpu-dai names registered by the platform driver */
+#define SST_HEADSET_DAI "Headset-cpu-dai"
+#define SST_SPEAKER_DAI "Speaker-cpu-dai"
+#define SST_VOICE_DAI "Voice-cpu-dai"
+#define SST_VIRTUAL_DAI "Virtual-cpu-dai"
+#define SST_LOOPBACK_DAI "Loopback-cpu-dai"
+#define SST_POWER_DAI "Power-cpu-dai"
+#define SST_COMPRESS_DAI "Compress-cpu-dai"
+#define SST_PROBE_DAI "Probe-cpu-dai"
+#define SST_VOIP_DAI "Voip-cpu-dai"
+#define SST_DEEPBUFFER_DAI "Deepbuffer-cpu-dai"
+#define SST_LOWLATENCY_DAI "Lowlatency-cpu-dai"
+
+struct sst_device;
+
+/* platform-side stream states */
+enum sst_drv_status {
+ SST_PLATFORM_UNINIT,
+ SST_PLATFORM_INIT,
+ SST_PLATFORM_RUNNING,
+ SST_PLATFORM_PAUSED,
+ SST_PLATFORM_DROPPED,
+};
+
+#define SST_PIPE_CONTROL 0x0
+#define SST_COMPRESS_VOL 0x01
+
+/* init entry points for the different platform variants */
+int sst_platform_clv_init(struct snd_soc_platform *platform);
+int sst_dsp_init(struct snd_soc_platform *platform);
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform);
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute);
+
+/* register/control accessors shared across the platform driver */
+unsigned int sst_soc_read(struct snd_soc_platform *platform, unsigned int reg);
+int sst_soc_write(struct snd_soc_platform *platform, unsigned int reg, unsigned int val);
+unsigned int sst_reg_read(struct sst_data *sst, unsigned int reg,
+ unsigned int shift, unsigned int max);
+unsigned int sst_reg_write(struct sst_data *sst, unsigned int reg,
+ unsigned int shift, unsigned int max, unsigned int val);
+
+int sst_algo_int_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+void sst_set_stream_status(struct sst_runtime_stream *stream, int state);
+int sst_fill_stream_params(void *substream, const struct sst_data *ctx,
+ struct snd_sst_params *str_params, bool is_compress);
+int sst_dpcm_probe_send(struct snd_soc_platform *platform, u16 probe_pipe,
+ int substream, int direction, bool on);
+int sst_byte_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int sst_byte_control_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+/* state for a v2 algorithm integer mixer control */
+struct sst_algo_int_control_v2 {
+ struct soc_mixer_control mc;
+ u16 module_id; /* module identifier */
+ u16 pipe_id; /* location info: pipe_id + instance_id */
+ u16 instance_id;
+ unsigned int value; /* Value received is stored here */
+};
+
+struct sst_lowlatency_deepbuff {
+ /* Thresholds for low latency & deep buffer */
+ unsigned long *low_latency;
+ unsigned long *deep_buffer;
+ unsigned long period_time;
+};
+
+/* driver-wide private data of the SST platform driver */
+struct sst_data {
+ struct platform_device *pdev;
+ struct sst_platform_data *pdata;
+ unsigned int lpe_mixer_input_ihf;
+ unsigned int lpe_mixer_input_hs;
+ u32 *widget;
+ /* scratch buffer for byte control transfers - presumably; verify
+ * against sst_byte_control_get/set
+ */
+ char *byte_stream;
+ struct mutex lock;
+ /* Pipe_id for probe_stream to be saved in stream map */
+ u8 pipe_id;
+ bool vtsv_enroll;
+ struct sst_lowlatency_deepbuff ll_db;
+};
+#endif
+++ /dev/null
-config SND_MFLD_MACHINE
- tristate "SOC Machine Audio driver for Intel Medfield MID platform"
- depends on INTEL_SCU_IPC
- select SND_SOC_SN95031
- select SND_SST_PLATFORM
- help
- This adds support for ASoC machine driver for Intel(R) MID Medfield platform
- used as alsa device in audio substem in Intel(R) MID devices
- Say Y if you have such a device
- If unsure select "N".
-
-config SND_SST_PLATFORM
- tristate
+++ /dev/null
-snd-soc-sst-platform-objs := sst_platform.o
-snd-soc-mfld-machine-objs := mfld_machine.o
-
-obj-$(CONFIG_SND_SST_PLATFORM) += snd-soc-sst-platform.o
-obj-$(CONFIG_SND_MFLD_MACHINE) += snd-soc-mfld-machine.o
+++ /dev/null
-/*
- * mfld_machine.c - ASoc Machine driver for Intel Medfield MID platform
- *
- * Copyright (C) 2010 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/sn95031.h"
-
-#define MID_MONO 1
-#define MID_STEREO 2
-#define MID_MAX_CAP 5
-#define MFLD_JACK_INSERT 0x04
-
-enum soc_mic_bias_zones {
- MFLD_MV_START = 0,
- /* mic bias volutage range for Headphones*/
- MFLD_MV_HP = 400,
- /* mic bias volutage range for American Headset*/
- MFLD_MV_AM_HS = 650,
- /* mic bias volutage range for Headset*/
- MFLD_MV_HS = 2000,
- MFLD_MV_UNDEFINED,
-};
-
-static unsigned int hs_switch;
-static unsigned int lo_dac;
-
-struct mfld_mc_private {
- void __iomem *int_base;
- u8 interrupt_status;
-};
-
-struct snd_soc_jack mfld_jack;
-
-/*Headset jack detection DAPM pins */
-static struct snd_soc_jack_pin mfld_jack_pins[] = {
- {
- .pin = "Headphones",
- .mask = SND_JACK_HEADPHONE,
- },
- {
- .pin = "AMIC1",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-
-/* jack detection voltage zones */
-static struct snd_soc_jack_zone mfld_zones[] = {
- {MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE},
- {MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET},
-};
-
-/* sound card controls */
-static const char *headset_switch_text[] = {"Earpiece", "Headset"};
-
-static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"};
-
-static const struct soc_enum headset_enum =
- SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
-
-static const struct soc_enum lo_enum =
- SOC_ENUM_SINGLE_EXT(4, lo_text);
-
-static int headset_get_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.integer.value[0] = hs_switch;
- return 0;
-}
-
-static int headset_set_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-
- if (ucontrol->value.integer.value[0] == hs_switch)
- return 0;
-
- if (ucontrol->value.integer.value[0]) {
- pr_debug("hs_set HS path\n");
- snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
- snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
- } else {
- pr_debug("hs_set EP path\n");
- snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
- snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
- }
- snd_soc_dapm_sync(&codec->dapm);
- hs_switch = ucontrol->value.integer.value[0];
-
- return 0;
-}
-
-static void lo_enable_out_pins(struct snd_soc_codec *codec)
-{
- snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTL");
- snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTR");
- snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTL");
- snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTR");
- snd_soc_dapm_enable_pin(&codec->dapm, "VIB1OUT");
- snd_soc_dapm_enable_pin(&codec->dapm, "VIB2OUT");
- if (hs_switch) {
- snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
- snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
- } else {
- snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
- snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
- }
-}
-
-static int lo_get_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.integer.value[0] = lo_dac;
- return 0;
-}
-
-static int lo_set_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-
- if (ucontrol->value.integer.value[0] == lo_dac)
- return 0;
-
- /* we dont want to work with last state of lineout so just enable all
- * pins and then disable pins not required
- */
- lo_enable_out_pins(codec);
- switch (ucontrol->value.integer.value[0]) {
- case 0:
- pr_debug("set vibra path\n");
- snd_soc_dapm_disable_pin(&codec->dapm, "VIB1OUT");
- snd_soc_dapm_disable_pin(&codec->dapm, "VIB2OUT");
- snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0);
- break;
-
- case 1:
- pr_debug("set hs path\n");
- snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
- snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
- snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x22);
- break;
-
- case 2:
- pr_debug("set spkr path\n");
- snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTL");
- snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTR");
- snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x44);
- break;
-
- case 3:
- pr_debug("set null path\n");
- snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTL");
- snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTR");
- snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x66);
- break;
- }
- snd_soc_dapm_sync(&codec->dapm);
- lo_dac = ucontrol->value.integer.value[0];
- return 0;
-}
-
-static const struct snd_kcontrol_new mfld_snd_controls[] = {
- SOC_ENUM_EXT("Playback Switch", headset_enum,
- headset_get_switch, headset_set_switch),
- SOC_ENUM_EXT("Lineout Mux", lo_enum,
- lo_get_switch, lo_set_switch),
-};
-
-static const struct snd_soc_dapm_widget mfld_widgets[] = {
- SND_SOC_DAPM_HP("Headphones", NULL),
- SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mfld_map[] = {
- {"Headphones", NULL, "HPOUTR"},
- {"Headphones", NULL, "HPOUTL"},
- {"Mic", NULL, "AMIC1"},
-};
-
-static void mfld_jack_check(unsigned int intr_status)
-{
- struct mfld_jack_data jack_data;
-
- jack_data.mfld_jack = &mfld_jack;
- jack_data.intr_id = intr_status;
-
- sn95031_jack_detection(&jack_data);
- /* TODO: add american headset detection post gpiolib support */
-}
-
-static int mfld_init(struct snd_soc_pcm_runtime *runtime)
-{
- struct snd_soc_codec *codec = runtime->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- int ret_val;
-
- /* Add jack sense widgets */
- snd_soc_dapm_new_controls(dapm, mfld_widgets, ARRAY_SIZE(mfld_widgets));
-
- /* Set up the map */
- snd_soc_dapm_add_routes(dapm, mfld_map, ARRAY_SIZE(mfld_map));
-
- /* always connected */
- snd_soc_dapm_enable_pin(dapm, "Headphones");
- snd_soc_dapm_enable_pin(dapm, "Mic");
-
- ret_val = snd_soc_add_codec_controls(codec, mfld_snd_controls,
- ARRAY_SIZE(mfld_snd_controls));
- if (ret_val) {
- pr_err("soc_add_controls failed %d", ret_val);
- return ret_val;
- }
- /* default is earpiece pin, userspace sets it explcitly */
- snd_soc_dapm_disable_pin(dapm, "Headphones");
- /* default is lineout NC, userspace sets it explcitly */
- snd_soc_dapm_disable_pin(dapm, "LINEOUTL");
- snd_soc_dapm_disable_pin(dapm, "LINEOUTR");
- lo_dac = 3;
- hs_switch = 0;
- /* we dont use linein in this so set to NC */
- snd_soc_dapm_disable_pin(dapm, "LINEINL");
- snd_soc_dapm_disable_pin(dapm, "LINEINR");
-
- /* Headset and button jack detection */
- ret_val = snd_soc_jack_new(codec, "Intel(R) MID Audio Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1, &mfld_jack);
- if (ret_val) {
- pr_err("jack creation failed\n");
- return ret_val;
- }
-
- ret_val = snd_soc_jack_add_pins(&mfld_jack,
- ARRAY_SIZE(mfld_jack_pins), mfld_jack_pins);
- if (ret_val) {
- pr_err("adding jack pins failed\n");
- return ret_val;
- }
- ret_val = snd_soc_jack_add_zones(&mfld_jack,
- ARRAY_SIZE(mfld_zones), mfld_zones);
- if (ret_val) {
- pr_err("adding jack zones failed\n");
- return ret_val;
- }
-
- /* we want to check if anything is inserted at boot,
- * so send a fake event to codec and it will read adc
- * to find if anything is there or not */
- mfld_jack_check(MFLD_JACK_INSERT);
- return ret_val;
-}
-
-static struct snd_soc_dai_link mfld_msic_dailink[] = {
- {
- .name = "Medfield Headset",
- .stream_name = "Headset",
- .cpu_dai_name = "Headset-cpu-dai",
- .codec_dai_name = "SN95031 Headset",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = mfld_init,
- },
- {
- .name = "Medfield Speaker",
- .stream_name = "Speaker",
- .cpu_dai_name = "Speaker-cpu-dai",
- .codec_dai_name = "SN95031 Speaker",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Vibra",
- .stream_name = "Vibra1",
- .cpu_dai_name = "Vibra1-cpu-dai",
- .codec_dai_name = "SN95031 Vibra1",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Haptics",
- .stream_name = "Vibra2",
- .cpu_dai_name = "Vibra2-cpu-dai",
- .codec_dai_name = "SN95031 Vibra2",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Compress",
- .stream_name = "Speaker",
- .cpu_dai_name = "Compress-cpu-dai",
- .codec_dai_name = "SN95031 Speaker",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
-};
-
-/* SoC card */
-static struct snd_soc_card snd_soc_card_mfld = {
- .name = "medfield_audio",
- .owner = THIS_MODULE,
- .dai_link = mfld_msic_dailink,
- .num_links = ARRAY_SIZE(mfld_msic_dailink),
-};
-
-static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev)
-{
- struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev;
-
- memcpy_fromio(&mc_private->interrupt_status,
- ((void *)(mc_private->int_base)),
- sizeof(u8));
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
-{
- struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
-
- if (mfld_jack.codec == NULL)
- return IRQ_HANDLED;
- mfld_jack_check(mc_drv_ctx->interrupt_status);
-
- return IRQ_HANDLED;
-}
-
-static int snd_mfld_mc_probe(struct platform_device *pdev)
-{
- int ret_val = 0, irq;
- struct mfld_mc_private *mc_drv_ctx;
- struct resource *irq_mem;
-
- pr_debug("snd_mfld_mc_probe called\n");
-
- /* retrive the irq number */
- irq = platform_get_irq(pdev, 0);
-
- /* audio interrupt base of SRAM location where
- * interrupts are stored by System FW */
- mc_drv_ctx = kzalloc(sizeof(*mc_drv_ctx), GFP_ATOMIC);
- if (!mc_drv_ctx) {
- pr_err("allocation failed\n");
- return -ENOMEM;
- }
-
- irq_mem = platform_get_resource_byname(
- pdev, IORESOURCE_MEM, "IRQ_BASE");
- if (!irq_mem) {
- pr_err("no mem resource given\n");
- ret_val = -ENODEV;
- goto unalloc;
- }
- mc_drv_ctx->int_base = ioremap_nocache(irq_mem->start,
- resource_size(irq_mem));
- if (!mc_drv_ctx->int_base) {
- pr_err("Mapping of cache failed\n");
- ret_val = -ENOMEM;
- goto unalloc;
- }
- /* register for interrupt */
- ret_val = request_threaded_irq(irq, snd_mfld_jack_intr_handler,
- snd_mfld_jack_detection,
- IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx);
- if (ret_val) {
- pr_err("cannot register IRQ\n");
- goto unalloc;
- }
- /* register the soc card */
- snd_soc_card_mfld.dev = &pdev->dev;
- ret_val = snd_soc_register_card(&snd_soc_card_mfld);
- if (ret_val) {
- pr_debug("snd_soc_register_card failed %d\n", ret_val);
- goto freeirq;
- }
- platform_set_drvdata(pdev, mc_drv_ctx);
- pr_debug("successfully exited probe\n");
- return ret_val;
-
-freeirq:
- free_irq(irq, mc_drv_ctx);
-unalloc:
- kfree(mc_drv_ctx);
- return ret_val;
-}
-
-static int snd_mfld_mc_remove(struct platform_device *pdev)
-{
- struct mfld_mc_private *mc_drv_ctx = platform_get_drvdata(pdev);
-
- pr_debug("snd_mfld_mc_remove called\n");
- free_irq(platform_get_irq(pdev, 0), mc_drv_ctx);
- snd_soc_unregister_card(&snd_soc_card_mfld);
- kfree(mc_drv_ctx);
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-static struct platform_driver snd_mfld_mc_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "msic_audio",
- },
- .probe = snd_mfld_mc_probe,
- .remove = snd_mfld_mc_remove,
-};
-
-module_platform_driver(snd_mfld_mc_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:msic-audio");
+++ /dev/null
-#ifndef __SST_DSP_H__
-#define __SST_DSP_H__
-/*
- * sst_dsp.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-12 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@linux.intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-enum sst_codec_types {
- /* AUDIO/MUSIC CODEC Type Definitions */
- SST_CODEC_TYPE_UNKNOWN = 0,
- SST_CODEC_TYPE_PCM, /* Pass through Audio codec */
- SST_CODEC_TYPE_MP3,
- SST_CODEC_TYPE_MP24,
- SST_CODEC_TYPE_AAC,
- SST_CODEC_TYPE_AACP,
- SST_CODEC_TYPE_eAACP,
-};
-
-enum stream_type {
- SST_STREAM_TYPE_NONE = 0,
- SST_STREAM_TYPE_MUSIC = 1,
-};
-
-struct snd_pcm_params {
- u16 codec; /* codec type */
- u8 num_chan; /* 1=Mono, 2=Stereo */
- u8 pcm_wd_sz; /* 16/24 - bit*/
- u32 reserved; /* Bitrate in bits per second */
- u32 sfreq; /* Sampling rate in Hz */
- u8 use_offload_path;
- u8 reserved2;
- u16 reserved3;
- u8 channel_map[8];
-} __packed;
-
-/* MP3 Music Parameters Message */
-struct snd_mp3_params {
- u16 codec;
- u8 num_chan; /* 1=Mono, 2=Stereo */
- u8 pcm_wd_sz; /* 16/24 - bit*/
- u8 crc_check; /* crc_check - disable (0) or enable (1) */
- u8 reserved1; /* unused*/
- u16 reserved2; /* Unused */
-} __packed;
-
-#define AAC_BIT_STREAM_ADTS 0
-#define AAC_BIT_STREAM_ADIF 1
-#define AAC_BIT_STREAM_RAW 2
-
-/* AAC Music Parameters Message */
-struct snd_aac_params {
- u16 codec;
- u8 num_chan; /* 1=Mono, 2=Stereo*/
- u8 pcm_wd_sz; /* 16/24 - bit*/
- u8 bdownsample; /*SBR downsampling 0 - disable 1 -enabled AAC+ only */
- u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
- u16 reser2;
- u32 externalsr; /*sampling rate of basic AAC raw bit stream*/
- u8 sbr_signalling;/*disable/enable/set automode the SBR tool.AAC+*/
- u8 reser1;
- u16 reser3;
-} __packed;
-
-/* WMA Music Parameters Message */
-struct snd_wma_params {
- u16 codec;
- u8 num_chan; /* 1=Mono, 2=Stereo */
- u8 pcm_wd_sz; /* 16/24 - bit*/
- u32 brate; /* Use the hard coded value. */
- u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
- u32 channel_mask; /* Channel Mask */
- u16 format_tag; /* Format Tag */
- u16 block_align; /* packet size */
- u16 wma_encode_opt;/* Encoder option */
- u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB */
- u8 reserved; /* reserved */
-} __packed;
-
-/* Codec params struture */
-union snd_sst_codec_params {
- struct snd_pcm_params pcm_params;
- struct snd_mp3_params mp3_params;
- struct snd_aac_params aac_params;
- struct snd_wma_params wma_params;
-} __packed;
-
-/* Address and size info of a frame buffer */
-struct sst_address_info {
- u32 addr; /* Address at IA */
- u32 size; /* Size of the buffer */
-};
-
-struct snd_sst_alloc_params_ext {
- struct sst_address_info ring_buf_info[8];
- u8 sg_count;
- u8 reserved;
- u16 reserved2;
- u32 frag_size; /*Number of samples after which period elapsed
- message is sent valid only if path = 0*/
-} __packed;
-
-struct snd_sst_stream_params {
- union snd_sst_codec_params uc;
-} __packed;
-
-struct snd_sst_params {
- u32 stream_id;
- u8 codec;
- u8 ops;
- u8 stream_type;
- u8 device_type;
- struct snd_sst_stream_params sparams;
- struct snd_sst_alloc_params_ext aparams;
-};
-
-#endif /* __SST_DSP_H__ */
+++ /dev/null
-/*
- * sst_platform.c - Intel MID Platform driver
- *
- * Copyright (C) 2010-2013 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/compress_driver.h>
-#include "sst_platform.h"
-
-static struct sst_device *sst;
-static DEFINE_MUTEX(sst_lock);
-
-int sst_register_dsp(struct sst_device *dev)
-{
- BUG_ON(!dev);
- if (!try_module_get(dev->dev->driver->owner))
- return -ENODEV;
- mutex_lock(&sst_lock);
- if (sst) {
- pr_err("we already have a device %s\n", sst->name);
- module_put(dev->dev->driver->owner);
- mutex_unlock(&sst_lock);
- return -EEXIST;
- }
- pr_debug("registering device %s\n", dev->name);
- sst = dev;
- mutex_unlock(&sst_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(sst_register_dsp);
-
-int sst_unregister_dsp(struct sst_device *dev)
-{
- BUG_ON(!dev);
- if (dev != sst)
- return -EINVAL;
-
- mutex_lock(&sst_lock);
-
- if (!sst) {
- mutex_unlock(&sst_lock);
- return -EIO;
- }
-
- module_put(sst->dev->driver->owner);
- pr_debug("unreg %s\n", sst->name);
- sst = NULL;
- mutex_unlock(&sst_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(sst_unregister_dsp);
-
-static struct snd_pcm_hardware sst_platform_pcm_hw = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_DOUBLE |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME |
- SNDRV_PCM_INFO_MMAP|
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_SYNC_START),
- .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
- SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
- SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
- .rates = (SNDRV_PCM_RATE_8000|
- SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000),
- .rate_min = SST_MIN_RATE,
- .rate_max = SST_MAX_RATE,
- .channels_min = SST_MIN_CHANNEL,
- .channels_max = SST_MAX_CHANNEL,
- .buffer_bytes_max = SST_MAX_BUFFER,
- .period_bytes_min = SST_MIN_PERIOD_BYTES,
- .period_bytes_max = SST_MAX_PERIOD_BYTES,
- .periods_min = SST_MIN_PERIODS,
- .periods_max = SST_MAX_PERIODS,
- .fifo_size = SST_FIFO_SIZE,
-};
-
-/* MFLD - MSIC */
-static struct snd_soc_dai_driver sst_platform_dai[] = {
-{
- .name = "Headset-cpu-dai",
- .id = 0,
- .playback = {
- .channels_min = SST_STEREO,
- .channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 5,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE,
- },
-},
-{
- .name = "Speaker-cpu-dai",
- .id = 1,
- .playback = {
- .channels_min = SST_MONO,
- .channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE,
- },
-},
-{
- .name = "Vibra1-cpu-dai",
- .id = 2,
- .playback = {
- .channels_min = SST_MONO,
- .channels_max = SST_MONO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE,
- },
-},
-{
- .name = "Vibra2-cpu-dai",
- .id = 3,
- .playback = {
- .channels_min = SST_MONO,
- .channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE,
- },
-},
-{
- .name = "Compress-cpu-dai",
- .compress_dai = 1,
- .playback = {
- .channels_min = SST_STEREO,
- .channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
-},
-};
-
-static const struct snd_soc_component_driver sst_component = {
- .name = "sst",
-};
-
-/* helper functions */
-static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
- int state)
-{
- unsigned long flags;
- spin_lock_irqsave(&stream->status_lock, flags);
- stream->stream_status = state;
- spin_unlock_irqrestore(&stream->status_lock, flags);
-}
-
-static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
-{
- int state;
- unsigned long flags;
-
- spin_lock_irqsave(&stream->status_lock, flags);
- state = stream->stream_status;
- spin_unlock_irqrestore(&stream->status_lock, flags);
- return state;
-}
-
-static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
- struct sst_pcm_params *param)
-{
-
- param->codec = SST_CODEC_TYPE_PCM;
- param->num_chan = (u8) substream->runtime->channels;
- param->pcm_wd_sz = substream->runtime->sample_bits;
- param->reserved = 0;
- param->sfreq = substream->runtime->rate;
- param->ring_buffer_size = snd_pcm_lib_buffer_bytes(substream);
- param->period_count = substream->runtime->period_size;
- param->ring_buffer_addr = virt_to_phys(substream->dma_buffer.area);
- pr_debug("period_cnt = %d\n", param->period_count);
- pr_debug("sfreq= %d, wd_sz = %d\n", param->sfreq, param->pcm_wd_sz);
-}
-
-static int sst_platform_alloc_stream(struct snd_pcm_substream *substream)
-{
- struct sst_runtime_stream *stream =
- substream->runtime->private_data;
- struct sst_pcm_params param = {0};
- struct sst_stream_params str_params = {0};
- int ret_val;
-
- /* set codec params and inform SST driver the same */
-	sst_fill_pcm_params(substream, &param);
- substream->runtime->dma_area = substream->dma_buffer.area;
- str_params.sparams = param;
- str_params.codec = param.codec;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- str_params.ops = STREAM_OPS_PLAYBACK;
- str_params.device_type = substream->pcm->device + 1;
- pr_debug("Playbck stream,Device %d\n",
- substream->pcm->device);
- } else {
- str_params.ops = STREAM_OPS_CAPTURE;
- str_params.device_type = SND_SST_DEVICE_CAPTURE;
- pr_debug("Capture stream,Device %d\n",
- substream->pcm->device);
- }
- ret_val = stream->ops->open(&str_params);
- pr_debug("SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
- if (ret_val < 0)
- return ret_val;
-
- stream->stream_info.str_id = ret_val;
- pr_debug("str id : %d\n", stream->stream_info.str_id);
- return ret_val;
-}
-
-static void sst_period_elapsed(void *mad_substream)
-{
- struct snd_pcm_substream *substream = mad_substream;
- struct sst_runtime_stream *stream;
- int status;
-
- if (!substream || !substream->runtime)
- return;
- stream = substream->runtime->private_data;
- if (!stream)
- return;
- status = sst_get_stream_status(stream);
- if (status != SST_PLATFORM_RUNNING)
- return;
- snd_pcm_period_elapsed(substream);
-}
-
-static int sst_platform_init_stream(struct snd_pcm_substream *substream)
-{
- struct sst_runtime_stream *stream =
- substream->runtime->private_data;
- int ret_val;
-
- pr_debug("setting buffer ptr param\n");
- sst_set_stream_status(stream, SST_PLATFORM_INIT);
- stream->stream_info.period_elapsed = sst_period_elapsed;
- stream->stream_info.mad_substream = substream;
- stream->stream_info.buffer_ptr = 0;
- stream->stream_info.sfreq = substream->runtime->rate;
- ret_val = stream->ops->device_control(
- SST_SND_STREAM_INIT, &stream->stream_info);
- if (ret_val)
- pr_err("control_set ret error %d\n", ret_val);
- return ret_val;
-
-}
-/* end -- helper functions */
-
-static int sst_platform_open(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct sst_runtime_stream *stream;
- int ret_val;
-
- pr_debug("sst_platform_open called\n");
-
- snd_soc_set_runtime_hwparams(substream, &sst_platform_pcm_hw);
- ret_val = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
- if (ret_val < 0)
- return ret_val;
-
- stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
- spin_lock_init(&stream->status_lock);
-
- /* get the sst ops */
- mutex_lock(&sst_lock);
- if (!sst) {
- pr_err("no device available to run\n");
- mutex_unlock(&sst_lock);
- kfree(stream);
- return -ENODEV;
- }
- if (!try_module_get(sst->dev->driver->owner)) {
- mutex_unlock(&sst_lock);
- kfree(stream);
- return -ENODEV;
- }
- stream->ops = sst->ops;
- mutex_unlock(&sst_lock);
-
- stream->stream_info.str_id = 0;
- sst_set_stream_status(stream, SST_PLATFORM_INIT);
- stream->stream_info.mad_substream = substream;
- /* allocate memory for SST API set */
- runtime->private_data = stream;
-
- return 0;
-}
-
-static int sst_platform_close(struct snd_pcm_substream *substream)
-{
- struct sst_runtime_stream *stream;
- int ret_val = 0, str_id;
-
- pr_debug("sst_platform_close called\n");
- stream = substream->runtime->private_data;
- str_id = stream->stream_info.str_id;
- if (str_id)
- ret_val = stream->ops->close(str_id);
- module_put(sst->dev->driver->owner);
- kfree(stream);
- return ret_val;
-}
-
-static int sst_platform_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct sst_runtime_stream *stream;
- int ret_val = 0, str_id;
-
- pr_debug("sst_platform_pcm_prepare called\n");
- stream = substream->runtime->private_data;
- str_id = stream->stream_info.str_id;
- if (stream->stream_info.str_id) {
- ret_val = stream->ops->device_control(
- SST_SND_DROP, &str_id);
- return ret_val;
- }
-
- ret_val = sst_platform_alloc_stream(substream);
- if (ret_val < 0)
- return ret_val;
- snprintf(substream->pcm->id, sizeof(substream->pcm->id),
- "%d", stream->stream_info.str_id);
-
- ret_val = sst_platform_init_stream(substream);
- if (ret_val)
- return ret_val;
- substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
- return ret_val;
-}
-
-static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- int ret_val = 0, str_id;
- struct sst_runtime_stream *stream;
- int str_cmd, status;
-
- pr_debug("sst_platform_pcm_trigger called\n");
- stream = substream->runtime->private_data;
- str_id = stream->stream_info.str_id;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- pr_debug("sst: Trigger Start\n");
- str_cmd = SST_SND_START;
- status = SST_PLATFORM_RUNNING;
- stream->stream_info.mad_substream = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- pr_debug("sst: in stop\n");
- str_cmd = SST_SND_DROP;
- status = SST_PLATFORM_DROPPED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- pr_debug("sst: in pause\n");
- str_cmd = SST_SND_PAUSE;
- status = SST_PLATFORM_PAUSED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- pr_debug("sst: in pause release\n");
- str_cmd = SST_SND_RESUME;
- status = SST_PLATFORM_RUNNING;
- break;
- default:
- return -EINVAL;
- }
- ret_val = stream->ops->device_control(str_cmd, &str_id);
- if (!ret_val)
- sst_set_stream_status(stream, status);
-
- return ret_val;
-}
-
-
-static snd_pcm_uframes_t sst_platform_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- struct sst_runtime_stream *stream;
- int ret_val, status;
- struct pcm_stream_info *str_info;
-
- stream = substream->runtime->private_data;
- status = sst_get_stream_status(stream);
- if (status == SST_PLATFORM_INIT)
- return 0;
- str_info = &stream->stream_info;
- ret_val = stream->ops->device_control(
- SST_SND_BUFFER_POINTER, str_info);
- if (ret_val) {
- pr_err("sst: error code = %d\n", ret_val);
- return ret_val;
- }
- return stream->stream_info.buffer_ptr;
-}
-
-static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
- memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
-
- return 0;
-}
-
-static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
-static struct snd_pcm_ops sst_platform_ops = {
- .open = sst_platform_open,
- .close = sst_platform_close,
- .ioctl = snd_pcm_lib_ioctl,
- .prepare = sst_platform_pcm_prepare,
- .trigger = sst_platform_pcm_trigger,
- .pointer = sst_platform_pcm_pointer,
- .hw_params = sst_platform_pcm_hw_params,
- .hw_free = sst_platform_pcm_hw_free,
-};
-
-static void sst_pcm_free(struct snd_pcm *pcm)
-{
- pr_debug("sst_pcm_free called\n");
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
-static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_pcm *pcm = rtd->pcm;
- int retval = 0;
-
- pr_debug("sst_pcm_new called\n");
- if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
- pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- SST_MIN_BUFFER, SST_MAX_BUFFER);
- if (retval) {
- pr_err("dma buffer allocationf fail\n");
- return retval;
- }
- }
- return retval;
-}
-
-/* compress stream operations */
-static void sst_compr_fragment_elapsed(void *arg)
-{
- struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
-
- pr_debug("fragment elapsed by driver\n");
- if (cstream)
- snd_compr_fragment_elapsed(cstream);
-}
-
-static int sst_platform_compr_open(struct snd_compr_stream *cstream)
-{
-
- int ret_val = 0;
- struct snd_compr_runtime *runtime = cstream->runtime;
- struct sst_runtime_stream *stream;
-
- stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
-
- spin_lock_init(&stream->status_lock);
-
- /* get the sst ops */
- if (!sst || !try_module_get(sst->dev->driver->owner)) {
- pr_err("no device available to run\n");
- ret_val = -ENODEV;
- goto out_ops;
- }
- stream->compr_ops = sst->compr_ops;
-
- stream->id = 0;
- sst_set_stream_status(stream, SST_PLATFORM_INIT);
- runtime->private_data = stream;
- return 0;
-out_ops:
- kfree(stream);
- return ret_val;
-}
-
-static int sst_platform_compr_free(struct snd_compr_stream *cstream)
-{
- struct sst_runtime_stream *stream;
- int ret_val = 0, str_id;
-
- stream = cstream->runtime->private_data;
- /*need to check*/
- str_id = stream->id;
- if (str_id)
- ret_val = stream->compr_ops->close(str_id);
- module_put(sst->dev->driver->owner);
- kfree(stream);
- pr_debug("%s: %d\n", __func__, ret_val);
- return 0;
-}
-
-static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
- struct snd_compr_params *params)
-{
- struct sst_runtime_stream *stream;
- int retval;
- struct snd_sst_params str_params;
- struct sst_compress_cb cb;
-
- stream = cstream->runtime->private_data;
- /* construct fw structure for this*/
- memset(&str_params, 0, sizeof(str_params));
-
- str_params.ops = STREAM_OPS_PLAYBACK;
- str_params.stream_type = SST_STREAM_TYPE_MUSIC;
- str_params.device_type = SND_SST_DEVICE_COMPRESS;
-
- switch (params->codec.id) {
- case SND_AUDIOCODEC_MP3: {
- str_params.codec = SST_CODEC_TYPE_MP3;
- str_params.sparams.uc.mp3_params.codec = SST_CODEC_TYPE_MP3;
- str_params.sparams.uc.mp3_params.num_chan = params->codec.ch_in;
- str_params.sparams.uc.mp3_params.pcm_wd_sz = 16;
- break;
- }
-
- case SND_AUDIOCODEC_AAC: {
- str_params.codec = SST_CODEC_TYPE_AAC;
- str_params.sparams.uc.aac_params.codec = SST_CODEC_TYPE_AAC;
- str_params.sparams.uc.aac_params.num_chan = params->codec.ch_in;
- str_params.sparams.uc.aac_params.pcm_wd_sz = 16;
- if (params->codec.format == SND_AUDIOSTREAMFORMAT_MP4ADTS)
- str_params.sparams.uc.aac_params.bs_format =
- AAC_BIT_STREAM_ADTS;
- else if (params->codec.format == SND_AUDIOSTREAMFORMAT_RAW)
- str_params.sparams.uc.aac_params.bs_format =
- AAC_BIT_STREAM_RAW;
- else {
- pr_err("Undefined format%d\n", params->codec.format);
- return -EINVAL;
- }
- str_params.sparams.uc.aac_params.externalsr =
- params->codec.sample_rate;
- break;
- }
-
- default:
- pr_err("codec not supported, id =%d\n", params->codec.id);
- return -EINVAL;
- }
-
- str_params.aparams.ring_buf_info[0].addr =
- virt_to_phys(cstream->runtime->buffer);
- str_params.aparams.ring_buf_info[0].size =
- cstream->runtime->buffer_size;
- str_params.aparams.sg_count = 1;
- str_params.aparams.frag_size = cstream->runtime->fragment_size;
-
- cb.param = cstream;
- cb.compr_cb = sst_compr_fragment_elapsed;
-
- retval = stream->compr_ops->open(&str_params, &cb);
- if (retval < 0) {
- pr_err("stream allocation failed %d\n", retval);
- return retval;
- }
-
- stream->id = retval;
- return 0;
-}
-
-static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
-{
- struct sst_runtime_stream *stream =
- cstream->runtime->private_data;
-
- return stream->compr_ops->control(cmd, stream->id);
-}
-
-static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
- struct snd_compr_tstamp *tstamp)
-{
- struct sst_runtime_stream *stream;
-
- stream = cstream->runtime->private_data;
- stream->compr_ops->tstamp(stream->id, tstamp);
- tstamp->byte_offset = tstamp->copied_total %
- (u32)cstream->runtime->buffer_size;
- pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset);
- return 0;
-}
-
-static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
- size_t bytes)
-{
- struct sst_runtime_stream *stream;
-
- stream = cstream->runtime->private_data;
- stream->compr_ops->ack(stream->id, (unsigned long)bytes);
- stream->bytes_written += bytes;
-
- return 0;
-}
-
-static int sst_platform_compr_get_caps(struct snd_compr_stream *cstream,
- struct snd_compr_caps *caps)
-{
- struct sst_runtime_stream *stream =
- cstream->runtime->private_data;
-
- return stream->compr_ops->get_caps(caps);
-}
-
-static int sst_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
- struct snd_compr_codec_caps *codec)
-{
- struct sst_runtime_stream *stream =
- cstream->runtime->private_data;
-
- return stream->compr_ops->get_codec_caps(codec);
-}
-
-static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
- struct snd_compr_metadata *metadata)
-{
- struct sst_runtime_stream *stream =
- cstream->runtime->private_data;
-
- return stream->compr_ops->set_metadata(stream->id, metadata);
-}
-
-static struct snd_compr_ops sst_platform_compr_ops = {
-
- .open = sst_platform_compr_open,
- .free = sst_platform_compr_free,
- .set_params = sst_platform_compr_set_params,
- .set_metadata = sst_platform_compr_set_metadata,
- .trigger = sst_platform_compr_trigger,
- .pointer = sst_platform_compr_pointer,
- .ack = sst_platform_compr_ack,
- .get_caps = sst_platform_compr_get_caps,
- .get_codec_caps = sst_platform_compr_get_codec_caps,
-};
-
-static struct snd_soc_platform_driver sst_soc_platform_drv = {
- .ops = &sst_platform_ops,
- .compr_ops = &sst_platform_compr_ops,
- .pcm_new = sst_pcm_new,
- .pcm_free = sst_pcm_free,
-};
-
-static int sst_platform_probe(struct platform_device *pdev)
-{
- int ret;
-
- pr_debug("sst_platform_probe called\n");
- sst = NULL;
- ret = snd_soc_register_platform(&pdev->dev, &sst_soc_platform_drv);
- if (ret) {
- pr_err("registering soc platform failed\n");
- return ret;
- }
-
- ret = snd_soc_register_component(&pdev->dev, &sst_component,
- sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
- if (ret) {
- pr_err("registering cpu dais failed\n");
- snd_soc_unregister_platform(&pdev->dev);
- }
- return ret;
-}
-
-static int sst_platform_remove(struct platform_device *pdev)
-{
-
- snd_soc_unregister_component(&pdev->dev);
- snd_soc_unregister_platform(&pdev->dev);
- pr_debug("sst_platform_remove success\n");
- return 0;
-}
-
-static struct platform_driver sst_platform_driver = {
- .driver = {
- .name = "sst-platform",
- .owner = THIS_MODULE,
- },
- .probe = sst_platform_probe,
- .remove = sst_platform_remove,
-};
-
-module_platform_driver(sst_platform_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sst-platform");
#include <sound/compress_driver.h>
#include <sound/soc.h>
#include <sound/initval.h>
+#include <sound/soc-dpcm.h>
static int soc_compr_open(struct snd_compr_stream *cstream)
{
return ret;
}
+static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *fe = cstream->private_data;
+ struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+ struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *codec_dai = fe->codec_dai;
+ struct snd_soc_dpcm *dpcm;
+ struct snd_soc_dapm_widget_list *list;
+ int stream;
+ int ret = 0;
+
+ if (cstream->direction == SND_COMPRESS_PLAYBACK)
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ else
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+ if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
+ ret = platform->driver->compr_ops->open(cstream);
+ if (ret < 0) {
+ pr_err("compress asoc: can't open platform %s\n", platform->name);
+ goto out;
+ }
+ }
+
+ if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->startup) {
+ ret = fe->dai_link->compr_ops->startup(cstream);
+ if (ret < 0) {
+ pr_err("compress asoc: %s startup failed\n", fe->dai_link->name);
+ goto machine_err;
+ }
+ }
+
+ fe->dpcm[stream].runtime = fe_substream->runtime;
+
+ if (dpcm_path_get(fe, stream, &list) <= 0) {
+ dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
+ fe->dai_link->name, stream ? "capture" : "playback");
+ }
+
+ /* calculate valid and active FE <-> BE dpcms */
+ dpcm_process_paths(fe, stream, &list, 1);
+
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+ ret = dpcm_be_dai_startup(fe, stream);
+ if (ret < 0) {
+ /* clean up all links */
+ list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+
+ dpcm_be_disconnect(fe, stream);
+ fe->dpcm[stream].runtime = NULL;
+ goto fe_err;
+ }
+
+ dpcm_clear_pending_state(fe, stream);
+ dpcm_path_put(&list);
+
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+
+ if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+ cpu_dai->playback_active++;
+ codec_dai->playback_active++;
+ } else {
+ cpu_dai->capture_active++;
+ codec_dai->capture_active++;
+ }
+
+ cpu_dai->active++;
+ codec_dai->active++;
+ fe->codec->active++;
+
+ mutex_unlock(&fe->card->mutex);
+
+ return 0;
+
+fe_err:
+ if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
+ fe->dai_link->compr_ops->shutdown(cstream);
+machine_err:
+ if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+ platform->driver->compr_ops->free(cstream);
+out:
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ mutex_unlock(&fe->card->mutex);
+ return ret;
+}
+
/*
* Power down the audio subsystem pmdown_time msecs after close is called.
* This is to ensure there are no pops or clicks in between any music tracks
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
- dev_dbg(rtd->dev, "ASoC: pop wq checking: %s status: %s waiting: %s\n",
- codec_dai->driver->playback.stream_name,
- codec_dai->playback_active ? "active" : "inactive",
- rtd->pop_wait ? "yes" : "no");
+	dev_dbg(rtd->dev, "ASoC: pop wq checking: %s status: %s waiting: %s\n",
+ codec_dai->driver->playback.stream_name,
+ codec_dai->playback_active ? "active" : "inactive",
+ rtd->pop_wait ? "yes" : "no");
/* are we waiting on this codec DAI stream */
- if (rtd->pop_wait == 1) {
+ if (rtd->pop_wait == 1 && !codec_dai->active) {
rtd->pop_wait = 0;
snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
SND_SOC_DAPM_STREAM_STOP);
}
-
mutex_unlock(&rtd->pcm_mutex);
}
cpu_dai->capture_active--;
codec_dai->capture_active--;
}
-
- snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
+ if (!codec_dai->playback_active)
+ snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
cpu_dai->active--;
codec_dai->active--;
if (!codec_dai->active)
codec_dai->rate = 0;
-
if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->shutdown)
rtd->dai_link->compr_ops->shutdown(cstream);
platform->driver->compr_ops->free(cstream);
cpu_dai->runtime = NULL;
- if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+ if (cstream->direction == SND_COMPRESS_PLAYBACK
+ && !codec_dai->playback_active) {
if (!rtd->pmdown_time || codec->ignore_pmdown_time ||
rtd->dai_link->ignore_pmdown_time) {
snd_soc_dapm_stream_event(rtd,
schedule_delayed_work(&rtd->delayed_work,
msecs_to_jiffies(rtd->pmdown_time));
}
- } else {
+ } else if (cstream->direction == SND_COMPRESS_CAPTURE
+ && !codec_dai->capture_active) {
/* capture streams can be powered down now */
snd_soc_dapm_stream_event(rtd,
SNDRV_PCM_STREAM_CAPTURE,
return 0;
}
+static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *fe = cstream->private_data;
+ struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *codec_dai = fe->codec_dai;
+ struct snd_soc_dpcm *dpcm;
+ int stream, ret;
+
+ mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+ if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ cpu_dai->playback_active--;
+ codec_dai->playback_active--;
+ } else {
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+ cpu_dai->capture_active--;
+ codec_dai->capture_active--;
+ }
+
+ cpu_dai->active--;
+ codec_dai->active--;
+ fe->codec->active--;
+
+ snd_soc_dai_digital_mute(cpu_dai, 1, stream);
+
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+ ret = dpcm_be_dai_hw_free(fe, stream);
+ if (ret < 0)
+ dev_err(fe->dev, "compressed hw_free failed %d\n", ret);
+
+ ret = dpcm_be_dai_shutdown(fe, stream);
+
+ /* mark FE's links ready to prune */
+ list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+
+	/*
+	 * Playback and capture both emit the same STOP event here,
+	 * so there is no need to branch on the stream direction.
+	 */
+	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
+
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+
+ dpcm_be_disconnect(fe, stream);
+
+ fe->dpcm[stream].runtime = NULL;
+
+ if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
+ fe->dai_link->compr_ops->shutdown(cstream);
+
+ if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+ platform->driver->compr_ops->free(cstream);
+
+ mutex_unlock(&fe->card->mutex);
+ return 0;
+}
+
static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret = 0;
- mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+ if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
+ ret = platform->driver->compr_ops->trigger(cstream, cmd);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Trigger handler for dynamic (FE) compressed streams: forwards the
+ * command to the backend DAIs and records the resulting DPCM FE state.
+ */
+static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
+{
+ struct snd_soc_pcm_runtime *fe = cstream->private_data;
+ struct snd_soc_platform *platform = fe->platform;
+ int ret = 0, stream;
+
+ if (cstream->direction == SND_COMPRESS_PLAYBACK)
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ else
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
ret = platform->driver->compr_ops->trigger(cstream, cmd);
goto out;
}
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+ ret = dpcm_be_dai_trigger(fe, stream, cmd);
+ if (ret < 0)
+ /* don't record a new FE state if the BEs failed to trigger */
+ goto out;
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- snd_soc_dai_digital_mute(codec_dai, 0, cstream->direction);
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
break;
case SNDRV_PCM_TRIGGER_STOP:
- snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
break;
}
out:
- mutex_unlock(&rtd->pcm_mutex);
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ mutex_unlock(&fe->card->mutex);
return ret;
}
goto err;
}
- if (cstream->direction == SND_COMPRESS_PLAYBACK)
- snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
- SND_SOC_DAPM_STREAM_START);
- else
- snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
+ /* cancel any delayed stream shutdown that is pending */
+ if (cstream->direction == SND_COMPRESS_PLAYBACK
+ && rtd->pop_wait) {
+ rtd->pop_wait = 0;
+ cancel_delayed_work(&rtd->delayed_work);
+ }
+
+ snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
SND_SOC_DAPM_STREAM_START);
- /* cancel any delayed stream shutdown that is pending */
- rtd->pop_wait = 0;
+ snd_soc_dai_digital_mute(rtd->codec_dai, 0, cstream->direction);
+err:
mutex_unlock(&rtd->pcm_mutex);
+ return ret;
+}
- cancel_delayed_work_sync(&rtd->delayed_work);
+/*
+ * set_params handler for dynamic (FE) compressed streams: configures the
+ * platform and machine compress ops, then hw_params + prepare on all BEs.
+ */
+static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params)
+{
+ struct snd_soc_pcm_runtime *fe = cstream->private_data;
+ struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+ struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
- return ret;
+ int ret = 0, stream;
-err:
- mutex_unlock(&rtd->pcm_mutex);
+ if (cstream->direction == SND_COMPRESS_PLAYBACK)
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ else
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+ /* first we call set_params for the platform driver
+ * this should configure the soc side
+ * if the machine has compressed ops then we call that as well
+ * expectation is that platform and machine will configure everything
+ * for this compress path, like configuring pcm port for codec
+ */
+ if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
+ ret = platform->driver->compr_ops->set_params(cstream, params);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->set_params) {
+ ret = fe->dai_link->compr_ops->set_params(cstream);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* NOTE(review): the source is a struct snd_compr_params but the copy
+ * length is sizeof(struct snd_pcm_hw_params) — this over-reads if the
+ * compress params struct is smaller. TODO confirm intended aliasing.
+ */
+ memcpy(&fe->dpcm[fe_substream->stream].hw_params, params,
+ sizeof(struct snd_pcm_hw_params));
+
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+ ret = dpcm_be_dai_hw_params(fe, stream);
+ if (ret < 0)
+ goto out;
+
+ ret = dpcm_be_dai_prepare(fe, stream);
+ if (ret < 0)
+ goto out;
+
+ /* both directions send the same START event */
+ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
+
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+
+ snd_soc_dai_digital_mute(cpu_dai, 0, stream);
+
+out:
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ mutex_unlock(&fe->card->mutex);
return ret;
}
return ret;
}
+
/* ASoC Compress operations */
static struct snd_compr_ops soc_compr_ops = {
.open = soc_compr_open,
.get_codec_caps = soc_compr_get_codec_caps
};
+/* ASoC Dynamic Compress operations (used for DPCM front-end links) */
+static struct snd_compr_ops soc_compr_dyn_ops = {
+ .open = soc_compr_open_fe,
+ .free = soc_compr_free_fe,
+ .set_params = soc_compr_set_params_fe,
+ .get_params = soc_compr_get_params,
+ /* NOTE(review): sst_compr_* look like Intel SST platform symbols wired
+ * into generic ASoC ops — confirm this layering is intentional
+ */
+ .set_metadata = sst_compr_set_metadata,
+ .get_metadata = sst_compr_get_metadata,
+ .trigger = soc_compr_trigger_fe,
+ .pointer = soc_compr_pointer,
+ .ack = soc_compr_ack,
+ .get_caps = soc_compr_get_caps,
+ .get_codec_caps = soc_compr_get_codec_caps
+};
+
/* create a new compress */
int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
{
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_compr *compr;
+ struct snd_pcm *be_pcm;
char new_name[64];
int ret = 0, direction = 0;
ret = -ENOMEM;
goto compr_err;
}
- memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
+
+ if (rtd->dai_link->dynamic) {
+ snprintf(new_name, sizeof(new_name), "(%s)",
+ rtd->dai_link->stream_name);
+
+ ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
+ 1, 0, &be_pcm);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
+ rtd->dai_link->name);
+ goto compr_err;
+ }
+
+ rtd->pcm = be_pcm;
+ rtd->fe_compr = 1;
+ be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+ /*be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;*/
+ memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
+ } else
+ memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
/* Add copy callback for not memory mapped DSPs */
if (platform->driver->compr_ops && platform->driver->compr_ops->copy)
}
#endif
+static void codec2codec_close_delayed_work(struct work_struct *work)
+{
+ /* Currently nothing to do for codec-to-codec (c2c) links.
+ * Since c2c links are internal nodes in the DAPM graph and
+ * don't interface with the outside world or the application layer,
+ * no special handling is needed on close.
+ */
+}
+
#ifdef CONFIG_PM_SLEEP
/* powers down audio subsystem for suspend */
int snd_soc_suspend(struct device *dev)
if (dai->dev != platform->dev)
continue;
+ /* dummy platform doesn't have and DAIs, don't add dummy-codec
+ * widgets here (since dev is the same)
+ */
+ if (!strcmp(dai->name, "snd-soc-dummy-dai"))
+ continue;
snd_soc_dapm_new_dai_widgets(&platform->dapm, dai);
}
}
rtd->card = card;
- /* Make sure all DAPM widgets are instantiated */
- snd_soc_dapm_new_widgets(&codec->dapm);
-
/* machine controls, routes and widgets are not prefixed */
temp = codec->name_prefix;
codec->name_prefix = NULL;
return -ENODEV;
list_add(&cpu_dai->dapm.list, &card->dapm_list);
- snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
}
if (cpu_dai->driver->probe) {
return ret;
}
} else {
+ INIT_DELAYED_WORK(&rtd->delayed_work,
+ codec2codec_close_delayed_work);
+
/* link the DAI widgets */
play_w = codec_dai->playback_widget;
capture_w = cpu_dai->capture_widget;
snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
- snd_soc_dapm_new_widgets(&card->dapm);
-
for (i = 0; i < card->num_links; i++) {
dai_link = &card->dai_link[i];
dai_fmt = dai_link->dai_fmt;
}
EXPORT_SYMBOL_GPL(snd_soc_bytes_put);
+/*
+ * snd_soc_info_bytes_ext - info callback for extended bytes controls
+ * Reports the element type (BYTES) and the maximum byte count taken from
+ * the soc_bytes_ext stored in the kcontrol's private value.
+ */
+int snd_soc_info_bytes_ext(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *ucontrol)
+{
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+
+ ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ ucontrol->count = params->max;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_bytes_ext);
+
/**
* snd_soc_info_xr_sx - signed multi register info callback
* @kcontrol: mreg control
return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
}
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
+ * @kcontrol: The kcontrol
+ *
+ * Return: the codec of the first widget in the kcontrol's widget list.
+ */
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ return wlist->widgets[0]->codec;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_codec);
+
/* get snd_card from DAPM context */
static inline struct snd_card *dapm_get_snd_card(
struct snd_soc_dapm_context *dapm)
break;
}
- if (!w->sname)
+ if (!w->sname || !strstr(w->sname, dai_w->name))
continue;
if (dai->driver->playback.stream_name &&
struct snd_soc_codec *codec;
struct snd_soc_dapm_context *dapm;
struct snd_soc_jack_pin *pin;
+ unsigned int sync = 0;
int enable;
trace_snd_soc_jack_report(jack, mask, status);
snd_soc_dapm_enable_pin(dapm, pin->pin);
else
snd_soc_dapm_disable_pin(dapm, pin->pin);
+
+ /* we need to sync for this case only */
+ sync = 1;
}
/* Report before the DAPM sync to help users updating micbias status */
blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
- snd_soc_dapm_sync(dapm);
+ if (sync)
+ snd_soc_dapm_sync(dapm);
snd_jack_report(jack->jack, jack->status);
INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
gpios[i].jack = jack;
+ if (!gpios[i].irq_flags)
+ gpios[i].irq_flags =
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio),
gpio_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
+ gpios[i].irq_flags,
gpios[i].name,
&gpios[i]);
if (ret < 0)
#define DPCM_MAX_BE_USERS 8
/* DPCM stream event, send event to FE and all active BEs. */
-static int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
int event)
{
struct snd_soc_dpcm *dpcm;
rtd->pop_wait ? "yes" : "no");
/* are we waiting on this codec DAI stream */
- if (rtd->pop_wait == 1) {
+ if (rtd->pop_wait == 1 && !codec_dai->active) {
rtd->pop_wait = 0;
snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
SND_SOC_DAPM_STREAM_STOP);
/* Muting the DAC suppresses artifacts caused during digital
* shutdown, for example from stopping clocks.
*/
- snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
+ if (!codec_dai->playback_active)
+ snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
+
+ snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
if (cpu_dai->driver->ops->shutdown)
cpu_dai->driver->ops->shutdown(substream, cpu_dai);
platform->driver->ops->close(substream);
cpu_dai->runtime = NULL;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+ && !codec_dai->playback_active) {
if (!rtd->pmdown_time || codec->ignore_pmdown_time ||
rtd->dai_link->ignore_pmdown_time) {
/* powered down playback stream now */
schedule_delayed_work(&rtd->delayed_work,
msecs_to_jiffies(rtd->pmdown_time));
}
- } else {
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE
+ && !codec_dai->capture_active) {
/* capture streams can be powered down now */
snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
SND_SOC_DAPM_STREAM_STOP);
}
mutex_unlock(&rtd->pcm_mutex);
-
pm_runtime_put(platform->dev);
pm_runtime_put(codec_dai->dev);
pm_runtime_put(cpu_dai->dev);
SND_SOC_DAPM_STREAM_START);
snd_soc_dai_digital_mute(codec_dai, 0, substream->stream);
+ snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
out:
mutex_unlock(&rtd->pcm_mutex);
}
/* disconnect a BE and FE */
-static void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
+void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm, *d;
return 0;
}
-static int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
+int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list_)
{
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
return paths;
}
-static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
-{
- kfree(*list);
-}
-
static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_dapm_widget_list **list_)
{
continue;
/* don't connect if FE is not running */
- if (!fe->dpcm[stream].runtime)
+ if (!fe->dpcm[stream].runtime && !fe->fe_compr)
continue;
/* newly connected FE and BE */
* Find the corresponding BE DAIs that source or sink audio to this
* FE substream.
*/
-static int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
+int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list, int new)
{
if (new)
return dpcm_prune_paths(fe, stream, list);
}
-static void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
+void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
}
}
-static int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
int err, count = 0;
}
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
-
dpcm_set_fe_runtime(fe_substream);
snd_pcm_limit_hw_rates(runtime);
return ret;
}
-static int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
return 0;
}
-static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
return 0;
}
-static int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
int ret;
if (ret < 0) {
dev_err(fe->dev,"ASoC: hw_params FE failed %d\n", ret);
dpcm_be_dai_hw_free(fe, stream);
- } else
+ } else
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
out:
return ret;
}
-static int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
+int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
int cmd)
{
struct snd_soc_dpcm *dpcm;
return ret;
}
-EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
{
return ret;
}
-static int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
int ret = 0;
int ret = 0, playback = 0, capture = 0;
if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) {
- if (cpu_dai->driver->playback.channels_min)
- playback = 1;
- if (cpu_dai->driver->capture.channels_min)
- capture = 1;
+ if (cpu_dai->driver->playback.channels_min) {
+ if (rtd->dai_link->playback_count)
+ playback = rtd->dai_link->playback_count;
+ else
+ playback = 1;
+ }
+
+ if (cpu_dai->driver->capture.channels_min) {
+ if (rtd->dai_link->capture_count)
+ capture = rtd->dai_link->capture_count;
+ else
+ capture = 1;
+ }
} else {
if (codec_dai->driver->playback.channels_min &&
- cpu_dai->driver->playback.channels_min)
- playback = 1;
+ cpu_dai->driver->playback.channels_min) {
+ if (rtd->dai_link->playback_count)
+ playback = rtd->dai_link->playback_count;
+ else
+ playback = 1;
+ }
+
if (codec_dai->driver->capture.channels_min &&
- cpu_dai->driver->capture.channels_min)
- capture = 1;
+ cpu_dai->driver->capture.channels_min) {
+ if (rtd->dai_link->capture_count)
+ capture = rtd->dai_link->capture_count;
+ else
+ capture = 1;
+ }
+
}
/* create the PCM */
static int dummy_dma_open(struct snd_pcm_substream *substream)
{
- snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ /* BEs don't need the dummy hardware constraints; only apply to FEs */
+ if (!rtd->dai_link->no_pcm)
+ snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
return 0;
}
.ops = &dummy_dma_ops,
};
-static struct snd_soc_codec_driver dummy_codec;
+/* dummy endpoint pins so the dummy DAI's stream widgets have routable ends */
+static struct snd_soc_dapm_widget dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("Dummy Input"),
+ SND_SOC_DAPM_OUTPUT("Dummy Output"),
+};
+
+/* connect the dummy playback/capture streams to the dummy pins */
+static struct snd_soc_dapm_route intercon[] = {
+ { "Dummy Output", NULL, "Dummy Playback"},
+ { "Dummy Capture", NULL, "Dummy Input"},
+};
+
+/* register the dummy codec's DAPM widgets and routes, propagating errors */
+static int dummy_codec_probe(struct snd_soc_codec *codec)
+{
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(dapm, dapm_widgets,
+ ARRAY_SIZE(dapm_widgets));
+ if (ret < 0)
+ return ret;
+
+ return snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+}
+
+static struct snd_soc_codec_driver dummy_codec = {
+ .probe = dummy_codec_probe,
+};
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_U32_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+
static struct snd_soc_dai_driver dummy_dai = {
.name = "snd-soc-dummy-dai",
.playback = {
- .stream_name = "Playback",
+ .stream_name = "Dummy Playback",
.channels_min = 1,
.channels_max = 384,
.rates = STUB_RATES,
.formats = STUB_FORMATS,
},
.capture = {
- .stream_name = "Capture",
+ .stream_name = "Dummy Capture",
.channels_min = 1,
.channels_max = 384,
.rates = STUB_RATES,