ALS communication

Update 7.9.2016
OpenBCI
EMOTIV
Brainmotic - EEG to control an Intelligent House (oriented to handicapped)
Update 25.5.2016: Two interesting articles that could help the project:
  • Using finite state machine and a hybrid of EEG signal and EOG artifacts for an asynchronous wheelchair navigation, Expert Systems with Applications, 2015
  • HMM based automated wheelchair navigation using EOG traces in EEG, Journal of Neural Engineering, 2014

  • Update 30.3.2016: This project probably is going to be a brain-computer-interface. I plan to use an EEG Biosensor from Neurosky.
    Basically this projects goal is to enable communication for persons suffering from ALS or other motor neuron diseases. The first idea was to track eye movement and point a computer mouse with that (gaze tracking). Tristan Hume has made a similar project. There is a problem with this approach for ALS patients, their eyes get tired very easy. That is why I changed the approach towards tracking eyeblinking. The user interface would look a lot like Gaze talk and the idea is to stop the "cursor" at the box you want to select.

    The code below can now find a face and find eyes on the face. It also create a basic user interface but there is not yet anything inside it. It would work like an letterboard. I got two problems with this approach; you must have the ability to control your eyelids and the algorithm (haarcascades) I use is not tracking fast enough. The ALS Association has listed different technologies for communication here.

    gaze.py
    
    # Gaze communication
    # Kim Salmi, kim.salmi(at)iki(dot)fi
    # https://tunn.us/arduino/
    # License: GPLv3
    
    
    import sys
    import Tkinter as tk
    import time
    import threading
    import logging
    import video
    
    
    class Box:
      def __init__(self, item):
        self.item = item
        self.item.config(bg=unselectColor)
        self.status = 0
    
      def getStatus(self):
        return self.status
    
      def unselect(self):
        self.item.config(bg=unselectColor)
        self.status = 0
    
      def select(self):
        self.item.config(bg=selectColor)
        self.status = 1
    
      def activate(self):
        self.item.config(bg=activeColor)
        self.status = 2
    
    class Boxes:
      amount = 0
    
      def __init__(self):
        self.box = [];
        self.pointer = 0
    
      def addBox(self, x, y):
        self.box.append(Box(tk.Canvas(gui, bg="black", height=boxHeight, width=boxWidth)))
        self.box[Boxes.amount].item.grid(row=y, column=x, sticky="W")
        Boxes.amount += 1
    
      def increasePointer(self):
        self.pointer += 1
        if self.pointer >= Boxes.amount:
          self.pointer = 0
        logging.debug("increasePointer: Pointer:",self.pointer)
    
      def decreasePointer(self):
        self.pointer -= 1
        if self.pointer <= 0:
          self.pointer = Boxes.amount
        logging.debug("decreasePointer: Pointer:",self.pointer)     
    
      def nextBox(self):
        self.box[self.pointer].unselect()
        Boxes.increasePointer(self)
        self.box[self.pointer].select()
        logging.debug("nextBox: Pointer: %s amount: %s",self.pointer, Boxes.amount)
    
      def keyDown(self, key):
        self.box[self.pointer].activate()
        logging.debug("keyDown: Pointer: %s",self.pointer)
        
    
    
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    
    
    gui = tk.Tk()
    screenwidth = gui.winfo_screenwidth()
    screenheight = gui.winfo_screenheight()
    
    gui.geometry(str(screenwidth)+'x'+str(screenheight)+'+250+250')
    gui.title('Gaze communication')
    gui.configure(bg="grey")
    
    activeColor = "red"
    selectColor = "#0000CC"
    unselectColor = "black"
    
    xAmount = 5
    yAmount = 3
    
    boxHeight = (screenheight-100) / yAmount
    boxWidth = (screenheight-100) / yAmount
    boxPointer = 0
    
    boxes=Boxes()
    for y in range(0,yAmount):
      for x in range(0,xAmount):
        boxes.addBox(x, y)
    
    
    video = video.Video(0)
    button = tk.Button(gui, text="OK", command= lambda: loop(i)).grid(row=4, column=0, sticky="W")
    
    gui.bind("", boxes.keyDown)
    
    while 1:
      boxes.nextBox()
      for j in range(0, 2):
        video.nextFrame()
        video.detectFace()
        video.showFrame()
        video.destroy()
        gui.update_idletasks()
        gui.update()
        #time.sleep(0.1)
    


    video.py
    
    # Gaze communication video classes
    # Kim Salmi, kim.salmi(at)iki(dot)fi
    # https://tunn.us/arduino/
    # License: GPLv3
    
    import numpy as py
    import cv2
    
    class Face:
      amount = 0
      separationThresh = 10
    
      def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        Face.amount += 1
    
      def sameFace(self, x, y, w, h):
        same = 0
        if x+25 > self.x and x-25 < self.x:
          same = 1
        elif y+25 > self.y and y-25 < self.y:
          same = 1
        return same
    
      def editFace(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
    
    
    
    class Faces:
    
      def __init__(self):
        self.faces = []
    
      def addFace(self, x, y, w, h):
        face = self.familiarFaces(x, y, w, h)
        if face:
          face.editFace(x, y, w, h)
        else: 
          self.faces.append( Face(x,y,w,h) )
          print(Face.amount)
        
      def familiarFaces(self, x, y, w, h):
        for face in self.faces:
          if face.sameFace(x, y, w, h):
            return face
        return None
    
    
    
    class Video:
      def __init__(self, camera):
        self.camera = cv2.VideoCapture(0)
        self.frame = None
        self.face_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')
        self.eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
        self.faces = Faces()
    
    
      def nextFrame(self):
        grabbed, self.frame = self.camera.read()
        self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        #self.gray = cv2.GaussianBlur(self.gray, (31, 31), 0)
    
        #cv2.rectangle(self.frame,(400,300),(850,720),(0,0,255),2)
        #self.gray = self.gray[400:300, 850:720]
    
    
      def showFrame(self):
        cv2.imshow("Feed", self.frame)
    
    
      def destroy(self):
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
          self.camera.release()
          cv2.destroyAllWindows()
    
      def detectFace(self):
        faces = self.face_cascade.detectMultiScale(self.gray, 1.3, 8)
        for (x,y,w,h) in faces:
          h=h-(h/3) #just use the upper part of the face, avoid false positive from nose
          cv2.rectangle(self.frame,(x,y),(x+w,y+h),(255,0,0),2)
          roi_gray = self.gray[y:y+h, x:x+w]
          roi_color = self.frame[y:y+h, x:x+w]
          eye = self.eye_cascade.detectMultiScale(roi_gray)
          for (ex,ey,ew,eh) in eye:
            self.faces.addFace(x,y,w,h)
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
    
    

    Kim Salmi


    tunn.us