summaryrefslogtreecommitdiff
path: root/qlearning.py
blob: e35d778dbe2066efac85552171f2a25e82974e32 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
#!/usr/bin/env python
import sys,random
import numpy as np

# Import snake game
from snake import Snake



# Q-table setup.
#
# The state is encoded from 12 boolean features (see event_handler):
#   - snake heading:   up / right / down / left        (4 bits)
#   - apple adjacent:  up / right / down / left        (4 bits)
#   - obstacle adjacent: up / right / down / left      (4 bits)
#
# 12 booleans -> 2^12 = 4096 distinct states.
# 4 actions (up, right, down, left) -> 4 * 4096 = 16384 table entries.
#
# Rewards used by the agent:
#   +1  when the snake eats an apple
#   -10 when the snake hits an obstacle

qtable=np.zeros((4096, 4))



# The game instance driven by the training loop at the bottom of the file.
game=Snake()

def isWall(h, game):
    """Return True when cell ``h`` (an (x, y) tuple) lies outside the grid."""
    x, y = h
    return x < 0 or y < 0 or x >= game.grid_width or y >= game.grid_height

def event_handler(game,event):
    """Observe the game, apply a Q-learning update, and pick the next move.

    Called by ``Snake.run`` once per tick.  ``event`` is +1 when an apple was
    just eaten, -1 when the snake hit an obstacle (terminal), 0 otherwise.
    Reads/writes the module-level ``qtable`` and sets ``game.direction``.
    """
    # Direction codes the game understands, as clock positions:
    # index 0..3 maps to up, right, down, left.
    directions = (12, 3, 6, 9)

    h=game.snake[0]
    left=(h[0]-1,h[1])
    right=(h[0]+1,h[1])
    up=(h[0],h[1]-1)
    down=(h[0],h[1]+1)
    a=game.apple

    snake_go_up=(game.direction==12)
    snake_go_right=(game.direction==3)
    snake_go_down=(game.direction==6)
    snake_go_left=(game.direction==9)

    apple_up=(up==a)
    apple_right=(right==a)
    apple_down=(down==a)
    apple_left=(left==a)

    obstacle_up=(up in game.snake or isWall(up, game))
    obstacle_right=(right in game.snake or isWall(right, game))
    obstacle_down=(down in game.snake or isWall(down, game))
    obstacle_left=(left in game.snake or isWall(left, game))

    reward=0
    if event==1:
        reward=1
    elif event==-1:
        reward=-10

    # Pack the 12 booleans into a single integer in [0, 4095] used as the
    # row index into qtable.  Bit order is arbitrary but must stay fixed.
    state=2**11*snake_go_up+2**10*snake_go_right+2**9*snake_go_down+2**8*snake_go_left+2**7*apple_up+2**6*apple_right+2**5*apple_down+2**4*apple_left+2**3*obstacle_up+2**2*obstacle_right+2**1*obstacle_down+obstacle_left

    # --- Q-learning update for the previous transition ---------------------
    # FIX: the original computed `reward` but never wrote to qtable, so the
    # agent never learned.  Standard update:
    #   Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
    # The previous (state, action-index) pair is remembered on the function.
    alpha, gamma = 0.1, 0.9
    prev = getattr(event_handler, "prev", None)
    if prev is not None:
        prev_state, prev_action = prev
        # On a terminal event (-1) there is no future reward to propagate.
        future = 0.0 if event == -1 else np.max(qtable[state])
        old = qtable[prev_state][prev_action]
        qtable[prev_state][prev_action] = old + alpha * (reward + gamma * future - old)

    # Policy (unchanged from the original): exploit when some action has a
    # positive estimate, otherwise pick a random action.
    if np.max(qtable[state]) > 0:
        action = int(np.argmax(qtable[state]))
    else:
        action = random.randrange(4)

    # Forget the previous pair at end of episode; presumably Snake.run
    # restarts after a crash -- TODO confirm against snake.py.
    event_handler.prev = None if event == -1 else (state, action)

    # FIX: np.argmax yields a column index 0-3; translate it to the game's
    # clock-position code (the original assigned the raw index).
    game.direction=directions[action]

# Play a fixed number of games, printing each final score.
for _ in range(10):
    final_score = game.run(event_handler=event_handler)
    print("Game ended with " + str(final_score))