Você está na página 1de 24

All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [1]: from keras.models import Sequential, Model


from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adam
from keras.preprocessing.image import ImageDataGenerator
from keras import objectives
from keras.layers import merge, Convolution2D, MaxPooling2D, Input, core
from keras.utils.layer_utils import print_summary
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import timeit
%matplotlib inline

from keras.backend import image_dim_ordering, set_image_dim_ordering

# Image dimensions ordering should follow the Theano convention


if image_dim_ordering() != 'th':
set_image_dim_ordering('th')
Using Theano backend.
Using gpu device 0: GeForce GTX TITAN X (CNMeM is disabled, cuDNN Version is
too old. Update to v5, was 3007.)

In [2]: #Datasets
#Training dataset
start = timeit.default_timer()
training_images_EX = []
from os import listdir
from os.path import isfile, join
onlyfiles_EX = [f for f in listdir("/home/ojperdomoc/DRIU_224_224/Ophtha_EX/EFI_Train/"
for im in onlyfiles_EX:
imaa=join("//home/ojperdomoc/DRIU_224_224/Ophtha_EX/EFI_Train/",im)
data = np.asarray(Image.open(imaa)).transpose(2,0,1).astype('float32')
#data = np.asarray(img, dtype='int32')[..., [2,1, 0]]
#data = np.fromstring(img.tobytes(), dtype=np.uint8).reshape(224,224,3)
training_images_EX.append(data)

In [3]: #Datasets
#Test dataset
test_images_EX = []
from os import listdir
from os.path import isfile, join
onlyfiles1_EX = [f for f in listdir("/home/ojperdomoc/DRIU_224_224/Ophtha_EX/EFI_Test/"
for im in onlyfiles1_EX:
imaa=join("/home/ojperdomoc/DRIU_224_224/Ophtha_EX/EFI_Test/",im)
data = np.asarray(Image.open(imaa)).transpose(2,0,1).astype('float32')
#data = np.array(img, dtype=np.float32)
#data = np.asarray(img, dtype='int32')[..., [2, 1, 0]]
#data = np.fromstring(img.tobytes(), dtype=np.uint8).reshape(224,224,3)
test_images_EX.append(data)
#print imaa

1 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [4]: #Create mask and grounTruth datasets

groundTruth_images_EX = []
from os import listdir
from os.path import isfile, join
#onlyfiles_DRIVE = [f for f in listdir("/home/ojperdomoc/DRIU_224_224/DRIVE/groundTruth/")
for im in onlyfiles_EX:
imaa=join("/home/ojperdomoc/DRIU_224_224/Ophtha_EX/Mask_Train/",im[0:-4]+'_EX.jpg'
data = np.expand_dims(np.asarray(Image.open(imaa)),axis=-1).transpose(2,0,1).
#data = np.array(img, dtype=np.float32)
#data = np.asarray(img, dtype='int32')[..., [2, 1, 0]]
#data = np.fromstring(img.tobytes(), dtype=np.uint8).reshape(224,224,3)
groundTruth_images_EX.append(data)
#print imaa
#import h5py
#f = h5py.File('groundTruth_images_DRIVE.hdf5', 'w')
#groundTruth_images_DRIVE = f.create_dataset('image', (2**32,), chunks=True)

groundTruth_test_images_EX = []
from os import listdir
from os.path import isfile, join
#onlyfiles = [f for f in listdir("/home/ojperdomoc/DRIU_224_224/DRIVE/groundTruth_test/")
for im in onlyfiles1_EX:
imaa=join("/home/ojperdomoc/DRIU_224_224/Ophtha_EX/Mask_Test/",im[0:-4]+'_EX.jpg'
data = np.expand_dims(np.asarray(Image.open(imaa)),axis=-1).transpose(2,0,1).
#data = np.array(img, dtype=np.float32)
#data = np.asarray(img, dtype='int32')[..., [2, 1, 0]]
#data = np.fromstring(img.tobytes(), dtype=np.uint8).reshape(224,224,3)
groundTruth_test_images_EX.append(data)
#print imaa
#import h5py
#f = h5py.File('groundTruth_test_images_DRIVE.hdf5', 'w')
#groundTruth_test_images_DRIVE = f.create_dataset('image', (2**32,), chunks=True)

2 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [5]: train_path = join("/home/ojperdomoc/DRIU_224_224/Ophtha_EX/EFI_Train/",onlyfiles_EX


test_path = join("/home/ojperdomoc/DRIU_224_224/Ophtha_EX/Mask_Train/",onlyfiles_EX
print np.asarray(Image.open(train_path)).shape
print np.asarray(Image.open(test_path)).shape
plt.figure()
plt.subplot(1,2,1)
#print X_train.shape
plt.imshow(np.asarray(Image.open(train_path)))
plt.subplot(1,2,2)
plt.imshow(np.asarray(Image.open(test_path)))
plt.figure()
plt.subplot(1,2,1)
#print X_train.shape
plt.imshow(training_images_EX[0].transpose(1,2,0))
plt.subplot(1,2,2)
plt.imshow(groundTruth_images_EX[0].transpose(1,2,0).squeeze())
(224, 224, 3)
(224, 224)

Out[5]: <matplotlib.image.AxesImage at 0x7effd387dc50>

3 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [6]: #DATASET
train_images_Messidor = []
label_train=[]
from os import listdir
from os.path import isfile, join
#onlyfiles = [f for f in listdir("/home/ojperdomoc/Messidor_/Messidor_224_224/Tra/") if is
onlyfiles = [f for f in listdir("/home/ojperdomoc/Messidor_/Messidor_DME2/Training/"
for im in onlyfiles:
#imaa=join("/home/ojperdomoc/Messidor_/Messidor_224_224/Tra/",im)
imaa=join("/home/ojperdomoc/Messidor_/Messidor_DME2/Training/",im)
data = np.asarray(Image.open(imaa)).transpose(2,0,1).astype('float32')
#data = np.array(img, dtype=np.float32)
#data = np.asarray(img, dtype='int32')[..., [2, 1, 0]]
#data = np.fromstring(img.tobytes(), dtype=np.uint8).reshape(224,224,3)
data2=int(im[0])
label_train.append(data2)
train_images_Messidor.append(data/255.)

test_images_Messidor = []
label_test=[]
from os import listdir
from os.path import isfile, join
#onlyfiles1 = [f for f in listdir("/home/ojperdomoc/Messidor_/Messidor_224_224/Tes/") if i
onlyfiles1 = [f for f in listdir("/home/ojperdomoc/Messidor_/Messidor_DME2/Test/"
for im in onlyfiles1:
#imaa=join("/home/ojperdomoc/Messidor_/Messidor_224_224/Test/",im)
imaa=join("/home/ojperdomoc/Messidor_/Messidor_DME2/Test/",im)
data = np.asarray(Image.open(imaa)).transpose(2,0,1).astype('float32')
#data = np.array(img, dtype=np.float32)
#data = np.asarray(img, dtype='int32')[..., [2, 1, 0]]
#data = np.fromstring(img.tobytes(), dtype=np.uint8).reshape(224,224,3)
data2=int(im[0])
label_test.append(data2)
test_images_Messidor.append(data/255.)

vali_images_Messidor = []
label_vali=[]
from os import listdir
from os.path import isfile, join
#onlyfiles1 = [f for f in listdir("/home/ojperdomoc/Messidor_/Messidor_224_224/Val/") if i
onlyfiles2 = [f for f in listdir("/home/ojperdomoc/Messidor_/Messidor_DME2/Test/"
for im in onlyfiles2:
#imaa=join("/home/ojperdomoc/Messidor_/Messidor_224_224/Val/",im)
imaa=join("/home/ojperdomoc/Messidor_/Messidor_DME2/Test/",im)
data = np.asarray(Image.open(imaa)).transpose(2,0,1).astype('float32')
#data = np.array(img, dtype=np.float32)
#data = np.asarray(img, dtype='int32')[..., [2, 1, 0]]
#data = np.fromstring(img.tobytes(), dtype=np.uint8).reshape(224,224,3)
data2=int(im[0])
label_vali.append(data2)
vali_images_Messidor.append(data/255.)

4 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [7]: #print (train_images_Messidor[2].transpose(1,2,0)).shape


plt.imshow(train_images_Messidor[2].transpose(1,2,0))
Out[7]: <matplotlib.image.AxesImage at 0x7effd39e0550>

In [8]: import keras.backend as K


from keras.layers import Lambda

def f(layers):
return K.concatenate(layers, axis=1)

def f_output_shape(input_shape):
input_shape = list(input_shape[0])
input_shape[1] *= 4
return tuple(input_shape)
In [9]: def b_loss(x, x_decoded_mean):
# NOTE: binary_crossentropy expects a batch_size by dim
# for x and x_decoded_mean, so we MUST flatten these!
xent_loss = K.binary_crossentropy(x_decoded_mean, x).mean()
#kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1
return xent_loss
In [10]: x_train = np.asarray(train_images_Messidor)
x_test = np.asarray(test_images_Messidor)
x_vali = np.asarray(vali_images_Messidor)

5 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [11]: # Definiton of the Blood Vessel Segmentation model


bvm_path = 'vessels_weights.hdf5'

input = Input(shape=(3,224,224))
padding1 = ZeroPadding2D((1,1))(input)
conv1 = Convolution2D(64, 3, 3, activation='relu')(padding1)
padding2 = ZeroPadding2D((1,1))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu')(padding2)
pool1 = MaxPooling2D((2,2), strides=(2,2))(conv2)
#
padding3 = ZeroPadding2D((1,1))(pool1)
conv3 = Convolution2D(128, 3, 3, activation='relu')(padding3)
padding4 = ZeroPadding2D((1,1))(conv3)
conv4 = Convolution2D(128, 3, 3, activation='relu')(padding4)
pool2 = MaxPooling2D((2,2), strides=(2,2))(conv4)
#
padding5 = ZeroPadding2D((1,1))(pool2)
conv5 = Convolution2D(256, 3, 3, activation='relu')(padding5)
padding6 = ZeroPadding2D((1,1))(conv5)
conv6 = Convolution2D(256, 3, 3, activation='relu')(padding6)
padding7 = ZeroPadding2D((1,1))(conv6)
conv7 = Convolution2D(256, 3, 3, activation='relu')(padding7)
pool3 = MaxPooling2D((2,2), strides=(2,2))(conv7)
#
padding8 = ZeroPadding2D((1,1))(pool3)
conv8 = Convolution2D(512, 3, 3, activation='relu')(padding8)
padding9 = ZeroPadding2D((1,1))(conv8)
conv9 = Convolution2D(512, 3, 3, activation='relu')(padding9)
padding10 = ZeroPadding2D((1,1))(conv9)
conv10 = Convolution2D(512, 3, 3, activation='relu')(padding10)
pool4 = MaxPooling2D((2,2), strides=(2,2))(conv10)
#
padding11 = ZeroPadding2D((1,1))(pool4)
conv11 = Convolution2D(512, 3, 3, activation='relu')(padding11)
padding12 = ZeroPadding2D((1,1))(conv11)
conv12 = Convolution2D(512, 3, 3, activation='relu')(padding12)
padding13 = ZeroPadding2D((1,1))(conv12)
conv13 = Convolution2D(512, 3, 3, activation='relu')(padding13)
pool5 = MaxPooling2D((2,2), strides=(2,2))(conv13)
padding14 = ZeroPadding2D((1,1))(conv2)
conv1_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding14)
padding15 = ZeroPadding2D((1,1))(conv4)
conv2_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding15)
padding16 = ZeroPadding2D((1,1))(conv7)
conv3_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding16)
padding17 = ZeroPadding2D((1,1))(conv10)
conv4_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding17)
padding18 = ZeroPadding2D((1,1))(conv13)
conv5_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding18)

upsample2 = UpSampling2D(size=(2, 2), dim_ordering='th')(conv2_2_16)


upsample3 = UpSampling2D(size=(4, 4), dim_ordering='th')(conv3_3_16)
upsample4 = UpSampling2D(size=(8, 8), dim_ordering='th')(conv4_3_16)
upsample5 = UpSampling2D(size=(16, 16), dim_ordering='th')(conv5_3_16)

from keras.preprocessing import image


x2=Lambda(f,f_output_shape)
x_stack = x2([conv1_2_16, upsample2, upsample3, upsample4])
padding_sub = ZeroPadding2D((1,1))(x_stack)
final_conv1_sub = Convolution2D(1, 3, 3, activation='relu')(padding_sub)
final_conv1 = Convolution2D(1, 1, 1, activation='sigmoid')(final_conv1_sub)
#N_epochs = 100000
#batch_size = 2
model_bvm = Model(input=input, output=final_conv1)

6 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [12]: # Definiton of the Optic Disc Segmentation model


odm_path = 'disc_weights.hdf5'

input = Input(shape=(3,224,224))
padding1 = ZeroPadding2D((1,1))(input)
conv1 = Convolution2D(64, 3, 3, activation='relu')(padding1)
padding2 = ZeroPadding2D((1,1))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu')(padding2)
pool1 = MaxPooling2D((2,2), strides=(2,2))(conv2)
#
padding3 = ZeroPadding2D((1,1))(pool1)
conv3 = Convolution2D(128, 3, 3, activation='relu')(padding3)
padding4 = ZeroPadding2D((1,1))(conv3)
conv4 = Convolution2D(128, 3, 3, activation='relu')(padding4)
pool2 = MaxPooling2D((2,2), strides=(2,2))(conv4)
#
padding5 = ZeroPadding2D((1,1))(pool2)
conv5 = Convolution2D(256, 3, 3, activation='relu')(padding5)
padding6 = ZeroPadding2D((1,1))(conv5)
conv6 = Convolution2D(256, 3, 3, activation='relu')(padding6)
padding7 = ZeroPadding2D((1,1))(conv6)
conv7 = Convolution2D(256, 3, 3, activation='relu')(padding7)
pool3 = MaxPooling2D((2,2), strides=(2,2))(conv7)
#
padding8 = ZeroPadding2D((1,1))(pool3)
conv8 = Convolution2D(512, 3, 3, activation='relu')(padding8)
padding9 = ZeroPadding2D((1,1))(conv8)
conv9 = Convolution2D(512, 3, 3, activation='relu')(padding9)
padding10 = ZeroPadding2D((1,1))(conv9)
conv10 = Convolution2D(512, 3, 3, activation='relu')(padding10)
pool4 = MaxPooling2D((2,2), strides=(2,2))(conv10)
#
padding11 = ZeroPadding2D((1,1))(pool4)
conv11 = Convolution2D(512, 3, 3, activation='relu')(padding11)
padding12 = ZeroPadding2D((1,1))(conv11)
conv12 = Convolution2D(512, 3, 3, activation='relu')(padding12)
padding13 = ZeroPadding2D((1,1))(conv12)
conv13 = Convolution2D(512, 3, 3, activation='relu')(padding13)
pool5 = MaxPooling2D((2,2), strides=(2,2))(conv13)
#
padding14 = ZeroPadding2D((1,1))(conv2)
conv1_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding14)
padding15 = ZeroPadding2D((1,1))(conv4)
conv2_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding15)
padding16 = ZeroPadding2D((1,1))(conv7)
conv3_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding16)
padding17 = ZeroPadding2D((1,1))(conv10)
conv4_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding17)
padding18 = ZeroPadding2D((1,1))(conv13)
conv5_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding18)

upsample2 = UpSampling2D(size=(2, 2), dim_ordering='th')(conv2_2_16)


upsample3 = UpSampling2D(size=(4, 4), dim_ordering='th')(conv3_3_16)
upsample4 = UpSampling2D(size=(8, 8), dim_ordering='th')(conv4_3_16)
upsample5 = UpSampling2D(size=(16, 16), dim_ordering='th')(conv5_3_16)

from keras.preprocessing import image


x2=Lambda(f,f_output_shape)
x_stack = x2([upsample2, upsample3, upsample4, upsample5])
padding_sub = ZeroPadding2D((1,1))(x_stack)
final_conv1_sub = Convolution2D(1, 3, 3, activation='relu')(padding_sub)
final_conv1 = Convolution2D(1, 1, 1, activation='sigmoid')(final_conv1_sub)
#N_epochs = 100000
#batch_size = 2

7 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [13]: # Definiton of the Exudate Segmentation model


exm_path = 'EX1_Final_weights.hdf5'

input = Input(shape=(3,224,224))
padding1 = ZeroPadding2D((1,1))(input)
conv1 = Convolution2D(64, 3, 3, activation='relu')(padding1)
padding2 = ZeroPadding2D((1,1))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu')(padding2)
pool1 = MaxPooling2D((2,2), strides=(2,2))(conv2)
#
padding3 = ZeroPadding2D((1,1))(pool1)
conv3 = Convolution2D(128, 3, 3, activation='relu')(padding3)
padding4 = ZeroPadding2D((1,1))(conv3)
conv4 = Convolution2D(128, 3, 3, activation='relu')(padding4)
pool2 = MaxPooling2D((2,2), strides=(2,2))(conv4)
#
padding5 = ZeroPadding2D((1,1))(pool2)
conv5 = Convolution2D(256, 3, 3, activation='relu')(padding5)
padding6 = ZeroPadding2D((1,1))(conv5)
conv6 = Convolution2D(256, 3, 3, activation='relu')(padding6)
padding7 = ZeroPadding2D((1,1))(conv6)
conv7 = Convolution2D(256, 3, 3, activation='relu')(padding7)
pool3 = MaxPooling2D((2,2), strides=(2,2))(conv7)
#
padding8 = ZeroPadding2D((1,1))(pool3)
conv8 = Convolution2D(512, 3, 3, activation='relu')(padding8)
padding9 = ZeroPadding2D((1,1))(conv8)
conv9 = Convolution2D(512, 3, 3, activation='relu')(padding9)
padding10 = ZeroPadding2D((1,1))(conv9)
conv10 = Convolution2D(512, 3, 3, activation='relu')(padding10)
pool4 = MaxPooling2D((2,2), strides=(2,2))(conv10)
#
padding11 = ZeroPadding2D((1,1))(pool4)
conv11 = Convolution2D(512, 3, 3, activation='relu')(padding11)
padding12 = ZeroPadding2D((1,1))(conv11)
conv12 = Convolution2D(512, 3, 3, activation='relu')(padding12)
padding13 = ZeroPadding2D((1,1))(conv12)
conv13 = Convolution2D(512, 3, 3, activation='relu')(padding13)
pool5 = MaxPooling2D((2,2), strides=(2,2))(conv13)
#
padding14 = ZeroPadding2D((1,1))(conv2)
conv1_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding14)
padding15 = ZeroPadding2D((1,1))(conv4)
conv2_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding15)
padding16 = ZeroPadding2D((1,1))(conv7)
conv3_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding16)
padding17 = ZeroPadding2D((1,1))(conv10)
conv4_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding17)
padding18 = ZeroPadding2D((1,1))(conv13)
conv5_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding18)

upsample2 = UpSampling2D(size=(2, 2), dim_ordering='th')(conv2_2_16)


upsample3 = UpSampling2D(size=(4, 4), dim_ordering='th')(conv3_3_16)
upsample4 = UpSampling2D(size=(8, 8), dim_ordering='th')(conv4_3_16)
upsample5 = UpSampling2D(size=(16, 16), dim_ordering='th')(conv5_3_16)

from keras.preprocessing import image


x2=Lambda(f,f_output_shape)
x_stack = x2([conv1_2_16, upsample2, upsample3, upsample4])
padding_sub = ZeroPadding2D((1,1))(x_stack)
final_conv1_sub = Convolution2D(1, 3, 3, activation='relu')(padding_sub)
final_conv1 = Convolution2D(1, 1, 1, activation='sigmoid')(final_conv1_sub)
#N_epochs = 100000
#batch_size = 2

8 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [14]: from __future__ import print_function


import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K

batch_size = 64
num_classes = 4
nb_epochs = 1000
data_augmentation = True
#class_weight=(0.06,0.50,0.80)
class_weight = {0 : 1.,1: 12.,2: 6.}

# input image dimensions


img_rows, img_cols = 224, 224
# The CIFAR10 images are RGB.
img_channels = 3

y_train = np_utils.to_categorical(label_train, num_classes)


y_test = np_utils.to_categorical(label_test, num_classes)
y_vali = np_utils.to_categorical(label_vali, num_classes)
# convert class vectors to binary class matrices
#y_train = keras.utils.to_categorical(y_train, num_classes)
#y_test = keras.utils.to_categorical(y_test, num_classes)
#print('X_train shape:', train_images_Messidor.shape)
print('Y_train shape:', y_train.shape)
print('Y_test shape:', y_test.shape)
print('Y_vali shape:', y_vali.shape)
# Let's train the model using RMSprop
#model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
Y_train shape: (840, 4)
Y_test shape: (360, 4)
Y_vali shape: (360, 4)

In [15]: #Train
bv_train = np.asarray(output_features_train)
od_train = np.asarray(output_features_train1)
ex_train = np.asarray(output_features_train2)
#Test
bv_test = np.asarray(output_features_test)
od_test = np.asarray(output_features_test1)
ex_test = np.asarray(output_features_test2)
#Validation
bv_vali = np.asarray(output_features_vali)
od_vali = np.asarray(output_features_vali1)
ex_vali = np.asarray(output_features_vali2)

9 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [16]: x_train = np.asarray(train_images_Messidor)


x_test = np.asarray(test_images_Messidor)
x_vali = np.asarray(vali_images_Messidor)
data4D = np.zeros([len(x_train),3,224,224],np.float32)
#four_channels_training = np.stack((x_train, bv_train)).squeeze()

data_6D_train = np.zeros((len(x_train),4,224,224))
for i in range(0,len(x_train)):
data_6D_train[i,0:3,:,:] = (x_train[i])
#tmp_patch[:,:,3] = skimage.color.rgb2gray(caffe.io.load_image(dataset_masks_train[i][
data_6D_train[i,3,:,:] = ex_train[i]
#data_6D_train[i,4,:,:] = bv_train[i]
#data_6D_train[i,5,:,:] = od_train[i]

data_6D_test = np.zeros((len(x_test),4,224,224))
for i in range(0,len(x_test)):
data_6D_test[i,0:3,:,:] = (x_test[i])
#tmp_patch[:,:,3] = skimage.color.rgb2gray(caffe.io.load_image(dataset_masks_train[i][
data_6D_test[i,3,:,:] = ex_test[i]
#data_6D_test[i,4,:,:] = bv_test[i]
#data_6D_test[i,5,:,:] = od_test[i]

data_6D_vali = np.zeros((len(x_vali),4,224,224))
for i in range(0,len(x_vali)):
data_6D_vali[i,0:3,:,:] = (x_vali[i])
#tmp_patch[:,:,3] = skimage.color.rgb2gray(caffe.io.load_image(dataset_masks_train[i][
data_6D_vali[i,3,:,:] = ex_vali[i]
#data_6D_vali[i,4,:,:] = bv_vali[i]
#data_6D_vali[i,5,:,:] = od_vali[i]

10 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [17]: import theano


import theano.tensor as T

def log_loss(y, t, eps=1e-15):


"""
cross entropy loss, summed over classes, mean over batches
"""
y = T.clip(y, eps, 1 - eps)
loss = -T.sum(t * T.log(y)) / y.shape[0].astype(theano.config.floatX)
return loss

def accuracy_loss(y, t, eps=1e-15):


y_ = T.cast(T.argmax(y, axis=1), 'int32')
t_ = T.cast(T.argmax(t, axis=1), 'int32')

# predictions = T.argmax(y, axis=1)


return -T.mean(T.switch(T.eq(y_, t_), 1, 0))

def quad_kappa_loss(y, t, y_pow=1, eps=1e-15):


num_scored_items = y.shape[0]
num_ratings = 4
tmp = T.tile(T.arange(0, num_ratings).reshape((num_ratings, 1)),
reps=(1, num_ratings)).astype(theano.config.floatX)
weights = (tmp - tmp.T) ** 2 / (num_ratings - 1) ** 2

y_ = y ** y_pow
y_norm = y_ / (eps + y_.sum(axis=1).reshape((num_scored_items, 1)))

hist_rater_a = y_norm.sum(axis=0)
hist_rater_b = t.sum(axis=0)

conf_mat = T.dot(y_norm.T, t)

nom = T.sum(weights * conf_mat)


denom = T.sum(weights * T.dot(hist_rater_a.reshape((num_ratings, 1)),
hist_rater_b.reshape((1, num_ratings))) /
num_scored_items.astype(theano.config.floatX))

return - (1 - nom / denom)

def quad_kappa_log_hybrid_loss(y, t, y_pow=1, log_scale=0.5, log_offset=0.50):


log_loss_res = log_loss(y, t)
kappa_loss_res = quad_kappa_loss(y, t, y_pow=y_pow)
return kappa_loss_res + log_scale * (log_loss_res - log_offset)

def quad_kappa_log_hybrid_loss_clipped(
y, t, y_pow=1, log_cutoff=0.9, log_scale=0.5):
log_loss_res = log_loss(y, t)
kappa_loss_res = quad_kappa_loss(y, t, y_pow=y_pow)
return kappa_loss_res + log_scale * \
T.clip(log_loss_res, log_cutoff, 10 ** 3)

11 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [18]: input_shape = (4,224,224)


def baseline_model():
model = Sequential()
model.add(Convolution2D(32, 5, 5,input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'
return model

12 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [19]: #model = Model(input=input, output=dense3)


model=baseline_model()
print_summary(model.layers)
____________________________________________________________________________
________________________
Layer (type) Output Shape Param # Connected
to
============================================================================
========================
convolution2d_61 (Convolution2D) (None, 32, 220, 220) 3232 convoluti
on2d_input_1[0][0]
____________________________________________________________________________
________________________
activation_1 (Activation) (None, 32, 220, 220) 0 convoluti
on2d_61[0][0]
____________________________________________________________________________
________________________
maxpooling2d_16 (MaxPooling2D) (None, 32, 55, 55) 0 activatio
n_1[0][0]
____________________________________________________________________________
________________________
dropout_1 (Dropout) (None, 32, 55, 55) 0 maxpoolin
g2d_16[0][0]
____________________________________________________________________________
________________________
convolution2d_62 (Convolution2D) (None, 64, 55, 55) 18496 dropout_1
[0][0]
____________________________________________________________________________
________________________
activation_2 (Activation) (None, 64, 55, 55) 0 convoluti
on2d_62[0][0]
____________________________________________________________________________
________________________
convolution2d_63 (Convolution2D) (None, 64, 53, 53) 36928 activatio
n_2[0][0]
____________________________________________________________________________
________________________
activation_3 (Activation) (None, 64, 53, 53) 0 convoluti
on2d_63[0][0]
____________________________________________________________________________
________________________
maxpooling2d_17 (MaxPooling2D) (None, 64, 13, 13) 0 activatio
n_3[0][0]
____________________________________________________________________________
________________________
dropout_2 (Dropout) (None, 64, 13, 13) 0 maxpoolin
g2d_17[0][0]
____________________________________________________________________________
________________________
flatten_1 (Flatten) (None, 10816) 0 dropout_2
[0][0]
____________________________________________________________________________
________________________
dense_1 (Dense) (None, 512) 5538304 flatten_1
[0][0]
____________________________________________________________________________
________________________
activation_4 (Activation) (None, 512) 0 dense_1[0
][0]
____________________________________________________________________________
________________________
dropout_3 (Dropout) (None, 512) 0 activatio
n_4[0][0]
____________________________________________________________________________

13 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [20]: #model.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.Adade
from keras.callbacks import ModelCheckpoint
checkpointer = ModelCheckpoint(filepath="/home/ojperdomoc/DR_3D_acc_kappa_H.hdf5, monitor=
history = model.fit(data_6D_train, y_train, batch_size=batch_size, nb_epoch=nb_epochs
##history = model.fit(data_6D_train, y_train,batch_size=batch_size,nb_epoch=nb_epochs,verb
#history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,verbose=1,
#history = model.fit_generator(combine_generator(image_generator, mask_generator),samples_
#history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,verbose=1,
score = model.evaluate(data_6D_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['quad_kappa_loss'])
plt.plot(history.history['val_quad_kappa_loss'])
plt.title('model quad_kappa_loss')
plt.ylabel('quad_kappa_loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

Train on 840 samples, validate on 360 samples


Epoch 1/1000
840/840 [==============================] - 3s - loss: 4.6095 - acc: 0.3667 -
quad_kappa_loss: -0.0057 - quad_kappa_log_hybrid_loss: 10.7832 - quad_kappa_
log_hybrid_loss_clipped: 11.0332 - val_loss: 0.9504 - val_acc: 0.8111 - val_
quad_kappa_loss: 0.0061 - val_quad_kappa_log_hybrid_loss: 10.1880 - val_quad
_kappa_log_hybrid_loss_clipped: 10.4380
Epoch 2/1000
840/840 [==============================] - 3s - loss: 2.6784 - acc: 0.4119 -
quad_kappa_loss: -0.0057 - quad_kappa_log_hybrid_loss: 11.0627 - quad_kappa_
log_hybrid_loss_clipped: 11.3127 - val_loss: 0.8470 - val_acc: 0.8111 - val_
quad_kappa_loss: -5.7774e-04 - val_quad_kappa_log_hybrid_loss: 9.4092 - val_
quad_kappa_log_hybrid_loss_clipped: 9.6592
Epoch 3/1000
840/840 [==============================] - 3s - loss: 2.7661 - acc: 0.4643 -
quad_kappa_loss: -0.0017 - quad_kappa_log_hybrid_loss: 11.1201 - quad_kappa_
log_hybrid_loss_clipped: 11.3701 - val_loss: 0.9848 - val_acc: 0.8250 - val_
quad_kappa_loss: -0.0050 - val_quad_kappa_log_hybrid_loss: 10.5214 - val_qua
d_kappa_log_hybrid_loss_clipped: 10.7714
Epoch 4/1000
840/840 [==============================] - 3s - loss: 2.4705 - acc: 0.6060 -
quad_kappa_loss: -0.0519 - quad_kappa_log_hybrid_loss: 10.2827 - quad_kappa_
log_hybrid_loss_clipped: 10.5327 - val_loss: 1.1123 - val_acc: 0.7889 - val_
quad_kappa_loss: -0.0213 - val_quad_kappa_log_hybrid_loss: 11.2755 - val_qua
d_kappa_log_hybrid_loss_clipped: 11.5255

14 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [21]: y_Pred = model.predict_classes(data_6D_test)


print('Test loss:', score[0])
print('Test accuracy:', score[1])
360/360 [==============================] - 0s
Test loss: 2.32239194976
Test accuracy: 0.794444444444

In [22]: from sklearn.metrics import confusion_matrix


confusion_matrix(label_test, y_Pred)
Out[22]: array([[264, 5, 23],
[ 18, 2, 3],
[ 22, 3, 20]])

In [ ]: import numpy as np
RGB = np.array([[0, 1, 2, 0],[1, 2, 3, 1],[4, 5, 6, 0],[0, 1, 2, 0]])
for i in range(0,4):
for j in range(0,4):
a=np.sum(RGB[0,:])
b=np.sum(RGB[1,:])
c=np.sum(RGB[2,:])
d=np.sum(RGB[3,:])
zero_0 = np.zeros([a],np.float32)
ones_1 = np.ones([b],np.float32)
two_2 = np.ones([c],np.float32)*2
three_3 = np.ones([d],np.float32)*3
In [24]: def b_loss(x, x_decoded_mean):
# NOTE: binary_crossentropy expects a batch_size by dim
# for x and x_decoded_mean, so we MUST flatten these!
xent_loss = K.binary_crossentropy(x_decoded_mean, x).mean()
#kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1
return xent_loss
In [ ]: #model = Model(input=input, output=final_conv1)
nb_epoch = 500
batch_size = 2
data_augment = True
class_weight=(0.01,0.99)
#sgd = SGD(lr=1e-3, decay=1e-8, momentum=0.9, nesterov=True)
#model.compile(optimizer=Adam(0.1), loss=b_loss,metrics=['accuracy'])
#model.fit(training_images_DRIVE/255.0, groundTruth_images_DRIVE/255.0, batch_size=32, nb_
In [ ]: X_train=np.asarray(training_images_EX).astype('float32')/255.0
Y_train=np.asarray(groundTruth_images_EX).astype('float32')/255.0 > 0.1
datagen = ImageDataGenerator(
featurewise_center=True, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total wi
height_shift_range=0.1, # randomly shift images vertically (fraction of total hei
horizontal_flip=True, # randomly flip images
vertical_flip=True, # randomly flip images
shear_range=0.2,
zoom_range=0.2)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(X_train)

15 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: X_train=np.asarray(training_images_EX).astype('float32')/255.0
Y_train=np.asarray(groundTruth_images_EX).astype('float32')/255.0 > 0.1
data_gen_args = dict(featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the da
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0
width_shift_range=0.1, # randomly shift images horizontally (fraction
height_shift_range=0.1, # randomly shift images vertically (fraction
horizontal_flip=True, # randomly flip images
vertical_flip=True, # randomly flip images
shear_range=0.2,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(X_train, augment=True, seed=seed)
mask_datagen.fit(Y_train, augment=True, seed=seed)
image_generator = image_datagen.flow(
X_train, range(32),
seed=seed)

mask_generator = mask_datagen.flow(
Y_train, range(32),
seed=seed)

def combine_generator(gen1, gen2):


while True:
yield(gen1.next()[0], gen2.next()[0])

#model.fit_generator(image_generator,mask_generator,samples_per_epoch=2,epochs=100)
#train_generator = zip(image_generator[0], mask_generator[0])
# combine generators into one which yields image and masks
#train_generator = zip(image_generator, mask_generator)
#train_generator = image_generator+mask_generator
xx5 = X_train[12,:,:,:].transpose(1,2,0)
In [ ]: out1 = next(image_generator)
out2 = next(mask_generator)
abee=out1[1][:]
abe2=out2[1][:]
print abee, abe2
#print abee
print out1[0].shape
plt.subplot(1,2,1)
plt.imshow(out1[0][0].transpose(1,2,0))
plt.subplot(1,2,2)
plt.imshow(out2[0][0].transpose(1,2,0).squeeze())

16 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: # for custom metrics


import keras.backend as K
from theano import tensor
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score

def jaccard_index(y_true, y_pred):


y_pred = y_pred > 0.1
y_true = y_true > 0.1
inters = (y_true & y_pred).sum(axis=(2, 3))
union = (y_true | y_pred).sum(axis=(2, 3))
return tensor.switch(tensor.eq(union, 0), 1, tensor.cast(inters, 'float32')

def conf_matrix(y_actual, y_hat):


y_actual = y_actual > 0.1
y_hat = y_hat > 0.1
TP = (y_actual & y_hat).sum(axis=(2, 3))
FP = (~y_actual & y_hat).sum(axis=(2, 3))
TN = (~y_actual & ~y_hat).sum(axis=(2, 3))
FN = (y_actual & ~y_hat).sum(axis=(2, 3))
Total = TP + FN
#tmp2 = (TP * (1./ Total)).eval({y_hat_org: val1, y_actual_org: val2})
#return tensor.switch(tensor.eq(Total, 0), 1, tensor.cast(TP, 'float32') / Total).mean
#return (TP*(Total^-1)).mean()
return (TP * (1./Total)).mean()

def conf_matrix2(y_actual, y_hat):


y_actual = y_actual > 0.1
y_hat = y_hat > 0.1
TP = (y_actual & y_hat).sum(axis=(2, 3))
FP = (~y_actual & y_hat).sum(axis=(2, 3))
TN = (~y_actual & ~y_hat).sum(axis=(2, 3))
FN = (y_actual & ~y_hat).sum(axis=(2, 3))
#Total = TP + FN
#tmp2 = (TP * (1./ Total)).eval({y_hat_org: val1, y_actual_org: val2})
#return tensor.switch(tensor.eq(Total, 0), 1, tensor.cast(TP, 'float32') / Total).mean
#return (TP*(Total^-1)).mean()
return (TP).mean()

def conf_matrix3(y_actual, y_hat):


y_actual = y_actual > 0.1
y_hat = y_hat > 0.1
TP = (y_actual & y_hat).sum(axis=(2, 3))
FP = (~y_actual & y_hat).sum(axis=(2, 3))
TN = (~y_actual & ~y_hat).sum(axis=(2, 3))
FN = (y_actual & ~y_hat).sum(axis=(2, 3))
Total = TP + FN
#tmp2 = (TP * (1./ Total)).eval({y_hat_org: val1, y_actual_org: val2})
#return tensor.switch(tensor.eq(Total, 0), 1, tensor.cast(TP, 'float32') / Total).mean
return (Total).mean()
#return (TP).mean()

17 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: y_actual_org = tensor.tensor4()
y_hat_org = tensor.tensor4()
y_actual = y_actual_org > 0.1
y_hat = y_hat_org > 0.1
TP = (y_actual & y_hat).sum(axis=(2, 3))
FP = (~y_actual & y_hat).sum(axis=(2, 3))
TN = (~y_actual & ~y_hat).sum(axis=(2, 3))
FN = (y_actual & ~y_hat).sum(axis=(2, 3))
Total = TP + FN
val1 = np.random.rand(15,1,224,224).astype('float32')
val2 = np.random.rand(15,1,224,224).astype('float32')
tmp2 = (TP * (1./ Total)).eval({y_hat_org: val1, y_actual_org: val2}).mean()
tmp2
In [ ]: tmp2 = (TP * (1./ Total)).eval({y_hat_org: val1, y_actual_org: val2}).mean()
tmp2
In [ ]: from keras import backend as K
from keras.backend import image_dim_ordering, set_image_dim_ordering
from keras.utils.generic_utils import get_from_module
#test_images_DRIVE = np.flatten(test_images_DRIVE)
#groundTruth_test_images_DRIVE = K.flatten(groundTruth_test_images_DRIVE)
# Fit the model on the batches generated by datagen.flow().

X_train=np.asarray(training_images_EX).astype('float32')/255.0
Y_train=np.asarray(groundTruth_images_EX).astype('float32')/255.0 > 0.1
X_test=np.asarray(test_images_EX).astype('float32')/255.0
Y_test=np.asarray(groundTruth_test_images_EX).astype('float32')/255.0 >0.1
#sgd = SGD(lr=1e-6, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(optimizer=sgd, loss=b_loss,metrics=['accuracy', jaccard_index])
#model.compile(optimizer=Adam(1e-5), loss=b_loss, metrics=['accuracy','fmeasure', 'precisi
model.compile(optimizer=Adam(1e-5), loss=b_loss, metrics=['accuracy', jaccard_index

In [ ]: #history = model.fit_generator(datagen.flow(X_train, Y_train,batch_size=batch_size),


# samples_per_epoch=X_train.shape[0],
# nb_epoch=nb_epoch,
# validation_data=(X_test, Y_test),
# class_weight=class_weight)
In [ ]: aa=combine_generator(image_generator, mask_generator)
#demo_gen = num_gen()
out=next(aa)
abee=out[0][0][:]
abe2=out[1][0][:]
print abee.shape, abe2.shape
plt.subplot(1,2,1)
plt.imshow(abee.transpose(1,2,0))
plt.subplot(1,2,2)
plt.imshow(abe2.transpose(1,2,0).squeeze())

18 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: #model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True,


#history = model.fit(image_generator, mask_generator, batch_size=batch_size, nb_epoch=nb_e
from keras.callbacks import ModelCheckpoint
#checkpointer = ModelCheckpoint(filepath="/home/ojperdomoc/EX_Final_weights.hdf5, monitor=
checkpointer = ModelCheckpoint(filepath="/home/ojperdomoc/EX1_Final_weights.hdf5, monitor=
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,verbose
#history = model.fit_generator(combine_generator(image_generator, mask_generator),samples_

#history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,verbose=1,


score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
stop = timeit.default_timer()
print stop - start

In [ ]: # summarize history for Jaccard Index


plt.plot(history.history['jaccard_index'])
plt.plot(history.history['val_jaccard_index'])
plt.title('jaccard_index')
plt.ylabel('jaccard_index')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
In [ ]: print(history.history.keys())

In [ ]: plt.plot(history.history['acc'][:175])
plt.plot(history.history['val_acc'][:175])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
In [ ]: Y_hat = final_conv1.eval({input: X_train})
Y_hat_t= final_conv1.eval({input: X_test})
In [ ]: #Training
plt.subplot(1,3,1)
plt.imshow(X_train[12,:,:,:].transpose(1,2,0))
plt.subplot(1,3,2)
plt.imshow(Y_hat[12,0,:,:]>0.1)
plt.subplot(1,3,3)
plt.imshow(Y_train[12,0,:,:])

19 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: #Test
plt.subplot(1,3,1)
plt.imshow(X_test[10,:,:,:].transpose(1,2,0))
plt.subplot(1,3,2)
plt.imshow(Y_hat_t[10,0,:,:]>0.1)
plt.subplot(1,3,3)
plt.imshow(Y_test[10,0,:,:]>0.1)
In [ ]: #Jaccard index
import numpy as np
from sklearn.metrics import jaccard_similarity_score
#print jaccard_similarity_score(y_true, y_pred)
#print jaccard_similarity_score(y_true, y_pred, normalize=False)
bb=[]
for x in range(0, 32):
aa = jaccard_similarity_score(Y_train[x,0,:,:].flatten(),Y_hat[x,0,:,:].flatten
bb.append(aa)
print 'Jaccard index (training) score is {}'.format(np.mean(bb))
print '----------------------------------------------------------------------'
dd=[]
for x in range(0, 15):
ee = jaccard_similarity_score(Y_test[x,0,:,:].flatten(),Y_hat_t[x,0,:,:].flatten
dd.append(ee)
print 'Jaccard index (test) score is {}'.format(np.mean(dd))
In [ ]: #Dice coefficient
k=1
cc=[]
for x in range(0,32):
dice = np.sum(Y_hat[x,0,:,:][Y_train[x,0,:,:]==k])*2.0 / (np.sum(Y_hat[x,0,:,:])
cc.append(dice)
print 'Dice similarity (training) score is {}'.format(np.mean(cc))
print '-----------------------------------------------------------------------------------
ff=[]
for x in range(0,15):
dice1 = np.sum(Y_hat_t[x,0,:,:][Y_test[x,0,:,:]==k])*2.0 / (np.sum(Y_hat_t[x
ff.append(dice1)
print 'Dice similarity (test) score is {}'.format(np.mean(ff))

In [ ]: aa2=[]
bb2=[]
for x in range(0,32):
aa=Y_hat[x,0,:,:]
aa=aa.flatten('F')
bb = Y_train[x,0,:,:]
bb= bb.flatten('F')
aa2.append(aa)
bb2.append(bb)

20 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: from sklearn.metrics import roc_curve


from sklearn.metrics import roc_auc_score

fpr1=[]
tpr1=[]
thresholds1=[]
AUC_ROC1=[]
for x in range(0,24):
fpr, tpr, thresholds = roc_curve(bb2[x], aa2[x])
AUC_ROC = roc_auc_score(bb2[x], aa2[x])
fpr1.append(fpr)
tpr1.append(tpr)
thresholds1.append(thresholds)
AUC_ROC1.append(AUC_ROC)
# test_integral = np.trapz(tpr,fpr) #trapz is numpy integration
print "\nArea under the ROC curve: " +str(np.mean(AUC_ROC1))
#roc_curve = plt.figure()
plt.plot(fpr,tpr,'-')#label='Area Under the Curve (AUC = %0.4f)' % np.mean(AUC_ROC1))
plt.title('ROC curve')
plt.xlabel("FPR (False Positive Rate)")
plt.ylabel("TPR (True Positive Rate)")
plt.legend(loc="lower right")

plt.savefig('/home/ojperdomoc/'+"EX_ROC.png")
In [ ]: aa1=[]
bb1=[]
for x in range(0,15):
aa=Y_hat_t[x,0,:,:]
aa=aa.flatten('F')
bb = Y_test[x,0,:,:]
bb=bb.flatten('F')
aa1.append(aa)
bb1.append(bb)
In [ ]: #AUC curve
import numpy as np
from sklearn.metrics import precision_recall_curve
import sys
import numpy as np
import pylab as pl
from sklearn.metrics import precision_recall_curve

#print bb.shape
for x in range(0,15):
precision, recall, threshold = precision_recall_curve(bb1[x], aa1[x])
#recall = np.linspace(0.0, 1.0, num=42)
#precision = np.random.rand(42)*(1.-recall)

# take a running maximum over the reversed vector of precision values, reverse the
# result to match the order of the recall vector
decreasing_max_precision = np.maximum.accumulate(precision[::-1])[::-1]
#f, ax = plt.subplots()
plt.hold(True)
plt.plot(recall, precision, '--b')
plt.step(recall, decreasing_max_precision, '-r')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.legend(loc="upper right")
#plt.savefig(fname)

21 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: from sklearn.metrics import roc_curve


from sklearn.metrics import roc_auc_score

fpr2=[]
tpr2=[]
thresholds2=[]
AUC_ROC2=[]
for x in range(0,15):
fpr, tpr, thresholds = roc_curve(bb1[x], aa1[x])
AUC_ROC = roc_auc_score(bb1[x], aa1[x])
fpr2.append(fpr)
tpr2.append(tpr)
thresholds2.append(thresholds)
AUC_ROC2.append(AUC_ROC)
# test_integral = np.trapz(tpr,fpr) #trapz is numpy integration
#roc_curve = plt.figure()
plt.plot(fpr,tpr,'-')#label='Area Under the Curve (AUC = %0.4f)' % np.mean(AUC_ROC1))
plt.title('ROC curve')
plt.xlabel("FPR (False Positive Rate)")
plt.ylabel("TPR (True Positive Rate)")
plt.legend(loc="lower right")
print "\nArea under the ROC curve: " +str(np.mean(AUC_ROC2))
In [ ]: aa1=[]
bb1=[]
for x in range(0,15):
aa=Y_hat_t[x,0,:,:]
aa=aa.flatten('F')>0.1
bb = Y_test[x,0,:,:]
bb=bb.flatten('F')>0.1
aa1.append(aa)
bb1.append(bb)
from sklearn.metrics import confusion_matrix
acc=0
acc2=[]
for x in range(0,15):
acc=confusion_matrix(bb1[x], aa1[x])
acc2.append(acc)
In [ ]: sens=[]
spec=[]
FNR_=[]
FPR_=[]
for i in range(0,15):
sensitivity=float(acc2[i][1,1])/(acc2[i][1,1]+acc2[i][1,0])
specificity=float(acc2[i][0,0])/(acc2[i][0,1]+acc2[i][0,0])
FNR=float(acc2[i][1,0])/(acc2[i][1,1]+acc2[i][1,0])
FPR=float(acc2[i][0,1])/(acc2[i][0,1]+acc2[i][0,0])
sens.append(sensitivity)
spec.append(specificity)
FNR_.append(FNR)
FPR_.append(FPR)
print np.mean(sens),np.mean(spec),np.mean(FNR_),np.mean(FPR_)

22 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: #AUC curve
import numpy as np
from sklearn.metrics import precision_recall_curve
import sys
import numpy as np
import pylab as pl
from sklearn.metrics import precision_recall_curve

pre=[]
re=[]
tre=[]
dec=[]
#print bb.shape
for x in range(0,15):
precision, recall, threshold = precision_recall_curve(bb1[x], aa1[x])
#recall = np.linspace(0.0, 1.0, num=42)
#precision = np.random.rand(42)*(1.-recall)

# take a running maximum over the reversed vector of precision values, reverse the
# result to match the order of the recall vector
decreasing_max_precision = np.maximum.accumulate(precision[::-1])[::-1]
pre.append(precision)
re.append(recall)
tre.append(threshold)
dec.append(decreasing_max_precision)
#f, ax = plt.subplots()
plt.hold(True)
plt.plot(recall, precision, '--b')
plt.step(recall, decreasing_max_precision, '-r')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.legend(loc="upper right")
#plt.savefig(fname)
In [ ]: import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
precision = dict()
recall = dict()
average_precision = dict()
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
lw = 2
for x in range(15):
precision[x], recall[x], _ = precision_recall_curve(bb1[x], aa1[x])
average_precision[x] = average_precision_score(bb1[x], aa1[x])
# Plot Precision-Recall curve
#plt.clf()
plt.plot(recall[10], precision[10], lw=lw, color='navy',
label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
#plt.show()
plt.savefig('/home/ojperdomoc/'+'Precision-Recall_BloodVessel.png')

23 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...

In [ ]: np.mean(average_precision.values())

In [ ]:

In [ ]:

In [ ]:

24 de 24 2/05/17, 2:21 p. m.

Você também pode gostar