Escolar Documentos
Profissional Documentos
Cultura Documentos
# (cell In [2]) Load the e-ophtha EX training images as channels-first
# float32 arrays (Theano ordering) into training_images_EX.
# NOTE(review): np / Image / timeit come from imports in cells not visible here.
start = timeit.default_timer()
training_images_EX = []
from os import listdir
from os.path import isfile, join
_ex_train_dir = "/home/ojperdomoc/DRIU_224_224/Ophtha_EX/EFI_Train/"
# BUG(extraction): the comprehension was truncated at the page edge; the
# standard "if isfile(...)" filter is reconstructed — confirm against the
# original notebook.
onlyfiles_EX = [f for f in listdir(_ex_train_dir) if isfile(join(_ex_train_dir, f))]
for im in onlyfiles_EX:
    # Original had a doubled leading slash ("//home/..."); harmless on POSIX,
    # normalized here.
    imaa = join(_ex_train_dir, im)
    # HWC uint8 -> CHW float32 for the Theano-ordered Keras models below.
    data = np.asarray(Image.open(imaa)).transpose(2, 0, 1).astype('float32')
    training_images_EX.append(data)
# (cell In [3]) Load the e-ophtha EX test images, mirroring the training
# loader: channels-first float32 arrays into test_images_EX.
test_images_EX = []
from os import listdir
from os.path import isfile, join
_ex_test_dir = "/home/ojperdomoc/DRIU_224_224/Ophtha_EX/EFI_Test/"
# BUG(extraction): truncated comprehension; "if isfile(...)" reconstructed.
onlyfiles1_EX = [f for f in listdir(_ex_test_dir) if isfile(join(_ex_test_dir, f))]
for im in onlyfiles1_EX:
    imaa = join(_ex_test_dir, im)
    # HWC uint8 -> CHW float32.
    data = np.asarray(Image.open(imaa)).transpose(2, 0, 1).astype('float32')
    test_images_EX.append(data)
1 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Load the exudate ground-truth masks for the training images. Mask files
# mirror the image names with an "_EX" suffix, so iteration follows
# onlyfiles_EX to keep images and masks aligned.
groundTruth_images_EX = []
from os import listdir
from os.path import isfile, join
_mask_train_dir = "/home/ojperdomoc/DRIU_224_224/Ophtha_EX/Mask_Train/"
for im in onlyfiles_EX:
    # BUG(extraction): closing paren was truncated; reconstructed.
    imaa = join(_mask_train_dir, im[0:-4] + '_EX.jpg')
    # Single-channel mask: add a trailing channel axis, then channels-first.
    # NOTE(review): the trailing ".astype('float32')" was cut off in the
    # extraction and is reconstructed by analogy with the image loaders —
    # confirm against the original notebook.
    data = np.expand_dims(np.asarray(Image.open(imaa)), axis=-1).transpose(2, 0, 1).astype('float32')
    groundTruth_images_EX.append(data)
# Load the exudate ground-truth masks for the test images (same naming
# convention as the training masks), aligned with onlyfiles1_EX.
groundTruth_test_images_EX = []
from os import listdir
from os.path import isfile, join
_mask_test_dir = "/home/ojperdomoc/DRIU_224_224/Ophtha_EX/Mask_Test/"
for im in onlyfiles1_EX:
    # BUG(extraction): closing paren was truncated; reconstructed.
    imaa = join(_mask_test_dir, im[0:-4] + '_EX.jpg')
    # NOTE(review): trailing ".astype('float32')" reconstructed from the
    # truncated line — confirm against the original notebook.
    data = np.expand_dims(np.asarray(Image.open(imaa)), axis=-1).transpose(2, 0, 1).astype('float32')
    groundTruth_test_images_EX.append(data)
2 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
3 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# (cell In [6]) Messidor DME training set: images scaled to [0, 1] in CHW
# order, with the DME grade read from the first character of each filename.
train_images_Messidor = []
label_train = []
from os import listdir
from os.path import isfile, join
_messidor_train = "/home/ojperdomoc/Messidor_/Messidor_DME2/Training/"
# BUG(extraction): truncated comprehension; the "if isfile(...)" filter is
# reconstructed (the commented-out variant in the original ended "if is...").
onlyfiles = [f for f in listdir(_messidor_train) if isfile(join(_messidor_train, f))]
for im in onlyfiles:
    imaa = join(_messidor_train, im)
    # HWC uint8 -> CHW float32.
    data = np.asarray(Image.open(imaa)).transpose(2, 0, 1).astype('float32')
    data2 = int(im[0])  # DME grade encoded as the filename's first character
    label_train.append(data2)
    train_images_Messidor.append(data / 255.)  # scale to [0, 1]
# Messidor DME test set, mirroring the training loader.
test_images_Messidor = []
label_test = []
from os import listdir
from os.path import isfile, join
_messidor_test = "/home/ojperdomoc/Messidor_/Messidor_DME2/Test/"
# BUG(extraction): truncated comprehension; "if isfile(...)" reconstructed.
onlyfiles1 = [f for f in listdir(_messidor_test) if isfile(join(_messidor_test, f))]
for im in onlyfiles1:
    imaa = join(_messidor_test, im)
    data = np.asarray(Image.open(imaa)).transpose(2, 0, 1).astype('float32')
    data2 = int(im[0])  # DME grade encoded as the filename's first character
    label_test.append(data2)
    test_images_Messidor.append(data / 255.)  # scale to [0, 1]
# Messidor DME "validation" set.
# NOTE(review): this reads from the *Test/* directory — the commented-out
# path in the original pointed at a Val/ folder, so validation currently
# duplicates the test split. Confirm this reuse is intentional.
vali_images_Messidor = []
label_vali = []
from os import listdir
from os.path import isfile, join
_messidor_vali = "/home/ojperdomoc/Messidor_/Messidor_DME2/Test/"
# BUG(extraction): truncated comprehension; "if isfile(...)" reconstructed.
onlyfiles2 = [f for f in listdir(_messidor_vali) if isfile(join(_messidor_vali, f))]
for im in onlyfiles2:
    imaa = join(_messidor_vali, im)
    data = np.asarray(Image.open(imaa)).transpose(2, 0, 1).astype('float32')
    data2 = int(im[0])  # DME grade encoded as the filename's first character
    label_vali.append(data2)
    vali_images_Messidor.append(data / 255.)  # scale to [0, 1]
4 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
def f(layers):
    """Merge-function for Keras Merge/Lambda: concatenate the given list of
    tensors along the channel axis (axis=1, channels-first ordering)."""
    merged = K.concatenate(layers, axis=1)
    return merged
def f_output_shape(input_shape):
    """Output-shape companion to f(): given the list of input shapes, the
    concatenation of the four branches multiplies the channel dimension
    (index 1) by 4 while leaving all other dimensions unchanged."""
    shape = list(input_shape[0])
    shape[1] = shape[1] * 4
    return tuple(shape)
def b_loss(x, x_decoded_mean):
    """Mean element-wise binary cross-entropy between target `x` and
    prediction `x_decoded_mean`.

    Keras 1.x backend argument order is binary_crossentropy(output, target);
    the per-element losses are then averaged over every dimension.
    """
    per_element_xent = K.binary_crossentropy(x_decoded_mean, x)
    return per_element_xent.mean()
# (cell In [10]) Stack the Messidor image lists into single numpy arrays
# of shape (n_images, 3, 224, 224).
x_train, x_test, x_vali = (
    np.asarray(split)
    for split in (train_images_Messidor, test_images_Messidor, vali_images_Messidor)
)
5 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# --- DRIU/VGG-16 backbone (Keras 1.x functional API, Theano channels-first
# ordering; inputs are (3, 224, 224)). ---
# NOTE(review): `input` shadows the Python builtin; later cells evaluate
# `final_conv1.eval({input: X_train})`, so the name must not be changed.
# NOTE(review): this block is repeated almost verbatim in the next two
# cells — presumably one backbone per DRIU task (vessels / disc / exudates);
# each repetition rebinds all of these variables.
# Stage 1: two 64-filter 3x3 convs + 2x2 max-pool.
input = Input(shape=(3,224,224))
padding1 = ZeroPadding2D((1,1))(input)
conv1 = Convolution2D(64, 3, 3, activation='relu')(padding1)
padding2 = ZeroPadding2D((1,1))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu')(padding2)
pool1 = MaxPooling2D((2,2), strides=(2,2))(conv2)
# Stage 2: two 128-filter convs + pool.
padding3 = ZeroPadding2D((1,1))(pool1)
conv3 = Convolution2D(128, 3, 3, activation='relu')(padding3)
padding4 = ZeroPadding2D((1,1))(conv3)
conv4 = Convolution2D(128, 3, 3, activation='relu')(padding4)
pool2 = MaxPooling2D((2,2), strides=(2,2))(conv4)
# Stage 3: three 256-filter convs + pool.
padding5 = ZeroPadding2D((1,1))(pool2)
conv5 = Convolution2D(256, 3, 3, activation='relu')(padding5)
padding6 = ZeroPadding2D((1,1))(conv5)
conv6 = Convolution2D(256, 3, 3, activation='relu')(padding6)
padding7 = ZeroPadding2D((1,1))(conv6)
conv7 = Convolution2D(256, 3, 3, activation='relu')(padding7)
pool3 = MaxPooling2D((2,2), strides=(2,2))(conv7)
# Stage 4: three 512-filter convs + pool.
padding8 = ZeroPadding2D((1,1))(pool3)
conv8 = Convolution2D(512, 3, 3, activation='relu')(padding8)
padding9 = ZeroPadding2D((1,1))(conv8)
conv9 = Convolution2D(512, 3, 3, activation='relu')(padding9)
padding10 = ZeroPadding2D((1,1))(conv9)
conv10 = Convolution2D(512, 3, 3, activation='relu')(padding10)
pool4 = MaxPooling2D((2,2), strides=(2,2))(conv10)
# Stage 5: three 512-filter convs + pool (pool5 is defined but unused here).
padding11 = ZeroPadding2D((1,1))(pool4)
conv11 = Convolution2D(512, 3, 3, activation='relu')(padding11)
padding12 = ZeroPadding2D((1,1))(conv11)
conv12 = Convolution2D(512, 3, 3, activation='relu')(padding12)
padding13 = ZeroPadding2D((1,1))(conv12)
conv13 = Convolution2D(512, 3, 3, activation='relu')(padding13)
pool5 = MaxPooling2D((2,2), strides=(2,2))(conv13)
# DRIU side outputs: 16-channel taps off the last conv of each stage.
padding14 = ZeroPadding2D((1,1))(conv2)
conv1_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding14)
padding15 = ZeroPadding2D((1,1))(conv4)
conv2_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding15)
padding16 = ZeroPadding2D((1,1))(conv7)
conv3_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding16)
padding17 = ZeroPadding2D((1,1))(conv10)
conv4_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding17)
padding18 = ZeroPadding2D((1,1))(conv13)
conv5_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding18)
6 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Second near-verbatim copy of the DRIU/VGG backbone (see the previous cell).
# Rebinds input / conv* / pool* / padding* and the 16-channel side outputs —
# NOTE(review): presumably rebuilt per DRIU task before loading that task's
# pretrained weights in cells not visible here; confirm.
input = Input(shape=(3,224,224))
padding1 = ZeroPadding2D((1,1))(input)
conv1 = Convolution2D(64, 3, 3, activation='relu')(padding1)
padding2 = ZeroPadding2D((1,1))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu')(padding2)
pool1 = MaxPooling2D((2,2), strides=(2,2))(conv2)
# Stage 2 (128 filters).
padding3 = ZeroPadding2D((1,1))(pool1)
conv3 = Convolution2D(128, 3, 3, activation='relu')(padding3)
padding4 = ZeroPadding2D((1,1))(conv3)
conv4 = Convolution2D(128, 3, 3, activation='relu')(padding4)
pool2 = MaxPooling2D((2,2), strides=(2,2))(conv4)
# Stage 3 (256 filters).
padding5 = ZeroPadding2D((1,1))(pool2)
conv5 = Convolution2D(256, 3, 3, activation='relu')(padding5)
padding6 = ZeroPadding2D((1,1))(conv5)
conv6 = Convolution2D(256, 3, 3, activation='relu')(padding6)
padding7 = ZeroPadding2D((1,1))(conv6)
conv7 = Convolution2D(256, 3, 3, activation='relu')(padding7)
pool3 = MaxPooling2D((2,2), strides=(2,2))(conv7)
# Stage 4 (512 filters).
padding8 = ZeroPadding2D((1,1))(pool3)
conv8 = Convolution2D(512, 3, 3, activation='relu')(padding8)
padding9 = ZeroPadding2D((1,1))(conv8)
conv9 = Convolution2D(512, 3, 3, activation='relu')(padding9)
padding10 = ZeroPadding2D((1,1))(conv9)
conv10 = Convolution2D(512, 3, 3, activation='relu')(padding10)
pool4 = MaxPooling2D((2,2), strides=(2,2))(conv10)
# Stage 5 (512 filters; pool5 unused below).
padding11 = ZeroPadding2D((1,1))(pool4)
conv11 = Convolution2D(512, 3, 3, activation='relu')(padding11)
padding12 = ZeroPadding2D((1,1))(conv11)
conv12 = Convolution2D(512, 3, 3, activation='relu')(padding12)
padding13 = ZeroPadding2D((1,1))(conv12)
conv13 = Convolution2D(512, 3, 3, activation='relu')(padding13)
pool5 = MaxPooling2D((2,2), strides=(2,2))(conv13)
# DRIU side outputs.
padding14 = ZeroPadding2D((1,1))(conv2)
conv1_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding14)
padding15 = ZeroPadding2D((1,1))(conv4)
conv2_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding15)
padding16 = ZeroPadding2D((1,1))(conv7)
conv3_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding16)
padding17 = ZeroPadding2D((1,1))(conv10)
conv4_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding17)
padding18 = ZeroPadding2D((1,1))(conv13)
conv5_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding18)
7 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Third near-verbatim copy of the DRIU/VGG backbone (see the two previous
# cells); rebinds the same variable names once more.
input = Input(shape=(3,224,224))
padding1 = ZeroPadding2D((1,1))(input)
conv1 = Convolution2D(64, 3, 3, activation='relu')(padding1)
padding2 = ZeroPadding2D((1,1))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu')(padding2)
pool1 = MaxPooling2D((2,2), strides=(2,2))(conv2)
# Stage 2 (128 filters).
padding3 = ZeroPadding2D((1,1))(pool1)
conv3 = Convolution2D(128, 3, 3, activation='relu')(padding3)
padding4 = ZeroPadding2D((1,1))(conv3)
conv4 = Convolution2D(128, 3, 3, activation='relu')(padding4)
pool2 = MaxPooling2D((2,2), strides=(2,2))(conv4)
# Stage 3 (256 filters).
padding5 = ZeroPadding2D((1,1))(pool2)
conv5 = Convolution2D(256, 3, 3, activation='relu')(padding5)
padding6 = ZeroPadding2D((1,1))(conv5)
conv6 = Convolution2D(256, 3, 3, activation='relu')(padding6)
padding7 = ZeroPadding2D((1,1))(conv6)
conv7 = Convolution2D(256, 3, 3, activation='relu')(padding7)
pool3 = MaxPooling2D((2,2), strides=(2,2))(conv7)
# Stage 4 (512 filters).
padding8 = ZeroPadding2D((1,1))(pool3)
conv8 = Convolution2D(512, 3, 3, activation='relu')(padding8)
padding9 = ZeroPadding2D((1,1))(conv8)
conv9 = Convolution2D(512, 3, 3, activation='relu')(padding9)
padding10 = ZeroPadding2D((1,1))(conv9)
conv10 = Convolution2D(512, 3, 3, activation='relu')(padding10)
pool4 = MaxPooling2D((2,2), strides=(2,2))(conv10)
# Stage 5 (512 filters; pool5 unused below).
padding11 = ZeroPadding2D((1,1))(pool4)
conv11 = Convolution2D(512, 3, 3, activation='relu')(padding11)
padding12 = ZeroPadding2D((1,1))(conv11)
conv12 = Convolution2D(512, 3, 3, activation='relu')(padding12)
padding13 = ZeroPadding2D((1,1))(conv12)
conv13 = Convolution2D(512, 3, 3, activation='relu')(padding13)
pool5 = MaxPooling2D((2,2), strides=(2,2))(conv13)
# DRIU side outputs.
padding14 = ZeroPadding2D((1,1))(conv2)
conv1_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding14)
padding15 = ZeroPadding2D((1,1))(conv4)
conv2_2_16 = Convolution2D(16, 3, 3, activation='relu')(padding15)
padding16 = ZeroPadding2D((1,1))(conv7)
conv3_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding16)
padding17 = ZeroPadding2D((1,1))(conv10)
conv4_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding17)
padding18 = ZeroPadding2D((1,1))(conv13)
conv5_3_16 = Convolution2D(16, 3, 3, activation='relu')(padding18)
8 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Hyper-parameters for the DME grading model.
batch_size = 64
num_classes = 4
nb_epochs = 1000
data_augmentation = True
#class_weight=(0.06,0.50,0.80)
# NOTE(review): only 3 class weights are given for num_classes = 4 — class 3
# has no explicit weight; confirm whether a 4th entry is missing.
class_weight = {0 : 1.,1: 12.,2: 6.}
# (cell In [15]) Convert the DRIU-extracted feature maps for each split from
# Python lists to numpy arrays.
# NOTE(review): from the target names, the plain / "1" / "2" suffixed inputs
# presumably hold blood-vessel (bv), optic-disc (od) and exudate (ex) maps —
# confirm against the cells that produced them.
bv_train, od_train, ex_train = (
    np.asarray(feats)
    for feats in (output_features_train, output_features_train1, output_features_train2)
)
bv_test, od_test, ex_test = (
    np.asarray(feats)
    for feats in (output_features_test, output_features_test1, output_features_test2)
)
bv_vali, od_vali, ex_vali = (
    np.asarray(feats)
    for feats in (output_features_vali, output_features_vali1, output_features_vali2)
)
9 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
def _rgb_plus_ex(rgb_images, ex_maps):
    """Stack each (3, 224, 224) RGB image with its exudate probability map
    as a 4th channel, returning a float64 array of shape (n, 4, 224, 224).

    (An earlier 6-channel variant that also appended the blood-vessel and
    optic-disc maps was left commented out in the original cell.)
    """
    stacked = np.zeros((len(rgb_images), 4, 224, 224))
    for i in range(len(rgb_images)):
        stacked[i, 0:3, :, :] = rgb_images[i]
        stacked[i, 3, :, :] = ex_maps[i]
    return stacked

data_6D_train = _rgb_plus_ex(x_train, ex_train)
data_6D_test = _rgb_plus_ex(x_test, ex_test)
data_6D_vali = _rgb_plus_ex(x_vali, ex_vali)
10 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
y_ = y ** y_pow
y_norm = y_ / (eps + y_.sum(axis=1).reshape((num_scored_items, 1)))
hist_rater_a = y_norm.sum(axis=0)
hist_rater_b = t.sum(axis=0)
conf_mat = T.dot(y_norm.T, t)
def quad_kappa_log_hybrid_loss_clipped(
        y, t, y_pow=1, log_cutoff=0.9, log_scale=0.5):
    """Hybrid ordinal-classification loss: quadratic-weighted-kappa loss
    plus a scaled log-loss term that is clipped from below at log_cutoff
    (so it only varies once the cross-entropy exceeds the cutoff) and from
    above at 1000.
    """
    xent = log_loss(y, t)
    kappa = quad_kappa_loss(y, t, y_pow=y_pow)
    clipped_xent = T.clip(xent, log_cutoff, 10 ** 3)
    return kappa + log_scale * clipped_xent
11 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'
return model
12 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
13 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# (cell In [20]) Train the 4-channel DME grading model with checkpointing,
# then evaluate on the test split.
#model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), ...)  # (commented-out alternative; truncated in extraction)
from keras.callbacks import ModelCheckpoint
# BUG(extraction): the original line had an unterminated string — the closing
# quote after the .hdf5 path was missing, and the remaining kwargs were cut
# off. Reconstructed; confirm the monitored quantity and kwargs against the
# original notebook.
checkpointer = ModelCheckpoint(filepath="/home/ojperdomoc/DR_3D_acc_kappa_H.hdf5",
                               monitor='val_acc', save_best_only=True)
# NOTE(review): the fit() call was truncated after nb_epoch=nb_epochs; the
# validation data / callbacks / class_weight arguments are reconstructed from
# the surrounding cells (history later contains val_* keys) — confirm.
history = model.fit(data_6D_train, y_train, batch_size=batch_size, nb_epoch=nb_epochs,
                    validation_data=(data_6D_vali, y_vali),
                    callbacks=[checkpointer], class_weight=class_weight, verbose=1)
score = model.evaluate(data_6D_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# list all data in history
print(history.history.keys())

def _plot_history_metric(hist, key, val_key, title):
    """Plot the train/validation curves of one logged metric over epochs."""
    plt.plot(hist[key])
    plt.plot(hist[val_key])
    plt.title('model ' + title)
    plt.ylabel(title)
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()

# Summarize training history for accuracy, loss and the kappa loss term.
# (Refactored from three copy-pasted plotting sections into one helper.)
_plot_history_metric(history.history, 'acc', 'val_acc', 'accuracy')
_plot_history_metric(history.history, 'loss', 'val_loss', 'loss')
_plot_history_metric(history.history, 'quad_kappa_loss', 'val_quad_kappa_loss', 'quad_kappa_loss')
14 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
import numpy as np

# 4x4 count matrix; the sum of row r gives the number of samples of class r.
RGB = np.array([[0, 1, 2, 0], [1, 2, 3, 1], [4, 5, 6, 0], [0, 1, 2, 0]])
# BUG(fixed): the original wrapped these sums in a nested 4x4 for-loop whose
# indices i, j were never used — the same four constants were recomputed 16
# times. The loop is removed; results are identical.
a = np.sum(RGB[0, :])
b = np.sum(RGB[1, :])
c = np.sum(RGB[2, :])
d = np.sum(RGB[3, :])
# Build flat label vectors: `a` zeros, `b` ones, `c` twos, `d` threes.
zero_0 = np.zeros([a], np.float32)
ones_1 = np.ones([b], np.float32)
two_2 = np.ones([c], np.float32) * 2
three_3 = np.ones([d], np.float32) * 3
# Duplicate of the b_loss defined earlier (cell In [9]); this re-definition
# silently rebinds the name to an identical implementation — one copy could
# be removed.
In [24]: def b_loss(x, x_decoded_mean):
# Mean element-wise binary cross-entropy (Keras 1.x backend argument order:
# binary_crossentropy(output, target)).
# NOTE: binary_crossentropy expects a batch_size by dim
# for x and x_decoded_mean, so we MUST flatten these!
xent_loss = K.binary_crossentropy(x_decoded_mean, x).mean()
#kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1
return xent_loss
# Hyper-parameters for training the exudate segmentation branch.
In [ ]: #model = Model(input=input, output=final_conv1)
nb_epoch = 500
# NOTE(review): rebinds batch_size (previously 64) to 2 for the segmentation run.
batch_size = 2
data_augment = True
# Pixel-class weights: background vs. lesion pixels.
class_weight=(0.01,0.99)
#sgd = SGD(lr=1e-3, decay=1e-8, momentum=0.9, nesterov=True)
#model.compile(optimizer=Adam(0.1), loss=b_loss,metrics=['accuracy'])
#model.fit(training_images_DRIVE/255.0, groundTruth_images_DRIVE/255.0, batch_size=32, nb_
# Build an augmentation generator over the exudate training images.
# Images are scaled to [0, 1]; masks are binarized at 0.1.
In [ ]: X_train=np.asarray(training_images_EX).astype('float32')/255.0
Y_train=np.asarray(groundTruth_images_EX).astype('float32')/255.0 > 0.1
datagen = ImageDataGenerator(
featurewise_center=True, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=True, # randomly flip images
shear_range=0.2,
zoom_range=0.2)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(X_train)
15 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Paired image/mask augmentation: two generators share identical transform
# parameters via a common seed so each augmented image stays spatially
# aligned with its augmented mask.
In [ ]: X_train=np.asarray(training_images_EX).astype('float32')/255.0
Y_train=np.asarray(groundTruth_images_EX).astype('float32')/255.0 > 0.1
data_gen_args = dict(featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=True, # randomly flip images
shear_range=0.2,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(X_train, augment=True, seed=seed)
mask_datagen.fit(Y_train, augment=True, seed=seed)
# range(32) serves as dummy "labels" (sample indices) so the two flows can
# be checked for identical sample ordering.
image_generator = image_datagen.flow(
X_train, range(32),
seed=seed)
mask_generator = mask_datagen.flow(
Y_train, range(32),
seed=seed)
#model.fit_generator(image_generator,mask_generator,samples_per_epoch=2,epochs=100)
#train_generator = zip(image_generator[0], mask_generator[0])
# combine generators into one which yields image and masks
#train_generator = zip(image_generator, mask_generator)
#train_generator = image_generator+mask_generator
# Sample image 12 converted to HWC for display.
# NOTE(review): xx5 is not used anywhere below.
xx5 = X_train[12,:,:,:].transpose(1,2,0)
# Visual sanity check: pull one augmented batch from each generator and show
# the first image/mask pair (Python 2 print statements).
In [ ]: out1 = next(image_generator)
out2 = next(mask_generator)
abee=out1[1][:]
abe2=out2[1][:]
# Matching index sequences mean the image and mask streams are synchronized.
print abee, abe2
#print abee
print out1[0].shape
plt.subplot(1,2,1)
plt.imshow(out1[0][0].transpose(1,2,0))
plt.subplot(1,2,2)
plt.imshow(out2[0][0].transpose(1,2,0).squeeze())
16 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
17 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Symbolic (Theano) confusion-matrix counts for batches of masks: both
# 4-D tensors are binarized at 0.1, then TP/FP/TN/FN are summed over the
# spatial axes (2, 3), giving one count per (sample, channel).
In [ ]: y_actual_org = tensor.tensor4()
y_hat_org = tensor.tensor4()
y_actual = y_actual_org > 0.1
y_hat = y_hat_org > 0.1
TP = (y_actual & y_hat).sum(axis=(2, 3))
FP = (~y_actual & y_hat).sum(axis=(2, 3))
TN = (~y_actual & ~y_hat).sum(axis=(2, 3))
FN = (y_actual & ~y_hat).sum(axis=(2, 3))
# Total positives in the ground truth; TP/Total is the sensitivity (recall).
Total = TP + FN
# Smoke test with random inputs.
# NOTE(review): val1 is fed to y_hat_org and val2 to y_actual_org — with
# random data the roles are interchangeable, but confirm before reusing.
val1 = np.random.rand(15,1,224,224).astype('float32')
val2 = np.random.rand(15,1,224,224).astype('float32')
tmp2 = (TP * (1./ Total)).eval({y_hat_org: val1, y_actual_org: val2}).mean()
tmp2
# Duplicate of the evaluation above (same expression, same inputs).
In [ ]: tmp2 = (TP * (1./ Total)).eval({y_hat_org: val1, y_actual_org: val2}).mean()
tmp2
# Prepare the exudate arrays and compile the segmentation model.
from keras import backend as K
from keras.backend import image_dim_ordering, set_image_dim_ordering
from keras.utils.generic_utils import get_from_module  # NOTE(review): unused in this cell
# Fit the model on the batches generated by datagen.flow().
# Images scaled to [0, 1]; masks binarized at 0.1.
X_train = np.asarray(training_images_EX).astype('float32') / 255.0
Y_train = np.asarray(groundTruth_images_EX).astype('float32') / 255.0 > 0.1
X_test = np.asarray(test_images_EX).astype('float32') / 255.0
Y_test = np.asarray(groundTruth_test_images_EX).astype('float32') / 255.0 > 0.1
#sgd = SGD(lr=1e-6, decay=1e-6, momentum=0.9, nesterov=True)
# BUG(extraction): the compile call was cut off after `jaccard_index`; the
# closing of the metrics list is reconstructed — confirm no further metrics
# followed in the original notebook (an earlier variant also had 'fmeasure'
# and 'precision').
model.compile(optimizer=Adam(1e-5), loss=b_loss, metrics=['accuracy', jaccard_index])
18 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Accuracy curves for the first 175 epochs.
plt.plot(history.history['acc'][:175])
plt.plot(history.history['val_acc'][:175])
# BUG(fixed): the title and y-label said "loss" but the plotted series are
# accuracies.
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Run the DRIU exudate output over the train and test images by evaluating
# the symbolic graph directly (Theano eval with the `input` placeholder).
In [ ]: Y_hat = final_conv1.eval({input: X_train})
Y_hat_t= final_conv1.eval({input: X_test})
# Qualitative check on training image 12: input RGB, prediction thresholded
# at 0.1, and ground-truth mask side by side.
In [ ]: #Training
plt.subplot(1,3,1)
plt.imshow(X_train[12,:,:,:].transpose(1,2,0))
plt.subplot(1,3,2)
plt.imshow(Y_hat[12,0,:,:]>0.1)
plt.subplot(1,3,3)
plt.imshow(Y_train[12,0,:,:])
19 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Same qualitative check for test image 10 (prediction and ground truth both
# thresholded at 0.1).
In [ ]: #Test
plt.subplot(1,3,1)
plt.imshow(X_test[10,:,:,:].transpose(1,2,0))
plt.subplot(1,3,2)
plt.imshow(Y_hat_t[10,0,:,:]>0.1)
plt.subplot(1,3,3)
plt.imshow(Y_test[10,0,:,:]>0.1)
In [ ]: #Jaccard index
import numpy as np
from sklearn.metrics import jaccard_similarity_score
#print jaccard_similarity_score(y_true, y_pred)
#print jaccard_similarity_score(y_true, y_pred, normalize=False)
bb=[]
for x in range(0, 32):
aa = jaccard_similarity_score(Y_train[x,0,:,:].flatten(),Y_hat[x,0,:,:].flatten
bb.append(aa)
print 'Jaccard index (training) score is {}'.format(np.mean(bb))
print '----------------------------------------------------------------------'
dd=[]
for x in range(0, 15):
ee = jaccard_similarity_score(Y_test[x,0,:,:].flatten(),Y_hat_t[x,0,:,:].flatten
dd.append(ee)
print 'Jaccard index (test) score is {}'.format(np.mean(dd))
In [ ]: #Dice coefficient
k=1
cc=[]
for x in range(0,32):
dice = np.sum(Y_hat[x,0,:,:][Y_train[x,0,:,:]==k])*2.0 / (np.sum(Y_hat[x,0,:,:])
cc.append(dice)
print 'Dice similarity (training) score is {}'.format(np.mean(cc))
print '-----------------------------------------------------------------------------------
ff=[]
for x in range(0,15):
dice1 = np.sum(Y_hat_t[x,0,:,:][Y_test[x,0,:,:]==k])*2.0 / (np.sum(Y_hat_t[x
ff.append(dice1)
print 'Dice similarity (test) score is {}'.format(np.mean(ff))
# Flatten each of the 32 training prediction/ground-truth pairs in Fortran
# (column-major) order, for the ROC analysis below.
aa2 = []  # flattened predictions
bb2 = []  # flattened ground truths
for idx in range(0, 32):
    aa = Y_hat[idx, 0, :, :].flatten('F')
    bb = Y_train[idx, 0, :, :].flatten('F')
    aa2.append(aa)
    bb2.append(bb)
20 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Training-set ROC analysis: one ROC curve and AUC per flattened image pair.
fpr1=[]
tpr1=[]
thresholds1=[]
AUC_ROC1=[]
# NOTE(review): only the first 24 of the 32 pairs built above are scored —
# confirm whether 24 or 32 training images was intended.
for x in range(0,24):
fpr, tpr, thresholds = roc_curve(bb2[x], aa2[x])
AUC_ROC = roc_auc_score(bb2[x], aa2[x])
fpr1.append(fpr)
tpr1.append(tpr)
thresholds1.append(thresholds)
AUC_ROC1.append(AUC_ROC)
# test_integral = np.trapz(tpr,fpr) #trapz is numpy integration
# Python 2 print; mean AUC over the scored images.
print "\nArea under the ROC curve: " +str(np.mean(AUC_ROC1))
#roc_curve = plt.figure()
# NOTE(review): only the last iteration's (fpr, tpr) is plotted here.
plt.plot(fpr,tpr,'-')#label='Area Under the Curve (AUC = %0.4f)' % np.mean(AUC_ROC1))
plt.title('ROC curve')
plt.xlabel("FPR (False Positive Rate)")
plt.ylabel("TPR (True Positive Rate)")
plt.legend(loc="lower right")
plt.savefig('/home/ojperdomoc/'+"EX_ROC.png")
# Flatten each of the 15 test prediction/ground-truth pairs in Fortran
# (column-major) order, for the test-set ROC / PR analysis below.
aa1 = []  # flattened predictions
bb1 = []  # flattened ground truths
for idx in range(0, 15):
    aa = Y_hat_t[idx, 0, :, :].flatten('F')
    bb = Y_test[idx, 0, :, :].flatten('F')
    aa1.append(aa)
    bb1.append(bb)
# Precision-recall curves for the 15 test images, overlaid on one figure,
# each with its interpolated (monotone non-increasing) envelope.
In [ ]: #AUC curve
import numpy as np
from sklearn.metrics import precision_recall_curve
import sys
import numpy as np
import pylab as pl
from sklearn.metrics import precision_recall_curve
#print bb.shape
for x in range(0,15):
precision, recall, threshold = precision_recall_curve(bb1[x], aa1[x])
#recall = np.linspace(0.0, 1.0, num=42)
#precision = np.random.rand(42)*(1.-recall)
# take a running maximum over the reversed vector of precision values, reverse the
# result to match the order of the recall vector
decreasing_max_precision = np.maximum.accumulate(precision[::-1])[::-1]
#f, ax = plt.subplots()
# NOTE(review): plt.hold was deprecated and later removed in matplotlib.
plt.hold(True)
plt.plot(recall, precision, '--b')
plt.step(recall, decreasing_max_precision, '-r')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.legend(loc="upper right")
#plt.savefig(fname)
21 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Test-set ROC analysis: one ROC curve and AUC per flattened test pair.
fpr2=[]
tpr2=[]
thresholds2=[]
AUC_ROC2=[]
for x in range(0,15):
fpr, tpr, thresholds = roc_curve(bb1[x], aa1[x])
AUC_ROC = roc_auc_score(bb1[x], aa1[x])
fpr2.append(fpr)
tpr2.append(tpr)
thresholds2.append(thresholds)
AUC_ROC2.append(AUC_ROC)
# test_integral = np.trapz(tpr,fpr) #trapz is numpy integration
#roc_curve = plt.figure()
# NOTE(review): only the last iteration's (fpr, tpr) is plotted here.
plt.plot(fpr,tpr,'-')#label='Area Under the Curve (AUC = %0.4f)' % np.mean(AUC_ROC1))
plt.title('ROC curve')
plt.xlabel("FPR (False Positive Rate)")
plt.ylabel("TPR (True Positive Rate)")
plt.legend(loc="lower right")
# Python 2 print; mean AUC over the 15 test images.
print "\nArea under the ROC curve: " +str(np.mean(AUC_ROC2))
# Rebuild the test pairs, this time binarized at 0.1 (rebinds aa1/bb1 from
# the earlier continuous-valued version), and compute one 2x2 confusion
# matrix per test image.
In [ ]: aa1=[]
bb1=[]
for x in range(0,15):
aa=Y_hat_t[x,0,:,:]
aa=aa.flatten('F')>0.1
bb = Y_test[x,0,:,:]
bb=bb.flatten('F')>0.1
aa1.append(aa)
bb1.append(bb)
from sklearn.metrics import confusion_matrix
acc=0
acc2=[]
for x in range(0,15):
acc=confusion_matrix(bb1[x], aa1[x])
acc2.append(acc)
# Per-image sensitivity / specificity / FNR / FPR from the confusion
# matrices, then their means (Python 2 print).
# NOTE(review): this assumes every confusion matrix is 2x2 — if an image
# contains only one class, confusion_matrix returns 1x1 and the indexing
# below raises IndexError; confirm all test masks contain both classes.
In [ ]: sens=[]
spec=[]
FNR_=[]
FPR_=[]
for i in range(0,15):
sensitivity=float(acc2[i][1,1])/(acc2[i][1,1]+acc2[i][1,0])
specificity=float(acc2[i][0,0])/(acc2[i][0,1]+acc2[i][0,0])
FNR=float(acc2[i][1,0])/(acc2[i][1,1]+acc2[i][1,0])
FPR=float(acc2[i][0,1])/(acc2[i][0,1]+acc2[i][0,0])
sens.append(sensitivity)
spec.append(specificity)
FNR_.append(FNR)
FPR_.append(FPR)
print np.mean(sens),np.mean(spec),np.mean(FNR_),np.mean(FPR_)
22 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
# Precision-recall analysis on the *binarized* predictions (aa1/bb1 were
# thresholded at 0.1 in the previous cell).
# NOTE(review): with binary scores each PR curve collapses to very few
# threshold points — confirm whether the raw probabilities were intended.
In [ ]: #AUC curve
import numpy as np
from sklearn.metrics import precision_recall_curve
import sys
import numpy as np
import pylab as pl
from sklearn.metrics import precision_recall_curve
pre=[]
re=[]
tre=[]
dec=[]
#print bb.shape
for x in range(0,15):
precision, recall, threshold = precision_recall_curve(bb1[x], aa1[x])
#recall = np.linspace(0.0, 1.0, num=42)
#precision = np.random.rand(42)*(1.-recall)
# take a running maximum over the reversed vector of precision values, reverse the
# result to match the order of the recall vector
decreasing_max_precision = np.maximum.accumulate(precision[::-1])[::-1]
pre.append(precision)
re.append(recall)
tre.append(threshold)
dec.append(decreasing_max_precision)
#f, ax = plt.subplots()
plt.hold(True)
plt.plot(recall, precision, '--b')
plt.step(recall, decreasing_max_precision, '-r')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.legend(loc="upper right")
#plt.savefig(fname)
# Average precision per test image, plus one saved PR-curve figure.
In [ ]: import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
precision = dict()
recall = dict()
average_precision = dict()
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
lw = 2
for x in range(15):
precision[x], recall[x], _ = precision_recall_curve(bb1[x], aa1[x])
average_precision[x] = average_precision_score(bb1[x], aa1[x])
# Plot Precision-Recall curve
#plt.clf()
# NOTE(review): the curve shown belongs to image 10, but the title reports
# image 0's average precision — confirm which image was intended.
plt.plot(recall[10], precision[10], lw=lw, color='navy',
label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
#plt.show()
plt.savefig('/home/ojperdomoc/'+'Precision-Recall_BloodVessel.png')
23 de 24 2/05/17, 2:21 p. m.
All_DRIU_BVODED_KappaClip https://lisi.unal.edu.co/user/ojperdomoc/notebooks/All_DRIU_BVOD...
In [ ]: np.mean(average_precision.values())
In [ ]:
In [ ]:
In [ ]:
24 de 24 2/05/17, 2:21 p. m.