When it comes to the benign/malignant tumor images, Keras stops making use of the GPU.
There was nothing for it but to switch to CPU-only execution, and the training is running right now.
With the GPU, training finishes in no time; on the CPU it takes thousands, even tens of thousands, of times longer.
Nearly three hours have passed since then, and the training still isn't done.
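Before giving up on the GPU entirely, it is worth checking whether TensorFlow can see it at all; a minimal check, assuming a TensorFlow 2.x backend (on TF 1.x, tf.test.is_gpu_available() gives the same information):

import tensorflow as tf

# An empty list here means TensorFlow cannot see the GPU,
# which would explain the symptom above.
print(tf.config.list_physical_devices('GPU'))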
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"   # hide the GPU; must run before TensorFlow loads
import numpy as np
import pandas as pd
from PIL import Image
import keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense, BatchNormalization, Reshape, UpSampling2D, Conv2D, MaxPooling2D, Flatten
from keras.preprocessing.image import img_to_array

h = 128
w = 128
ch = 1   # grayscale
# label.csv lists one image path and one label per row
trainlist = pd.read_csv("./label.csv", header=0)

# load every image as 128x128 grayscale, scaled to [-1, 1]
train_data = np.zeros((len(trainlist["image"]), h, w, ch))
for i in range(0, len(trainlist["image"])):
    img = Image.open(trainlist["image"][i])
    img = img.convert("L")                      # force grayscale
    img_resize = img.resize((h, w), Image.LANCZOS)
    tmp2 = img_to_array(img_resize)
    train_data[i] = tmp2.reshape((h, w, ch)) / 127.5 - 1.0
# labels as a NumPy array
train_label = trainlist["label"].values
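A quick sanity check right after loading catches shape and scaling mistakes before any training time is spent; a minimal sketch:

# Expect (N, 128, 128, 1) with pixel values in [-1, 1]
print(train_data.shape, train_data.min(), train_data.max())
print(train_label.shape)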
def G_model(Height, Width, channel=3):
    # 100-dim noise -> dense -> (H/4, W/4, 128) -> two 2x upsamplings -> (H, W, channel)
    inputs = Input((100,))
    in_h = int(Height / 4)
    in_w = int(Width / 4)
    x = Dense(in_h * in_w * 128, activation='tanh', name='g_dense1')(inputs)
    x = BatchNormalization()(x)
    x = Reshape((in_h, in_w, 128))(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(64, (5, 5), padding='same', activation='tanh', name='g_conv1')(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(channel, (5, 5), padding='same', activation='tanh', name='g_out')(x)
    model = Model(inputs, x, name='G')
    return model
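Because the generator starts from a (Height/4, Width/4, 128) tensor and upsamples by 2 twice, its output should exactly match the training images; a quick check:

# Expect (None, 128, 128, 1) for the 128x128 grayscale setup above
print(G_model(Height=128, Width=128, channel=1).output_shape)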
def D_model(Height, Width, channel=3):
    # two conv/pool stages, then dense layers ending in a real/fake probability
    inputs = Input((Height, Width, channel))
    x = Conv2D(64, (5, 5), padding='same', activation='tanh', name='d_conv1')(inputs)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(128, (5, 5), padding='same', activation='tanh', name='d_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    x = Dense(1024, activation='relu', name='d_dense1')(x)
    x = Dense(1, activation='sigmoid', name='d_out')(x)
    model = Model(inputs, x, name='D')
    return model
def Combined_model(g, d):
    # G followed by D: training this with D frozen updates only G
    model = Sequential()
    model.add(g)
    model.add(d)
    return model
g = G_model(Height=128, Width=128, channel=1)
d = D_model(Height=128, Width=128, channel=1)
c = Combined_model(g=g, d=d)
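Before compiling anything, a one-batch smoke test confirms the three models fit together; an untrained D should output something around 0.5 for any input:

z = np.random.uniform(-1, 1, size=(1, 100))
fake = g.predict(z, verbose=0)      # one generated 128x128x1 image
print(d.predict(fake, verbose=0))   # D's real/fake probability for it
print(c.predict(z, verbose=0))      # the same path through the stacked model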
# Adam with the usual DCGAN settings (lr=0.0002, beta_1=0.5) for both models
g_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
d_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
# g on its own is only ever used through predict(), so this compile is not strictly needed
g.compile(loss='binary_crossentropy', optimizer='SGD')
# freeze D inside the combined model so that training c updates only the generator
d.trainable = False
for layer in d.layers:
    layer.trainable = False
c.compile(loss='binary_crossentropy', optimizer=g_opt)
# unfreeze D and compile it for its own real-vs-fake training step
d.trainable = True
for layer in d.layers:
    layer.trainable = True
d.compile(loss='binary_crossentropy', optimizer=d_opt)
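This freeze/unfreeze pattern relies on compile() snapshotting the trainable flags: c was compiled while D was frozen, so training c updates only G even though D is unfrozen afterwards. That holds in standalone Keras 2 (which also warns about the trainable discrepancy); in tf.keras the flags are re-read every batch, so it is worth verifying that D really stays fixed during a generator step:

# Sanity check (my addition): D's weights should be unchanged by a c update
w_before = [w.copy() for w in d.get_weights()]
c.train_on_batch(np.random.uniform(-1, 1, (16, 100)), np.array([1] * 16))
print(all(np.array_equal(a, b)
          for a, b in zip(w_before, d.get_weights())))   # expect True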
from sklearn.model_selection import train_test_split

(X_train, X_test, y_train, y_test) = train_test_split(train_data, train_label, test_size=0.3)
# train_data was already scaled to [-1, 1] when it was loaded, so rescaling it
# again with (X_train - 127.5)/127.5 would squash everything to about -1.
X_train = X_train.astype(np.float32)
train_num = X_train.shape[0]
train_num_per_step = train_num // 16   # minibatches per pass over the data (batch size 16)
Minibatch = 16
for ite in range(1, 3001):   # count iterations from 1, as ite += 1 did in the original
    # --- Discriminator training ---
    # cycle through the real-image minibatches
    train_ind = ite % (train_num_per_step - 1)
    y = X_train[train_ind * Minibatch: (train_ind + 1) * Minibatch]
    input_noise = np.random.uniform(-1, 1, size=(Minibatch, 100))
    g_output = g.predict(input_noise, verbose=0)
    # real images are labelled 1, generated images 0
    X = np.concatenate((y, g_output))
    Y = np.array([1] * Minibatch + [0] * Minibatch)
    d_loss = d.train_on_batch(X, Y)
    # --- Generator training: push D's output toward 1 through the frozen D ---
    input_noise = np.random.uniform(-1, 1, size=(Minibatch, 100))
    g_loss = c.train_on_batch(input_noise, np.array([1] * Minibatch))
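Since the loop prints nothing, a long CPU run gives no sign of progress. A small extension logs both losses and writes a sample image every 100 iterations; save_sample and its file-name pattern are my own additions, not part of the original script:

def save_sample(g, ite, path="./sample_%04d.png"):
    # draw one noise vector, generate an image, rescale [-1, 1] -> [0, 255]
    z = np.random.uniform(-1, 1, size=(1, 100))
    img = g.predict(z, verbose=0)[0, :, :, 0]
    img = ((img + 1.0) * 127.5).clip(0, 255).astype(np.uint8)
    Image.fromarray(img).save(path % ite)

# inside the training loop, after g_loss is computed:
#     if ite % 100 == 0:
#         print("ite %d  d_loss %.4f  g_loss %.4f" % (ite, d_loss, g_loss))
#         save_sample(g, ite)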