SOS_token = 0
EOS_token = 1

class Lang:
    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1
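A quick sanity check of the Lang bookkeeping on an arbitrary sentence:

eng = Lang("eng")
eng.addSentence("i am cold .")
print(eng.n_words)             # 6 (SOS, EOS, i, am, cold, .)
print(eng.word2index["cold"])  # 4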
import re

def normalizeString(s):
    t = s
    s = s.lower().strip()
    s = re.sub(r"([.!?])", r" \1", s)
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    if len(s.replace(' ', '')):  # something other than ASCII letters/punctuation remained
        return s
    return t
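normalizeString lowercases, pads sentence-ending punctuation with a space, and collapses everything outside a-z.!? into spaces; if nothing survives (for example a line that is entirely non-ASCII, such as Japanese), the original string is returned unchanged. For example:

print(normalizeString("Go.  "))       # "go ."
print(normalizeString("こんにちは。"))  # returned as-is: no ASCII letters remain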
def readLangs(lang1, lang2):
    print("Reading lines...")
    with open(lang1) as f:
        lines1 = f.readlines()
    with open(lang2) as f:
        lines2 = f.readlines()
    pairs = []
    for l1, l2 in zip(lines1, lines2):
        l1 = normalizeString(l1.rstrip('\n'))
        l2 = normalizeString(l2.rstrip('\n'))
        pairs.append([l1, l2])
    input_lang = Lang(lang1)
    output_lang = Lang(lang2)
    return input_lang, output_lang, pairs
MAX_LENGTH = 10

def filterPair(p):
    return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH

def filterPairs(pairs):
    return [pair for pair in pairs if filterPair(pair)]
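These pieces are typically glued together by a prepareData-style helper; the function below is a sketch under that assumption (read the files, filter out long pairs, then build both vocabularies):

def prepareData(lang1, lang2):
    input_lang, output_lang, pairs = readLangs(lang1, lang2)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs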
    # (decoder half of the train() step; the encoder loop, the zero_grad() calls
    #  and the teacher-forcing decision precede this fragment)
    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing

    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input

            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length
import time
import math

def asMinutes(s):
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
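training_pairs in the next snippet calls tensorsFromPair, which is not shown in this excerpt. A sketch of the usual sentence-to-tensor helpers, assuming torch is installed and that input_lang, output_lang and pairs come from the preparation step above:

import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def indexesFromSentence(lang, sentence):
    return [lang.word2index[word] for word in sentence.split(' ')]

def tensorFromSentence(lang, sentence):
    indexes = indexesFromSentence(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)

def tensorsFromPair(pair):
    input_tensor = tensorFromSentence(input_lang, pair[0])
    target_tensor = tensorFromSentence(output_lang, pair[1])
    return (input_tensor, target_tensor)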
import random
import torch.nn as nn
from torch import optim

def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [tensorsFromPair(random.choice(pairs))
                      for i in range(n_iters)]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder,
                     decoder, encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np

def showPlot(points):
    plt.figure()
    fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
def cost(x):
    return x + P * pow(2, -(x / 1.5))

P = float(input())
left = 0
right = 10**18
for i in range(10**5):
    m1 = (2 * left + right) / 3
    m2 = (left + 2 * right) / 3
    if cost(m1) < cost(m2):
        right = m2
    else:
        left = m1
print(cost(left))
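The cost x + P·2^(−x/1.5) is unimodal in x, so ternary search homes in on its minimum. A standalone sanity check against a coarse grid scan, using an arbitrary P = 600 instead of reading it from input:

def cost(x, P=600.0):  # example P; the real program reads P from stdin
    return x + P * pow(2, -(x / 1.5))

left, right = 0.0, 1e18
for _ in range(200):
    m1 = (2 * left + right) / 3
    m2 = (left + 2 * right) / 3
    if cost(m1) < cost(m2):
        right = m2
    else:
        left = m1

grid_best = min(cost(x / 100) for x in range(10000))
print(cost(left), grid_best)  # both come out around 14.34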
from itertools import accumulate

def time_to_seconds(time):
    h, m, s = time.split(':')
    return int(h) * 3600 + int(m) * 60 + int(s)

ans = []
while True:
    N = int(input())
    if N == 0:
        break
    A = [0] * 86400
    for n in range(N):
        begin, end = input().split()
        begin = time_to_seconds(begin)
        end = time_to_seconds(end)
        A[begin] += 1
        A[end] -= 1
    cumsum = accumulate(A)
    ans.append(max(cumsum))
def time_to_seconds(time):
    h, m, s = time.split(':')
    return int(h) * 3600 + int(m) * 60 + int(s)

ans = []
while True:
    N = int(input())
    if N == 0:
        break
    A = []
    for n in range(N):
        begin, end = input().split()
        begin = time_to_seconds(begin)
        end = time_to_seconds(end)
        A.append((begin, 1))
        A.append((end, -1))
    A.sort()
    mx = 0
    cumsum = 0
    for _, x in A:
        cumsum += x
        mx = max(mx, cumsum)
    ans.append(mx)
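Both versions compute the same maximum overlap: the first walks a fixed 86400-second difference array and takes a cumulative sum, the second sorts 2N events and keeps a running count. A tiny self-contained check of the event-sweep idea on made-up intervals (the helper name max_overlap is an assumption for this sketch):

def max_overlap(intervals):
    # intervals: list of (begin, end) pairs in seconds, treated as half-open [begin, end)
    events = []
    for begin, end in intervals:
        events.append((begin, 1))
        events.append((end, -1))
    events.sort()
    best = cur = 0
    for _, delta in events:
        cur += delta
        best = max(best, cur)
    return best

# Three intervals, two of which overlap between t=5 and t=8
print(max_overlap([(0, 10), (5, 8), (10, 20)]))  # -> 2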
import bisect

N, M = map(int, input().split())
P = [0]
for n in range(N):
    P.append(int(input()))
P.sort()
S = []
for p1 in P:
    for p2 in P:
        S.append(p1 + p2)
S.sort()
ans = 0
for s in S:
    if M < s:
        break
    i = bisect.bisect(S, M - s) - 1
    ans = max(s + S[i], ans)
print(ans)
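Adding a dummy 0 to P lets "two pair-sums" cover every case of zero to four throws. A brute-force cross-check of the same idea on small random instances (solve_fast and solve_naive are hypothetical names used only for this sketch):

import bisect
import itertools
import random

def solve_fast(points, M):
    P = sorted(points + [0])
    S = sorted(p1 + p2 for p1 in P for p2 in P)
    ans = 0
    for s in S:
        if M < s:
            break
        i = bisect.bisect(S, M - s) - 1
        ans = max(ans, s + S[i])
    return ans

def solve_naive(points, M):
    P = points + [0]
    best = 0
    for combo in itertools.product(P, repeat=4):  # up to four throws, 0 = no throw
        total = sum(combo)
        if total <= M:
            best = max(best, total)
    return best

for _ in range(100):
    pts = [random.randint(1, 50) for _ in range(5)]
    M = random.randint(1, 150)
    assert solve_fast(pts, M) == solve_naive(pts, M)
print("ok")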
N = int(input())
adj = [[] for i in range(N)]
for n in range(N):
    V = list(map(int, input().split()))
    for v in V[2:]:  # each line is: u, k, v1, v2, ...
        adj[n].append(v - 1)

d = [0] * N  # discovery time
f = [0] * N  # finish time

def dfs(v, t):
    t += 1  # increment on discovery
    d[v] = t
    for next in adj[v]:
        if d[next] == 0:  # not discovered yet
            t = dfs(next, t)
    t += 1  # also increment on finish
    f[v] = t
    return t

t = 0
for n in range(N):
    if d[n] == 0:  # not discovered yet
        t = dfs(n, t)
    print(n + 1, d[n], f[n])
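For example, with the input

4
1 1 2
2 1 4
3 0
4 1 3

the search discovers the vertices in the order 1, 2, 4, 3 and finishes them in reverse, so the program prints

1 1 8
2 2 7
3 4 5
4 3 6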
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

breast_cancer = load_breast_cancer()
X = breast_cancer.data
y = breast_cancer.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
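LogisticRegression is imported above but not yet used; a minimal continuation might look like this (max_iter is raised because the default lbfgs solver can hit its iteration limit on the unscaled breast-cancer features):

model = LogisticRegression(max_iter=10000)
model.fit(X_train, y_train)
print("train accuracy:", model.score(X_train, y_train))
print("test accuracy:", model.score(X_test, y_test))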