import TensorFlow let tensor = Tensor<Float>(shape: [], scalars: [42]) print(tensor)
(edited)42.0
stderr:2018-04-27 03:07:26.171304: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX
(edited)Usage: @swiftTensorFlow [SWIFT_OPTIONS] ``` [Swift Code] ```
import TensorFlow func cube<T : FloatingPoint>(_ x: T, _ str: String) -> T { print(str) return x * x * x } let dCube_dx = #gradient(of: cube, withRespectTo: .0) let _ = cube(5, "hi") let _ = dCube_dx(5, "hi")
main.swift:6:16: error: generic parameter 'T' could not be inferred let dCube_dx = #gradient(of: cube, withRespectTo: .0) ^ main.swift:2:6: note: in call to function 'cube' func cube<T : FloatingPoint>(_ x: T, _ str: String) -> T { ^
import Python let np = Python.import("numpy") print(np) let ary = np.array.call(with:[1,2,3]) print(ary)
<module 'numpy' from '/usr/local/lib/python2.7/dist-packages/numpy/__init__.pyc'> [1 2 3]
import Python let np = Python.import("numpy") let ary = np.array([1,2,3]) print(ary)
main.swift:3:19: error: cannot call value of non-function type 'PyValue' let ary = np.array([1,2,3]) ~~~~~~~~^
np.array([1,2,3])
でいけそうだったけどだめっぽい
import Python let np = Python.import("numpy") let ary = np.fooooooooooo.call(with:[1,2,3])
#0 0x0000000004176bb4 PrintStackTraceSignalHandler(void*) (/usr/bin/swift+0x4176bb4) #1 0x0000000004176ef6 SignalHandler(int) (/usr/bin/swift+0x4176ef6) #2 0x00007f40648f3390 __restore_rt (/lib/x86_64-linux-gnu/libpthread.so.0+0x11390) #3 0x00007f4052c014cd $S6Python7PyValueV13dynamicMemberACSS_tcig (/usr/lib/swift/linux/libswiftPython.so+0x114cd) #4 0x00007f4064d2108f #5 0x000000000108b39e llvm::MCJIT::runFunction(llvm::Function*, llvm::ArrayRef<llvm::GenericValue>) (/usr/bin/swift+0x108b39e) #6 0x000000000108f4c2 llvm::ExecutionEngine::runFunctionAsMain(llvm::Function*, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, char const* const*) (/usr/bin/swift+0x108f4c2) #7 0x00000000004f90a2 swift::RunImmediately(swift::CompilerInstance&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, swift::IRGenOptions&, swift::SILOptions const&) (/usr/bin/swift+0x4f90a2) #8 0x00000000004e12ac performCompile(swift::CompilerInstance&, swift::CompilerInvocation&, llvm::ArrayRef<char const*>, int&, swift::FrontendObserver*, swift::UnifiedStatsReporter*) (/usr/bin/swift+0x4e12ac) #9 0x00000000004dc72c swift::performFrontend(llvm::ArrayRef<char const*>, char const*, void*, swift::FrontendObserver*) (/usr/bin/swift+0x4dc72c) #10 0x000000000048ce4d main (/usr/bin/swift+0x48ce4d) #11 0x00007f405ead4830 __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x20830) #12 0x000000000048a6a9 _start (/usr/bin/swift+0x48a6a9) Stack dump: 0. Program arguments: /usr/bin/swift -frontend -interpret main.swift -disable-objc-interop -I /RxSwift/.build/x86_64-unknown-linux/debug -module-name main -lRxSwift
~/Library/Developer/Xcode/UserData/
に入れればすぐ出ます。
let ary = np.array([1,2,3])
の代わりに let ary = np.array.call(with:[1,2,3])
としなきゃいけないのは、@dynamicCallable
がまだ提案中だから未実装だよと明記されていました。 https://github.com/tensorflow/swift/blob/master/docs/PythonInteroperability.md#dynamically-callable-types
utils/build-script -Rt
して、できた swift
で import TensorFlow
ができなくてハマってる。 func expandingShape(at shapeIndex: Int32) -> Tensor { return #tfop("ExpandDims", handle, Tensor<Int32>(shapeIndex), Tdim: Int32.self) }
#tfop
使って TensorFlow と連携してるけど、 Python は介在してないんじゃないかな?ちゃんと調べたわけじゃないけど。
utils/build-script --enable-tensorflow --release-debuginfo
やってみます。 /// Lower the specified SIL function (which was formed by the partitioner) /// into a TensorFlow graph, and encode into a vector of bytes. /// std::vector<char> lowerTFGraph(SILFunction *fn);
Finally, while TensorFlow is the reason we built this infrastructure, its algorithms are independent of TensorFlow itself: the same compiler transformation can extract any computation that executes asynchronously from the host program while communicating through sends and receives. This is useful and can be applied to anything that represents computation as a graph, including other ML frameworks, other kinds of accelerators (for cryptography, graphics, transcoding, etc), and general distributed systems programming models based on graph abstractions. We are interested in exploring new applications of this algorithm in the future.
@Chris_Lattner3
のまま定着しちゃったの?
swift-tensorflow-DEVELOPMENT-2018-06-01-a
で@dynamicCallable
が使える様になったぽい。 @swift-tensorflow
import TensorFlow import Python // NumPy example: let np = Python.import("numpy") // import numpy as np let a = np.arange(15).reshape(3, 5) // a = np.arange(15).reshape(3, 5) let b = np.array([6, 7, 8]) // b = np.array([6, 7, 8])
(edited)sudo xcode-select -s /Applications/Xcode-beta.app/
しないと実行できなくなってた。
a.dot(b)
と a ⊗ b
なくなってた
https://github.com/apple/swift/commit/6d1cc673ce6091450ad1f23445839bf82c1bebc5#diff-c0c39336cae41de4fb55e3dfadeed5a5
a.dotProduct(with: b)
だったのを a.dot(b)
に直してたんだけど・・・。@available(*, renamed: "matmul(_:_:)")
@dynamicCallable
が入ってるんだけど、まだ未完成で色々問題があるっぽい。 (edited)@dynamicCallable
/ @dynamicMemberLookup
使って scikit-learn 使ってみてるけどなかなか楽しい。
func lossAndGradient(for input: Tensor<Float>, using model: IrisParameters, labels: Tensor<Int32>) -> (Float, IrisParameters) { let (loss, (_, modelGrad)) = #valueAndGradient(loss(for:using:labels:), wrt: .0, .1)(input, model, labels) return (loss, modelGrad) }
Tensor
とかも。
Eugene Burmako is working on Swift for TensorFlow at Google. Before joining Google, he made major contributions to Scala at EPFL and Twitter, founding Reasonable Scala compiler, Scalameta and Scala macros. Eugene loves compilers, and his mission is to change the world with compiler technology.
import TensorFlow let ten: Tensor<Float> = [0, 0.3, 0.5, 0.7, 1.0] print(sigmoid(ten)) print(1 / (1 + exp(-ten))) for i in 0..<5 { print("###\(ten[i])") print(gradient(of: sigmoid)(ten[i])) print(gradient(of: { 1 / (1 + exp(-$0)) })(ten[i])) }
[ 0.5, 0.5744425, 0.6224593, 0.66818774, 0.7310586] [ 0.5, 0.5744425, 0.62245935, 0.66818774, 0.7310586] ###0.0 0.0 0.25 ###0.3 0.21000001 0.24445829 ###0.5 0.25 0.23500371 ###0.7 0.21000001 0.22171286 ###1.0 0.0 0.19661197
print(gradient(of: sigmoid)(sigmoid(ten[i])))
だと正しいみたいです。 出力値を渡す仕様なのかと思いましたが、sqrt とかは入力値で微分値が出るんですよね
import TensorFlow var x: Tensor<Float> = [1.0] print(gradient(of: { sigmoid($0) })(x[0])) // 0.0 -> wrong? print(gradient(of: sigmoid)(sigmoid(x[0]))) // 0.19... -> correct? print(gradient(of: sqrt)(x[0])) // 0.5 -> correct
/// Returns the sigmoid of the specified tensor element-wise. /// Specifically, computes `1 / (1 + exp(-x))`. @inlinable @differentiable(vjp: _vjpSigmoid) public func sigmoid<T: TensorFlowFloatingPoint>(_ x: Tensor<T>) -> Tensor<T> { Raw.sigmoid(x) } @inlinable internal func _vjpSigmoid<T: TensorFlowFloatingPoint>( _ x: Tensor<T> ) -> (Tensor<T>, (Tensor<T>) -> Tensor<T>) { (sigmoid(x), { v in Raw.sigmoidGrad(x, dy: v) }) }
Raw.sigmoidGrad
がこれなのでyを渡すべきところでxを渡してる? https://raw.githubusercontent.com/tensorflow/swift-apis/master/Sources/TensorFlow/Bindings/RawOpsGenerated.swift
/// Computes the gradient of the sigmoid of `x` wrt its input. /// /// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and /// `dy` is the corresponding input gradient. @inlinable @inline(__always) public static func sigmoidGrad<T: FloatingPoint & TensorFlowScalar>( _ y: Tensor<T>, dy: Tensor<T> ) -> Tensor<T>
(edited)FILE*
に見えるメモリは使えるよ!jupyter-kernelspec list
で Swift が表示されなくなってしまったのですが、他の皆さんはいかがでしょうか…。 https://twitter.com/treastrain/status/1374339335407894531