query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Registers a backward hook on the used to save the gradients of the embeddings for use in get_gradients() when there are multiple inputs (e.g., a passage and question), the hook will be called multiple times. We append all the embeddings gradients to a list.
def _register_embedding_gradient_hooks(self, embedding_gradients): def hook_layers(module, grad_in, grad_out): embedding_gradients.append(grad_out[0]) backward_hooks = [] embedding_layer = self.get_embeddings_layer() backward_hooks.append(embedding_layer.register_backward_hook(hook_layers)) return backward_hooks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _register_post_backward_hooks(self) -> None:\n if not torch.is_grad_enabled():\n return # don't register grad hooks if grad isn't enabled\n for p in self.full_params:\n if p.requires_grad:\n if hasattr(p, \"_shard_bwd_hook\"):\n continue\n # Register a hook on the first ...
[ "0.744036", "0.7207515", "0.68466437", "0.6619593", "0.6545222", "0.65270805", "0.6505459", "0.64318466", "0.6385007", "0.63628876", "0.63604456", "0.63604456", "0.62412906", "0.6231469", "0.62253386", "0.61755383", "0.609307", "0.60908824", "0.60908824", "0.60785884", "0.603...
0.8470942
0
some tokenizers don't have 'eos_token' and 'bos_token' attributes. Thus, we need some trick to get them.
def special_tokens(self, ): if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None: special_tokens = self.tokenizer.build_inputs_with_special_tokens([]) special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens) self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token return special_tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def synth_tokens(self):\n if self.lliagraph:\n return self.lliagraph.synth_tokens.items()\n else:\n return []", "def parse(self, tokenizer):\n pass", "def tokens():\n pass", "def get_tokens(self, document):\n raise NotImplementedError()", "def get_tokens...
[ "0.6021182", "0.5951054", "0.58588576", "0.5789484", "0.5745784", "0.56824887", "0.5654901", "0.5648043", "0.5638188", "0.5631488", "0.5630968", "0.56133425", "0.5593994", "0.5592292", "0.5575906", "0.55579054", "0.5544896", "0.549606", "0.54886556", "0.54866934", "0.5471206"...
0.62158376
0
Compute the cosine similarity between each word in the vocab and each word in the source
def _pairwise_dot_product(self, src_embeds, vocab_embeds, cosine=False): if cosine: src_embeds = F.normalize(src_embeds, dim=-1, p=2) vocab_embeds = F.normalize(vocab_embeds, dim=-1, p=2) # dot product dot_product = torch.einsum("bij,kj->bik", (src_embeds, vocab_embeds)) return dot_product
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]", "def cosine_similarit...
[ "0.8221675", "0.7763575", "0.74975723", "0.7489508", "0.7445475", "0.7369543", "0.73244905", "0.728064", "0.7235221", "0.7210119", "0.7207516", "0.7196803", "0.71352804", "0.7082981", "0.7055071", "0.70352226", "0.69762695", "0.6966904", "0.69279665", "0.6920698", "0.69188845...
0.0
-1
Compute the euclidean distance between each word in the vocab and each word in the source.
def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False): # compute square norm to avoid compute all the directions vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2 src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2 # dot product dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds) # reshape for broadcasting vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1 # compute squared difference sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product if squared: return sq_norm else: # relu + epsilon for numerical stability sq_norm = F.relu(sq_norm) + 1e-20 # take the square root return sq_norm.sqrt()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = d...
[ "0.7123041", "0.6798625", "0.6672034", "0.64250857", "0.6401553", "0.6378859", "0.63724816", "0.6371111", "0.6301999", "0.6265582", "0.6255312", "0.62346095", "0.62243164", "0.62200135", "0.62105745", "0.6194787", "0.6186329", "0.6180648", "0.6174867", "0.61723065", "0.617060...
0.747233
0
If your model receive inputs in another way or you computing not like in this example simply override this method.
def forward_step(self, batch): input_ids = torch.as_tensor(batch.input_ids).to(self.device).reshape((1, -1)) # batch.get('input_ids').to(self.device) attention_mask = torch.as_tensor(batch.attention_mask).to(self.device).reshape((1, -1)) # batch.get('attention_mask').to(self.device) outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)[0] _, _, num_label = outputs.shape """ outputs : (batch, seq_length, feat_dim) => (seq_length, feat_dim) labels : (batch, seq_length) => (seq_length,) """ outputs = outputs.view(-1, num_label) labels = torch.argmax(outputs, dim=1) # torch.argmax(outputs, dim=1) batch_losses = self.criterion(outputs, labels) loss = torch.mean(batch_losses) # mean average self.batch_output = [input_ids, outputs] return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dynamic_model(self, input_val: float) -> float:\n pass", "def inputs(self):\n return NotImplementedError", "def __call__(self, *inputs):\n raise NotImplementedError", "def processInputs(self):", "def __call__(self, n_input: int) -> Model:\n\n raise NotImplementedError()", "def...
[ "0.687905", "0.6863665", "0.6783461", "0.66524166", "0.6625633", "0.660951", "0.6529107", "0.65070593", "0.65055317", "0.6484396", "0.6459398", "0.64080626", "0.63803595", "0.636314", "0.636314", "0.636314", "0.636314", "0.6327075", "0.63064533", "0.62982094", "0.6289941", ...
0.0
-1
You can override this method if you want to change the format of outputs (e.g., storing gradients)
def update_output(self, ): input_ids, outputs, grads, adv_tokens = self.batch_output probs = softmax(outputs, dim=-1) probs, labels = torch.max(probs, dim=-1) tokens = [ self.tokenizer.convert_ids_to_tokens(input_ids_) for input_ids_ in input_ids ] embedding_grads = grads.sum(dim=2) # norm for each sequence norms = torch.norm(embedding_grads, dim=1, p=2) # need check hyperparameter # normalizing for i, norm in enumerate(norms): embedding_grads[i] = torch.abs(embedding_grads[i]) / norm batch_output = [] # check probs, labels shape labels = torch.reshape(labels, (1, -1)) probs = torch.reshape(probs, (1, -1)) iterator = zip(tokens, probs, embedding_grads, labels) for example_tokens, example_prob, example_grad, example_label in iterator: example_dict = dict() # as we do it by batches we has a padding so we need to remove it example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token] example_dict['tokens'] = example_tokens example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)] example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)] # example_label.item() example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)] # example_prob.item() batch_output.append(example_dict) return batch_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def out(self, inputs):", "def _save_grad_output(self, mod, grad_input, grad_output):\n if mod.training:\n self.state[mod][\"gy\"] = grad_output[0] * grad_output[0].size(0)", "def _output_update(self):\n self._outputtype = self.inputs.outputtype", "def outputs(self):\n return s...
[ "0.6842611", "0.63906467", "0.62235004", "0.62234104", "0.62234104", "0.62234104", "0.62234104", "0.6200916", "0.61630815", "0.6138572", "0.6102943", "0.6075281", "0.6048927", "0.60099345", "0.60045785", "0.59973353", "0.5995878", "0.59723157", "0.5926936", "0.5917695", "0.59...
0.0
-1
Convert a single position. This is done for easy code sharing with other tools. Skyfield does support arrays of positions.
def _convert_radec_to_altaz(ra, dec, lon, lat, height, time): radec = Star(ra=Angle(degrees=ra), dec=Angle(degrees=dec)) earth = load(EPHEMERIS)['earth'] location = earth + Topos(longitude_degrees=lon, latitude_degrees=lat, elevation_m=height * 1000.0) ts = load.timescale() obstime = ts.from_astropy(Time(time, scale='utc')) alt, az, _ = location.at(obstime).observe(radec).apparent().altaz(pressure_mbar=0) return dict(az=az.degrees, alt=alt.degrees)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])", "def get_position(self, position):", "def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)", "def positions_to_coords(self, positions):\n return [self.to_coords(px, py) for (px, py) in po...
[ "0.6353608", "0.62130916", "0.6069126", "0.60215694", "0.6008328", "0.59653753", "0.59585685", "0.59379125", "0.59244585", "0.5815883", "0.5807676", "0.5784127", "0.57744235", "0.5750637", "0.57444346", "0.5740382", "0.5734146", "0.57155126", "0.5699236", "0.5683348", "0.5664...
0.0
-1
If TASK_USE_PATH is set rely on PATH to look for task binaries. Otherwise ../src/ is used by default.
def task_binary_location(cmd="task"): return binary_location(cmd, TASK_USE_PATH)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_task_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tasks')", "def test_taskmod_no_taskfile(modpath):\n sys.meta_path.append(TaskImporter())\n task = import_module(modpath)\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert...
[ "0.6477157", "0.57612556", "0.5689633", "0.56765836", "0.55889344", "0.55805033", "0.55210114", "0.5507863", "0.5440326", "0.5436243", "0.54087734", "0.5403501", "0.5345792", "0.5335856", "0.53229564", "0.5279437", "0.5270501", "0.5263024", "0.5223266", "0.52160585", "0.51700...
0.71928936
0
If USE_PATH is True rely on PATH to look for binaries. Otherwise ../src/ is used by default.
def binary_location(cmd, USE_PATH=False): if USE_PATH: return cmd else: return os.path.join(BIN_PREFIX, cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def set_path():\n impor...
[ "0.64583826", "0.6119704", "0.60500836", "0.5732794", "0.5658576", "0.56554246", "0.5512849", "0.5505444", "0.54935056", "0.5483672", "0.5481498", "0.5455168", "0.5439266", "0.5433778", "0.54287785", "0.5424196", "0.5423426", "0.5394256", "0.53781176", "0.5329684", "0.5325822...
0.6302872
1
Wait for condition to return anything other than None
def wait_condition(cond, timeout=1, sleeptime=.01): # NOTE Increasing sleeptime can dramatically increase testsuite runtime # It also reduces CPU load significantly if timeout is None: timeout = 1 if timeout < sleeptime: print("Warning, timeout cannot be smaller than", sleeptime) timeout = sleeptime # Max number of attempts until giving up tries = int(timeout / sleeptime) for i in range(tries): val = cond() if val is not None: break sleep(sleeptime) return val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def waitUntilSuccess():", "def _wait_for(self, check_func, desc, result=False, timeout=200):\r\n if result:\r\n return Promise(check_func, desc, timeout=timeout).fulfill()\r\n else:\r\n return EmptyPromise(check_func, desc, timeout=timeout).fulfill()", "def await_condition(c...
[ "0.7600536", "0.71974915", "0.69235563", "0.6832884", "0.68265367", "0.66899425", "0.66899425", "0.66899425", "0.66899425", "0.6640798", "0.66129583", "0.655346", "0.6533913", "0.6519288", "0.6512367", "0.6503648", "0.6491821", "0.6419775", "0.6390421", "0.63593", "0.6358844"...
0.66056544
11
Wait for process to finish
def wait_process(pid, timeout=None): def process(): try: os.kill(pid, 0) except OSError: # Process is dead return True else: # Process is still ticking return None return wait_condition(process, timeout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_finish(self):\r\n self.proc.join()", "def wait(self):\n self.Popen.wait()", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait(self):\n\n for output in self.proc.communicate():\n if output is...
[ "0.8497128", "0.7932346", "0.7532554", "0.7532554", "0.7532554", "0.7532554", "0.7501503", "0.73955196", "0.72686297", "0.72191155", "0.7064616", "0.7028927", "0.701474", "0.69731134", "0.6924637", "0.68755513", "0.68720436", "0.6823285", "0.6817343", "0.68091136", "0.6809113...
0.0
-1
Read/Write output/input of given process. This function is meant to be executed in a thread as it may block
def _queue_output(arguments, pidq, outputq): kwargs = arguments["process"] input_data = arguments["input"].encode("utf-8") if arguments["input"] else None try: proc = Popen(**kwargs) except OSError as e: # pid None is read by the main thread as a crash of the process pidq.put(None) outputq.put(( "", ("Unexpected exception caught during execution of taskw: '{0}' . " "If you are running out-of-tree tests set TASK_USE_PATH=1 " "in shell env before execution and add the " "location of the task(d) binary to the PATH".format(e)), 255)) # false exitcode return # Put the PID in the queue for main process to know. pidq.put(proc.pid) # Send input and wait for finish out, err = proc.communicate(input_data) if sys.version_info > (3,): out, err = out.decode('utf-8'), err.decode('utf-8') # Give the output back to the caller outputq.put((out, err, proc.returncode))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n...
[ "0.6465506", "0.6412228", "0.62609875", "0.6253763", "0.61260885", "0.6119722", "0.60304314", "0.59641206", "0.59629256", "0.5924618", "0.59051806", "0.5897396", "0.5815422", "0.57975465", "0.57944274", "0.578926", "0.57733274", "0.5732314", "0.5685502", "0.56787324", "0.5650...
0.6050411
6
Fetch output from taskw subprocess queues
def _retrieve_output(thread, timeout, queue, thread_error): # Try to join the thread on failure abort thread.join(timeout) if thread.is_alive(): # Join should have killed the thread. This is unexpected raise TimeoutWaitingFor(thread_error + ". Unexpected error") # Thread died so we should have output try: # data = (stdout, stderr, exitcode) data = queue.get(timeout=timeout) except Empty: data = TimeoutWaitingFor("streams from TaskWarrior") return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__out...
[ "0.68057764", "0.68057764", "0.68057764", "0.68057764", "0.68057764", "0.68057764", "0.6712736", "0.6354121", "0.6280556", "0.626264", "0.6234594", "0.6165189", "0.608507", "0.6033077", "0.59824157", "0.5962959", "0.59550273", "0.5916649", "0.59026563", "0.584164", "0.5835302...
0.6057725
13
Collect output from the subprocess without blocking the main process if subprocess hangs.
def _get_output(arguments, timeout=None): # NOTE Increase this value if tests fail with None being received as # stdout/stderr instead of the expected content output_timeout = 0.1 # seconds pidq = Queue() outputq = Queue() t = Thread(target=_queue_output, args=(arguments, pidq, outputq)) t.daemon = True t.start() try: pid = pidq.get(timeout=timeout) except Empty: pid = None # Process crashed or timed out for some reason if pid is None: return _retrieve_output(t, output_timeout, outputq, "TaskWarrior to start") # Wait for process to finish (normal execution) state = wait_process(pid, timeout) if state: # Process finished return _retrieve_output(t, output_timeout, outputq, "TaskWarrior thread to join") # If we reach this point we assume the process got stuck or timed out for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL): # Start with lower signals and escalate if process ignores them try: os.kill(pid, signal.SIGABRT) except OSError as e: # ESRCH means the process finished/died between last check and now if e.errno != errno.ESRCH: raise # Wait for process to finish (should die/exit after signal) state = wait_process(pid, timeout) if state: # Process finished return _retrieve_output(t, output_timeout, outputq, "TaskWarrior to die") # This should never happen but in case something goes really bad raise OSError("TaskWarrior stopped responding and couldn't be killed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File posit...
[ "0.7034326", "0.66883934", "0.63439405", "0.6276364", "0.6119922", "0.6063825", "0.5989354", "0.588833", "0.5796989", "0.578598", "0.56847566", "0.5676479", "0.5609866", "0.5598541", "0.55334884", "0.55334884", "0.55334884", "0.55334884", "0.55307305", "0.5524578", "0.5516227...
0.553726
14
Keep an inmemory cache of function results given its inputs
def memoize(obj): cache = obj.cache = {} @functools.wraps(obj) def memoizer(*args, **kwargs): key = str(args) + str(kwargs) if key not in cache: cache[key] = obj(*args, **kwargs) return cache[key] return memoizer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{}...
[ "0.7994633", "0.7447933", "0.7371433", "0.73587483", "0.73249936", "0.73011243", "0.7296001", "0.7275171", "0.72417337", "0.7185484", "0.71638167", "0.7161567", "0.71558344", "0.71247685", "0.7054305", "0.7026194", "0.6991613", "0.6981695", "0.6979137", "0.6976191", "0.696795...
0.0
-1
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
def which(cmd, mode=os.F_OK | os.X_OK, path=None): # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly # rather than referring to PATH directories. This includes checking # relative to the current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if os.curdir not in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path # extensions. This will allow us to short circuit when given # "python.exe". If it does match, only test that one, otherwise we # have to try others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n ...
[ "0.77106726", "0.7453694", "0.727641", "0.6943331", "0.6540709", "0.639578", "0.60801244", "0.59047854", "0.5880955", "0.5878199", "0.57527226", "0.5732597", "0.5709009", "0.5672643", "0.5576627", "0.5575957", "0.55535084", "0.5497285", "0.5452454", "0.5445605", "0.5425212", ...
0.7460819
1
Parse .data files on the client and server treating files as JSON
def parse_datafile(file): data = [] with open(file) as fh: for line in fh: line = line.rstrip("\n") # Turn [] strings into {} to be treated properly as JSON hashes if line.startswith('[') and line.endswith(']'): line = '{' + line[1:-1] + '}' if line.startswith("{"): data.append(json.loads(line)) else: data.append(line) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ingest_json_file(request):\n path = save_file(request) \n try:\n with open(path, encoding='utf-8') as f:\n data = json.loads(f.read())\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data", "def _get_data_file(self, data_path):\n\n ...
[ "0.6873522", "0.660964", "0.6609381", "0.6535058", "0.6535058", "0.64858943", "0.6374787", "0.63390714", "0.63368833", "0.62513703", "0.6144247", "0.61405396", "0.61333793", "0.6131228", "0.61294943", "0.6124854", "0.61158955", "0.6082589", "0.6079711", "0.604982", "0.6040914...
0.63090736
9
Create a temporary file that is removed at process exit
def mkstemp(data): def rmtemp(name): try: os.remove(name) except OSError: pass f = tempfile.NamedTemporaryFile(delete=False) f.write(data.encode('utf-8') if not isinstance(data, bytes) else data) f.close() # Ensure removal at end of python session atexit.register(rmtemp, f.name) return f.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def temporary_file(request):\n file_handle, path = tempfile.mkstemp()\n os.close(file_handle)\n\n def cleanup():\n \"\"\"Remove temporary file.\"\"\"\n try:\n os.remove(path)\n except OSError:\n pass\n\n request.addfinalizer(cleanup)\n return path", "def ...
[ "0.7820664", "0.781029", "0.74709785", "0.74351716", "0.7366692", "0.72586644", "0.724302", "0.72234035", "0.7195028", "0.71175826", "0.7109041", "0.7028888", "0.7014582", "0.6984096", "0.6944709", "0.6938682", "0.6842143", "0.683967", "0.6832244", "0.6778418", "0.67015445", ...
0.69565785
14
Create a temporary executable file that is removed at process exit
def mkstemp_exec(data): name = mkstemp(data) os.chmod(name, 0o755) return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_temporary_file():\n f = NamedTemporaryFile(delete=False)\n return f.name", "def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name", "def tempfile():\n return mkstemp()[1]", "def temporary_file(request):\n file_handle, path = tempfile.mkstemp()\n ...
[ "0.66870767", "0.65108424", "0.63702273", "0.63672537", "0.63322586", "0.6330699", "0.6312687", "0.629912", "0.6277841", "0.62701666", "0.62639666", "0.6252135", "0.6187429", "0.61593467", "0.6120302", "0.6027149", "0.59966457", "0.59905773", "0.5983607", "0.5978929", "0.5957...
0.6263371
12
Helper function to compute value for fields debit/credit/amount_currency based on an amount and the currencies given in parameter
def compute_amount_fields(self, amount, src_currency, company_currency, invoice_currency=False): amount_currency = False currency_id = False if src_currency and src_currency != company_currency: amount_currency = amount amount = src_currency.with_context(self._context).compute(amount, company_currency) currency_id = src_currency.id debit = amount > 0 and amount or 0.0 credit = amount < 0 and -amount or 0.0 if invoice_currency and invoice_currency != company_currency and not amount_currency: amount_currency = src_currency.with_context(self._context).compute(amount, invoice_currency) currency_id = invoice_currency.id return debit, credit, amount_currency, currency_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_amount_fields(self, amount, src_currency, company_currency):\n amount_currency = False\n currency_id = False\n date = self.env.context.get('date') or fields.Date.today()\n company = self.env.context.get('company_id')\n company = self.env['res.company'].browse(company...
[ "0.76655096", "0.6802662", "0.67237234", "0.6577972", "0.65410954", "0.6217159", "0.620707", "0.61840576", "0.59770834", "0.59099656", "0.5873708", "0.5872332", "0.58294046", "0.58154744", "0.578612", "0.57646793", "0.5753153", "0.57461447", "0.57427317", "0.57381535", "0.572...
0.71101564
1
Aim to create a sidebard card.
def test_sidebar_card_object(self): target_dict = { "itemType": "CARD", "align": "top", "showCloseAction": True, "showActionButton": True, "actionButtonUri": "https://changelog.md", "actionButtonUriTarget": "_new", "actionButtonName": "Discover more", "actionButtonName_nl": "Ontdek meer", "displayText": "This project ...", "displayText_nl": "Dit project ...", "displayTextAlign": "left", "showBackground": True, "minimumAccessLevel": "is_member", "maximumAccessLevel": "is_supervisor", } sidebar_card = SideBarCard( side_bar_manager=self.manager, alignment=SidebarItemAlignment.TOP, show_close_action=True, show_action_button=True, action_button_name="Discover more", action_button_uri="https://changelog.md", action_button_uri_target=URITarget.NEW, display_text="This project ...", display_text_align=Alignment.LEFT, show_background=True, minimum_access_level=SidebarAccessLevelOptions.IS_MEMBER, maximum_access_level=SidebarAccessLevelOptions.IS_SUPERVISOR, displayText_nl="Dit project ...", actionButtonName_nl="Ontdek meer", ) card_dict = sidebar_card.as_dict() self.maxDiff = None self.assertDictEqual(target_dict, card_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def card(self):\r\n return Card(self)", "def card(self):\r\n return Card(self)", "def create_uno_deck():\r\n pass", "def card_factory(rank,suit):\n pass", "def make_card_surface(self):\n\n surf = pygame.Surface((card_dimensions))\n surf.fill(CARD_OUTLINE)\n pygame.d...
[ "0.65023726", "0.65023726", "0.63833416", "0.6296066", "0.6287493", "0.6243154", "0.6175611", "0.6166363", "0.61019343", "0.6016748", "0.6007979", "0.59866345", "0.59609824", "0.5881401", "0.587644", "0.5873574", "0.5859107", "0.58429474", "0.58415014", "0.58415014", "0.58415...
0.0
-1
A simple test to create a bucket with maxTTL and check whether new creates with greater exp are deleted when maxTTL has lapsed
def test_maxttl_lesser_doc_expiry(self): for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500) self.sleep(int(self.maxttl), "waiting for all docs to expire per maxTTL rule...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}".format( int(self.maxttl) + 500, self.maxttl, self.maxttl, items)) if items > 0: self.fail("Bucket maxTTL of {0} is not honored".format(self.maxttl)) else: self.log.info("SUCCESS: Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}".format( int(self.maxttl) + 500, self.maxttl, self.maxttl, items))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n sel...
[ "0.8141814", "0.7350605", "0.7336613", "0.7202318", "0.7191125", "0.6874046", "0.6224646", "0.6203937", "0.62027216", "0.605416", "0.59724385", "0.5928285", "0.58961767", "0.5894235", "0.58194166", "0.5818683", "0.5788125", "0.5779113", "0.57711524", "0.5734831", "0.5686498",...
0.72839487
3
maxTTL is set to 200s in this test, Docs have lesser TTL.
def test_maxttl_greater_doc_expiry(self): for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100) self.sleep(int(self.maxttl-100), "waiting for all docs to expire per maxTTL rule...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}".format( int(self.maxttl) - 100, self.maxttl-100, self.maxttl-100, items)) if items == 0: self.log.info("SUCCESS: Docs with lesser expiry deleted") else: self.fail("FAIL: Doc with lesser expiry still present past ttl")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\...
[ "0.7520884", "0.7474054", "0.717667", "0.7172085", "0.67970985", "0.6264787", "0.62576485", "0.60355556", "0.59813035", "0.56774914", "0.5664216", "0.5654567", "0.56533533", "0.56533533", "0.56383187", "0.56383187", "0.5615611", "0.5612152", "0.56103104", "0.55894196", "0.551...
0.7362958
2
1. Create a bucket with no max_ttl 2. Upload 1000 docs with exp = 100s 3. Set maxTTL on bucket as 60s 4. After 60s, run expiry pager, get item count, must be 1000 5. After 40s, run expiry pager again and get item count, must be 0 6. Now load another set of docs with exp = 100s 7. Run expiry pager after 60s and get item count, must be 0
def test_set_maxttl_on_existing_bucket(self): for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=100) self._update_bucket_maxTTL(maxttl=60) self.sleep(60, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL = 60s" "(set after doc creation), after 60s, item count = {0}".format(items)) if items != self.num_items: self.fail("FAIL: Items with larger expiry before maxTTL updation deleted!") self.sleep(40, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL = 60s" "(set after doc creation), after 100s," " item count = {0}".format(items)) if items != 0: self.fail("FAIL: Items with not greater expiry set before maxTTL " "updation not deleted after elapsed TTL!") for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=100) self.sleep(60, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL = 60s, after 100s," " item count = {0}".format(items)) if items != 0: self.fail("FAIL: Items with not greater expiry not " "deleted after elapsed maxTTL!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maxttl_lesser_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500)\n self.sleep(int(self.maxttl), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, ...
[ "0.74554807", "0.73381984", "0.73129797", "0.7263034", "0.69875336", "0.6090414", "0.56455797", "0.5475571", "0.5398008", "0.53416365", "0.53413856", "0.5322587", "0.53129023", "0.5301066", "0.5245236", "0.522405", "0.52154046", "0.5207431", "0.5185581", "0.5156831", "0.51409...
0.7790331
0
Test 1. min 0 2. max 2147483647q 3. default 0 4. negative values, date, string
def test_maxttl_possible_values(self): # default rest = RestConnection(self.master) default_maxttl = rest.get_bucket_maxTTL() if default_maxttl != 0: self.fail("FAIL: default maxTTL if left unset must be 0 but is {0}".format(default_maxttl)) self.log.info("Verified: default maxTTL if left unset is {0}".format(default_maxttl)) # max value try: self._update_bucket_maxTTL(maxttl=2147483648) except Exception as e: self.log.info("Expected exception : {0}".format(e)) try: self._update_bucket_maxTTL(maxttl=2147483647) except Exception as e: self.fail("Unable to set maxTTL=2147483647, the max permitted value") else: self.log.info("Verified: Max value permitted is 2147483647") else: self.fail("Able to set maxTTL greater than 2147483647") # min value try: self._update_bucket_maxTTL(maxttl=0) except Exception as e: self.fail("Unable to set maxTTL=0, the min permitted value") else: self.log.info("Verified: Min value permitted is 0") # negative value try: self._update_bucket_maxTTL(maxttl=-60) except Exception as e: self.log.info("Verified: negative values not permitted, exception : {0}".format(e)) else: self.fail("FAIL: Able to set a negative maxTTL") # date/string try: self._update_bucket_maxTTL(maxttl="12/23/2016") except Exception as e: self.log.info("Verified: string not permitted, exception : {0}".format(e)) else: self.fail("FAIL: Able to set a date string maxTTL")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val", "def intrange(value, name=\"\", value_min=None, value_max=None, zero=False):\n value = __integer(value, \"%s value\" % n...
[ "0.6304185", "0.62069917", "0.6080596", "0.6072301", "0.6067451", "0.6045868", "0.6025268", "0.6013756", "0.6009615", "0.5928551", "0.5872796", "0.5859247", "0.5829964", "0.5817812", "0.58132696", "0.5796257", "0.5796257", "0.5784377", "0.57772344", "0.57770634", "0.577266", ...
0.5408003
82
1. Create a bucket with ttl = 200s 2. Upload 1000 docs with exp = 100s 3. Update ttl = 40s 4. After 40s, run expiry pager again and get item count, must be 1000 5. After 60s, run expiry pager again and get item count, must be 0 6. Now load another set of docs with exp = 100s 7. Run expiry pager after 40s and get item count, must be 0
def test_update_maxttl(self): for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=100) self._update_bucket_maxTTL(maxttl=40) self.sleep(40, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s" " updated maxttl = 40s, after 40s item count = {0}".format(items)) if items != self.num_items: self.fail("FAIL: Updated ttl affects docs with larger expiry before updation!") self.sleep(60, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s" " updated maxttl = 40s, after 100s item count = {0}".format(items)) if items != 0: self.fail("FAIL: Docs with 100s as expiry before maxTTL updation still alive!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n sel...
[ "0.73347205", "0.72373694", "0.6889847", "0.6790015", "0.6164027", "0.56535953", "0.54997987", "0.5486921", "0.54866165", "0.54771954", "0.5461768", "0.53524595", "0.53395146", "0.53197896", "0.5305307", "0.5276118", "0.5267235", "0.52440965", "0.5219622", "0.52176183", "0.52...
0.69277155
2
1. Create a bucket with ttl = 60s 2. Upload 1000 docs with exp = 40s 3. After 20s, Update docs with exp = 60s 4. After 40s, run expiry pager again and get item count, must be 1000 5. After 20s, run expiry pager again and get item count, must be 0
def test_maxttl_with_doc_updates(self): rest = RestConnection(self.master) for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=40) self.sleep(20, "waiting to update docs with exp=60s...") for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=60) self.sleep(40, "waiting before running expiry pager...") self.expire_pager(self.servers) for bucket in self.buckets: items = rest.get_active_key_count(bucket) self.log.info("Items: {0}".format(items)) if items != self.num_items: self.fail("FAIL: Docs with updated expiry deleted unexpectedly!") self.sleep(20, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = rest.get_active_key_count(bucket) self.log.info("Items: {0}".format(items)) if items != 0: self.fail("FAIL: Docs with updated expiry not deleted after new exp has elapsed!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n sel...
[ "0.70659494", "0.6699331", "0.6606719", "0.65539116", "0.58606344", "0.5722448", "0.5579716", "0.5559856", "0.5538106", "0.5445731", "0.5444631", "0.54434264", "0.54030365", "0.5382592", "0.53746575", "0.53511035", "0.53469026", "0.53101665", "0.5309392", "0.5298697", "0.5277...
0.71103334
0
Use active_ids from the context to fetch the leads
def default_get(self, cr, uid, fields, context=None): if context is None: context = {} record_ids = context.get('active_ids', False) res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context) if record_ids: opp_ids = [] opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context) for opp in opps: opp_ids.append(opp.id) if 'lead_ids' in fields: res.update({'lead_ids': opp_ids}) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def queryset(self, request):\n qs = super(AdRepLeadAdmin, self).queryset(request)\n qs = AdRepLead.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs", "def get_locations_by_ids(self, id_...
[ "0.6057764", "0.52192557", "0.5219104", "0.50897825", "0.5034502", "0.5022137", "0.4999737", "0.49886566", "0.49597502", "0.4957497", "0.49499336", "0.49414816", "0.49347013", "0.489892", "0.4863161", "0.48234197", "0.4823024", "0.48136124", "0.481186", "0.47834936", "0.47805...
0.69877815
0
Use lead_ids from the wizard and set to new stage
def action_multi_lead_stage(self, cr, uid, ids, context=None): if context is None: context = {} wizard = self.browse(cr, uid, ids[0], context=context) lead_ids = wizard.lead_ids if lead_ids: for lead in lead_ids: self.pool.get('crm.lead').write(cr, uid, [lead.id], {'stage_id':wizard.stage_id.id},context) return {'type': 'ir.actions.act_window_close'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onchange_stage_id_values(self, stage_id):\n if not stage_id:\n return {}\n print('1111')\n\n call_attempt = len(self.env['call.attempt'].browse(self.call_attempt_ids))\n call_pitch = len(self.env['call.pitch'].browse(self.call_pitch_ids))\n contact_meeting = len(...
[ "0.58470714", "0.55518115", "0.5546699", "0.5379987", "0.53570205", "0.5289893", "0.5229131", "0.5181128", "0.5130612", "0.5128466", "0.50979745", "0.5055515", "0.505321", "0.50443345", "0.50421935", "0.50367343", "0.503288", "0.49982783", "0.497238", "0.49644795", "0.4961761...
0.64389664
0
Return json format of results
def display_json(self, results, verbose): print(json.dumps(results))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"", "def format_results(results):\n parsed = json.loads(results.to_json())\n json_resu...
[ "0.7852767", "0.7314439", "0.715554", "0.7062592", "0.7062298", "0.68037283", "0.6758764", "0.6735392", "0.66818523", "0.6601434", "0.6596625", "0.65821993", "0.6529765", "0.6528932", "0.6514097", "0.6506638", "0.6498062", "0.6485245", "0.64679945", "0.64450043", "0.64401627"...
0.7285609
2
r"""The initial call to start propagating messages.
def propagate(self, z_agg, edge_index, **kwargs): # assert aggr in ['add', 'mean', 'max'] # agg_list = self.search_space['agg'] kwargs['edge_index'] = edge_index size = None message_args = [] for arg in self.message_args: if arg[-2:] == '_i': # If arguments ends with _i then include indic tmp = kwargs[arg[:-2]] # Take the front part of the variable | Mostly it will be 'x', size = tmp.size(0) message_args.append(tmp[edge_index[0]]) # Lookup for head entities in edges elif arg[-2:] == '_j': tmp = kwargs[arg[:-2]] # tmp = kwargs['x'] size = tmp.size(0) message_args.append(tmp[edge_index[1]]) # Lookup for tail entities in edges else: message_args.append(kwargs[arg]) # Take things from kwargs update_args = [kwargs[arg] for arg in self.update_args] # Take update args from kwargs out = self.message(*message_args) # out = scatter_(z_agg_hard, self.search_space['agg'], out, edge_index[0], dim_size=size) out = scatter_(z_agg, self.search_space['agg'], out, edge_index[0], dim_size=size) out = self.update(out, *update_args) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n if self._pumping:\n return\n self._pumping = True\n self._global_reactor.callLater(0, self._pump_once)", "def beginStep(self, message=''):\n if not self.initialized:\n self.start(message)", "def __enter__(self):\n print(self.msg)\n ...
[ "0.6312824", "0.61891603", "0.606415", "0.60143113", "0.591316", "0.5910299", "0.5900974", "0.5852277", "0.5843216", "0.5824698", "0.58228225", "0.57953435", "0.5789157", "0.5789157", "0.57543075", "0.5754194", "0.5743585", "0.5737249", "0.5731807", "0.5731807", "0.57178223",...
0.0
-1
r"""Updates node embeddings in analogy to
def update(self, aggr_out): # pragma: no cover return aggr_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_embeddings(self):", "def back_entities_embedding(self, entity):\n self.ent_embs.ent_embs.weight.data[entity] = self.source_entity", "def generate_embeddings_with_prev(self, old_emb, dims):\n self.embeddings = old_emb\n for node in self.nx_graph.nodes_iter():\n if self.nx...
[ "0.6827302", "0.6605902", "0.6546911", "0.6507674", "0.6356848", "0.63033694", "0.6274012", "0.6221701", "0.60340726", "0.59661525", "0.59410673", "0.58897674", "0.5863574", "0.5854144", "0.5821187", "0.5804857", "0.5790733", "0.57406765", "0.5728432", "0.5712224", "0.5701808...
0.0
-1
A view to show all products, including ability to search
def portfolio(request): projects = Project.objects.all() categories = None if request.GET: if 'category' in request.GET: categories = request.GET['category'].split(',') projects = projects.filter(category__name__in=categories) categories = ProjectCategory.objects.filter(name__in=categories) context = { 'projects': projects, 'current_categories': categories, } return render(request, 'portfolio/portfolio.html', context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_products(request):\n products = Product.objects.all()\n return render(request, \"products.html\", {\"products\": products})", "def all_products(request):\n\n products = Product.objects.all()\n return render(request, 'products.html', {'products': products})", "def products(request):\n\n r...
[ "0.82876235", "0.8283801", "0.78482085", "0.76325524", "0.7614649", "0.7584741", "0.74856204", "0.74783254", "0.74655133", "0.7386778", "0.7367176", "0.7330932", "0.72938323", "0.7253225", "0.72413594", "0.7230313", "0.72217464", "0.720815", "0.711731", "0.7079289", "0.704237...
0.0
-1
A view to add a new portfolio project
def add_project(request): if not request.user.is_superuser: messages.error(request, 'Sorry, only store owners can do that.') return redirect(reverse('home')) if request.method == 'POST': form = ProjectForm(request.POST, request.FILES) if form.is_valid(): project = form.save() messages.success(request, 'Project added successfully!') return redirect(reverse('portfolio')) else: messages.error(request, 'Failed to add project.\ # Please ensure the form is valid') else: form = ProjectForm() form = ProjectForm() template = 'portfolio/add_project.html' context = { 'form': form, } return render(request, template, context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n re...
[ "0.7893704", "0.7302018", "0.7279046", "0.7119255", "0.70725876", "0.7025085", "0.6988927", "0.6972085", "0.6608903", "0.65972", "0.659065", "0.6497519", "0.64028376", "0.6389602", "0.6371995", "0.63446647", "0.6344435", "0.6330495", "0.63034815", "0.6294346", "0.6290368", ...
0.7959094
0
A view to edit a portfolio project
def edit_project(request, project_id): if not request.user.is_superuser: messages.error(request, 'Sorry, only store owners can do that.') return redirect(reverse('home')) project = get_object_or_404(Project, pk=project_id) if request.method == 'POST': form = ProjectForm(request.POST, request.FILES, instance=project) if form.is_valid(): form.save() messages.success(request, 'Successfully updated project') return redirect(reverse('portfolio')) else: messages.error(request, 'Failed to update project. \ # Please ensure the form is valid.') else: form = ProjectForm(instance=project) messages.info(request, f'You are editing {project.name}') template = 'portfolio/edit_project.html' context = { 'form': form, 'project': project, } return render(request, template, context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_project(project_id):\n \n if 'username' in session: \n project = mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(project_id)})\n form=ProjectForm()\n form.title.data = project['title']\n form.status.data = project['status']\n form.deadline.data = p...
[ "0.7728187", "0.73595434", "0.7352115", "0.7184356", "0.703075", "0.67557067", "0.6421052", "0.6396073", "0.6380097", "0.6355604", "0.6334676", "0.63148147", "0.62868273", "0.6275144", "0.6263248", "0.62387496", "0.61801016", "0.6175357", "0.61719286", "0.61687547", "0.616029...
0.7640755
1
A view to delete a project from the portfolio
def delete_project(request, project_id): if not request.user.is_superuser: messages.error(request, 'Sorry, only store owners can do that.') return redirect(reverse('home')) project = get_object_or_404(Project, pk=project_id) project.delete() messages.success(request, 'Project deleted!') return redirect(reverse('portfolio'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_project(id):\n result = delete_project_to_db(id)\n flash(result)\n return redirect(url_for(\"portfolio\"))", "def delete_project_view(request, id):\n\n # retrieve the project to be deleted through his id. Raise an error if the project does not exist\n project = get_object_or_404(Projet,...
[ "0.78291446", "0.77674896", "0.7705836", "0.7590049", "0.757986", "0.7458269", "0.7449735", "0.7428579", "0.7415701", "0.725367", "0.72272354", "0.71767354", "0.7172579", "0.6984525", "0.69331306", "0.69194317", "0.6911854", "0.6911854", "0.6897094", "0.6887393", "0.68611", ...
0.8196082
0
wraps builtin print for additional extendability
def log(*arg): context = str(*arg) print("[Texture Builder] {}".format(context))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def print(self, *args, **kwargs):\n print(*args,...
[ "0.72718763", "0.7198888", "0.7185116", "0.71198493", "0.69697016", "0.6957658", "0.6845616", "0.6798065", "0.6773647", "0.67016596", "0.66754776", "0.6661273", "0.66153914", "0.6614028", "0.6603238", "0.65869075", "0.6565293", "0.65613693", "0.65500474", "0.64926285", "0.646...
0.0
-1
start oberserver in another separated thread, and WatchDog thread only monitors it
def run(self): observer = Observer() observer.schedule(self.ehandler, "./gl", True) observer.start() observer.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaem...
[ "0.6764007", "0.661172", "0.6549474", "0.64551026", "0.6381024", "0.6361601", "0.63481355", "0.63394445", "0.6275483", "0.62610716", "0.6260074", "0.62537545", "0.6252852", "0.6233397", "0.6223834", "0.61703527", "0.61615974", "0.6160586", "0.61547226", "0.6127885", "0.611809...
0.0
-1
generate simplest screen filling quad
def screen_vao(cls, gl, program): vbo = [ -1.0, -1.0, +1.0, -1.0, -1.0, +1.0, +1.0, +1.0, ] vbo = np.array(vbo).astype(np.float32) vbo = [(gl.buffer(vbo), "2f", "in_pos")] ibo = [0, 1, 2, 1, 2, 3] ibo = np.array(ibo).astype(np.int32) ibo = gl.buffer(ibo) vao = gl.vertex_array(program, vbo, ibo) return vao
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_rhombus(self, screen):\n pygame.gfxdraw.filled_polygon(screen, self.list_of_coordinates, self.color)\n\n return screen", "def DrawBase(screen, base_x, base_y, base_len, base_width):\n pygame.draw.rect(screen, (255,0,0),(base_x, base_y, base_len*2, base_width*2), 4)", "def draw_gri...
[ "0.6329539", "0.6251077", "0.6159504", "0.61296666", "0.6059617", "0.6056886", "0.60274506", "0.6013795", "0.5928783", "0.5917718", "0.59110874", "0.58925414", "0.5879362", "0.58776313", "0.58744633", "0.58570075", "0.58549136", "0.5837235", "0.5829144", "0.5827169", "0.58188...
0.0
-1
need better performance here
def serialize_buffer(cls, gl_buffer, w, h): data = gl_buffer.read() data = np.frombuffer(data, dtype=np.float32) data = data.reshape((h, w, 4)) data = np.multiply(data, 255.0) data = data.astype(np.uint8) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply(self):", "def _optimise(self):\n pass", "def apply(self) -> None:", "def apply(self) -> None:", "def map():", "def regular(self):", "def substantiate():", "def common(self):", "def transform(self):", "def process(self):", "def process(self):", "def process(self):", "def pre...
[ "0.5904686", "0.587094", "0.58542585", "0.58542585", "0.57282573", "0.5480052", "0.544143", "0.5374042", "0.5308655", "0.53024113", "0.53024113", "0.53024113", "0.52879596", "0.5280594", "0.5263961", "0.5251992", "0.5239149", "0.5239149", "0.5212692", "0.5189539", "0.51704985...
0.0
-1
simple compute shader run after screen rendering
def build_cs(self, gl): cs = gl.compute_shader(GLUtil.shader("./gl/cs/cs.glsl")) u_time = None u_width = None u_height = None if "u_time" in cs: u_time = cs["u_time"] if "u_width" in cs: u_width = cs["u_width"] if "u_height" in cs: u_height = cs["u_height"] buf_in = gl.buffer(reserve=width * height * 4 * 4) buf_in.bind_to_storage_buffer(0) buf_out = gl.buffer(reserve=width * height * 4 * 4) buf_out.bind_to_storage_buffer(1) return cs, [u_time, u_width, u_height], [buf_in, buf_out]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _executeShader(self, node, threadsX, threadsY, threadsZ=1):\n sattr = node.get_attrib(ShaderAttrib)\n Globals.base.graphicsEngine.dispatch_compute(\n (threadsX, threadsY, threadsZ), sattr, Globals.base.win.get_gsg())", "def render( self, shader, mode, index ):\n location = sha...
[ "0.71154165", "0.64277923", "0.6419814", "0.6234535", "0.6087237", "0.60823435", "0.586072", "0.58073187", "0.5745648", "0.57310456", "0.5686501", "0.565925", "0.56536305", "0.5627507", "0.56140345", "0.5604499", "0.5579304", "0.55418664", "0.5501335", "0.54909116", "0.549046...
0.0
-1
called everytime any files under gl directory changes
def recompile(self): self.vaos = [] try: self.program, uniforms = self.build_prog(self.gl) self.u_time, self.u_width, self.u_height = uniforms vao = GLUtil.screen_vao(self.gl, self.program) self.vaos.append(vao) self.compute, uniforms, buffers = self.build_cs(self.gl) self.u_cstime, self.u_cswidth, self.u_csheight = uniforms self.buf_in, self.buf_out = buffers self.set_gpu_wh(width, height) self.gx, self.gy = int(width / 8), int(height / 8) self.set_gpu_time() log("[Renderer] shader recompiled.") except Exception as e: log(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_file_changed(self, path):\n\t\tpass", "def on_modified(self, event):\n \n if not event.is_directory: \n\n file_name = os.path.basename(event.src_path)\n \n if file_name not in self.ignore_files:\n parent = os.path.dirname(event.src_path)\n ...
[ "0.7321618", "0.67455846", "0.63352966", "0.6173503", "0.6163231", "0.6076398", "0.60359526", "0.6032375", "0.5989207", "0.5915403", "0.58932906", "0.58539456", "0.5828015", "0.5816045", "0.58157974", "0.58147573", "0.5802325", "0.5790458", "0.5786011", "0.57448286", "0.56976...
0.0
-1
called only once when start
def initializeGL(self): self.gl = mg.create_context() self.recompile() self.to_capture = False self.capture_texture = self.gl.texture((capture_width, capture_height), 4, dtype="f4") capture_framebuffer = self.gl.framebuffer([self.capture_texture]) self.capture_scope = self.gl.scope(capture_framebuffer) self.to_record = False self.record_texture = self.gl.texture((record_width, record_height), 4, dtype="f4") record_framebuffer = self.gl.framebuffer([self.record_texture]) self.record_scope = self.gl.scope(record_framebuffer) self.recording = None self.to_capture_buffer_in = False self.to_capture_buffer_out = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_start(self):", "def started(self):", "def _start(self):", "def on_start(self):", "def on_start(self):", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass",...
[ "0.83904356", "0.8242063", "0.8004934", "0.79939187", "0.79939187", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7815382", "0.78118557", "0.7695339", "0.7695339", "0.7693047", "0.7675579", "0.7652083", "0.75488406"...
0.0
-1
get and store tweets based on a given function
def getter(collection, modulename=None, filename="AlltweetsNoOp.json"): count = 0 print "total number of tweets in this database is ", collection.find().count() # open a new file ### outfile = open(filename, "w") # according to the json list format outfile.write("[") if modulename == None: option = NoOp else: module = imp.load_source('module.name', modulename) option = module.check for tweet in collection.find(): count += 1 if count % 5000 == 0: print count if option(tweet): tweet.pop(u'_id', None) json.dump(tweet, outfile, indent = 4) outfile.write(",") # close all files outfile.seek(-1, 1) outfile.write("]") outfile.close() print "finish writing to the file"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n ...
[ "0.67840785", "0.64763457", "0.6372614", "0.63208246", "0.62932944", "0.6282456", "0.6270488", "0.62480295", "0.6231377", "0.62282085", "0.6203771", "0.6201833", "0.616992", "0.6169758", "0.61481005", "0.6143227", "0.61307067", "0.6097203", "0.6073542", "0.6066333", "0.604747...
0.0
-1
get and store tweets based on a given function
def randomprint(collection, modulename=None, num = 300): count = 0 print "total number of tweets in this database is ", collection.find().count() if modulename == None: option = NoOp else: module = imp.load_source('module.name', modulename) option = module.check # probability that one tweet will be printed out total = 28360 # number of tweets printed accu = 0 for tweet in collection.find(): count += 1 # if count % 5000 == 0: # print count if option(tweet): prob = random.randint(1, 2) if prob <= total - num: accu += 1 print " --------------------------- Tweet ", accu, " ---------------------------" print tweet[u'text'].encode("utf-8", "ignore") print tweet[u'created_at'] print tweet[u'user'][u'screen_name'].encode("utf-8", "ignore"), " | ", tweet[u'user'][u'name'].encode("utf-8", "ignore"), " | ", tweet[u'user'][u'description'].encode("utf-8", "ignore") print print "finish searching all tweets"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n ...
[ "0.6786159", "0.6478341", "0.63744974", "0.6324953", "0.6295513", "0.6283256", "0.6272268", "0.6249374", "0.6233402", "0.62311983", "0.62054044", "0.62021184", "0.61722195", "0.6171884", "0.61485344", "0.6144559", "0.6132322", "0.6097602", "0.6076344", "0.6068674", "0.6049509...
0.0
-1
Create a new database from attributes
def create_database(self, instance, **attrs): instance = self._get_resource(_instance.Instance, instance) return self._create( _database.Database, instance_id=instance.id, **attrs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_db(self):", "def create():\n\tcreate_db()", "def generate_database_object(**kwargs):\n return app.database.Database(\"test.db\", **kwargs)", "def make_db():\n\n db.create_all()", "def create_database():\n create_db(app)", "def create():\n\n from slicr.extensions import db\n\n cl...
[ "0.769547", "0.74889106", "0.73719215", "0.7142584", "0.70426226", "0.7019294", "0.7007661", "0.69997644", "0.6994348", "0.6984608", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", ...
0.7182269
3
Find a single database
def find_database(self, name_or_id, instance, ignore_missing=True): instance = self._get_resource(_instance.Instance, instance) return self._find( _database.Database, name_or_id, instance_id=instance.id, ignore_missing=ignore_missing, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)", "def searchDatabase(self, name:...
[ "0.7311903", "0.6849391", "0.6839373", "0.6820903", "0.6808621", "0.67496127", "0.67408377", "0.6706965", "0.6702317", "0.6701318", "0.66828966", "0.6620947", "0.65982574", "0.6543755", "0.6513072", "0.6468301", "0.64516336", "0.6444562", "0.640927", "0.6397873", "0.63197", ...
0.7636754
0
Return a generator of databases
def databases(self, instance, **query): instance = self._get_resource(_instance.Instance, instance) return self._list(_database.Database, instance_id=instance.id, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_databases ():\n return _dbobjects[:]", "def database():\n db = Database()\n yield db\n db.close()", "def get_databases(self):\n pass", "def get_db() -> Generator:\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()", "def get_db() -> Iterator[Se...
[ "0.7408082", "0.72182983", "0.71949655", "0.7083334", "0.69460434", "0.6883696", "0.68634665", "0.6770623", "0.6742591", "0.6707848", "0.6675422", "0.66511965", "0.65341586", "0.6520894", "0.6501018", "0.64990884", "0.64912605", "0.6466524", "0.6464015", "0.6418823", "0.63651...
0.6240589
28
Get a single database
def get_database(self, database, instance=None): return self._get(_database.Database, database)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def get_database(self, instance, name):\n return instance.get_database(name)", "def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)", "...
[ "0.8189091", "0.7899076", "0.7844062", "0.7762001", "0.7726166", "0.772395", "0.7723282", "0.7685286", "0.765148", "0.762424", "0.7616151", "0.76047456", "0.7597356", "0.7583495", "0.7570153", "0.75534976", "0.74211323", "0.74180853", "0.7410544", "0.7392949", "0.7318623", ...
0.7978113
1
Find a single flavor
def find_flavor(self, name_or_id, ignore_missing=True): return self._find( _flavor.Flavor, name_or_id, ignore_missing=ignore_missing )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def find_flavor(self, name_or_id, ignore_missing=False):\n return self._find(_flavor.Flavor, name_or_id,\n ignore_missing=igno...
[ "0.8060121", "0.78982663", "0.73436314", "0.7281482", "0.7281482", "0.7015246", "0.7004508", "0.68786645", "0.67651623", "0.67304444", "0.66880435", "0.66849285", "0.65951335", "0.6554591", "0.64654094", "0.6302968", "0.6265922", "0.62596685", "0.62154466", "0.6201769", "0.61...
0.7353765
2
Get a single flavor
def get_flavor(self, flavor): return self._get(_flavor.Flavor, flavor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def get_flavor(self, flavor_id):\n return self._flavor_manager.get(flavor_id)", "def get_flavor(self, flavor_id):\n url = '%s/flavors/%s' % (s...
[ "0.82900196", "0.8277593", "0.8069578", "0.789696", "0.7692458", "0.7663924", "0.75685847", "0.7417431", "0.7335429", "0.72927356", "0.71877533", "0.71117985", "0.69786406", "0.6909632", "0.6736771", "0.67233574", "0.67233574", "0.66672426", "0.66635484", "0.6631124", "0.6613...
0.86241716
1
Return a generator of flavors
def flavors(self, **query): return self._list(_flavor.Flavor, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flavors(self, **kwargs):\n raise NotImplementedError", "def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))", "def flavors(self, **kwargs):\n if kwargs is None:\n result = self.get...
[ "0.74028367", "0.6779597", "0.6729681", "0.64055777", "0.6195069", "0.59106874", "0.59053063", "0.58540213", "0.5842893", "0.5784032", "0.57393897", "0.5738477", "0.5722725", "0.5588289", "0.5528176", "0.54953", "0.5488104", "0.54655933", "0.54139316", "0.5388655", "0.5348195...
0.679078
1
Create a new instance from attributes
def create_instance(self, **attrs): return self._create(_instance.Instance, **attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)", "def __init__(self, **attributes):\n self.set(**attributes)", "def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():...
[ "0.7391123", "0.71716684", "0.7001039", "0.6807363", "0.6790451", "0.67887485", "0.67748046", "0.67689526", "0.6607367", "0.6607367", "0.660078", "0.65276855", "0.65238714", "0.65238714", "0.64978755", "0.64805484", "0.64737403", "0.64229757", "0.6416804", "0.6346199", "0.632...
0.63399637
20
Find a single instance
def find_instance(self, name_or_id, ignore_missing=True): return self._find( _instance.Instance, name_or_id, ignore_missing=ignore_missing )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_instance(cls, identifier):\r\n for instance in cls.all:\r\n if instance.identifier == identifier:\r\n return instance\r\n return None", "def find(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def find(self, **kwargs):\n rl = sel...
[ "0.7728593", "0.71363354", "0.7030264", "0.6881119", "0.68568933", "0.68549156", "0.6848733", "0.6823757", "0.6800267", "0.6779677", "0.67150754", "0.6676291", "0.6646991", "0.6642961", "0.65998936", "0.6545709", "0.6520098", "0.6490596", "0.64855844", "0.6475394", "0.6446852...
0.7567719
1
Get a single instance
def get_instance(self, instance): return self._get(_instance.Instance, instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetInstance():\n pass", "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def get_instance(cls, *args, **kwargs):\n if cls._instance is not None:\n return cls._instance\n return cls(*args, **kwargs)", "def _get_instance(self):\n ...
[ "0.7694641", "0.75701267", "0.75683355", "0.7447133", "0.7252449", "0.7242679", "0.7239021", "0.7208093", "0.71733356", "0.71482056", "0.71482056", "0.7138967", "0.70699036", "0.7006299", "0.69852996", "0.69710827", "0.69649774", "0.69454527", "0.69406265", "0.6925822", "0.68...
0.7944661
0
Return a generator of instances
def instances(self, **query): return self._list(_instance.Instance, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getinstances(cls):\n\t\t\tdead = set()\n\t\t\tfor ref in cls._instances:\n\t\t\t\tobj = ref()\n\t\t\t\tif obj is not None:\n\t\t\t\t\tyield obj\n\t\t\t\telse:\n\t\t\t\t\tdead.add(ref)\n\t\t\tcls._instances -= dead", "def __iter__(self):\n return self.new_generator()", "def instances(self):\n ...
[ "0.7728519", "0.764147", "0.73204434", "0.70539004", "0.6947389", "0.6733554", "0.67039716", "0.67004335", "0.67004335", "0.6692241", "0.66860837", "0.66724557", "0.6664278", "0.6646873", "0.6614654", "0.6586283", "0.6565416", "0.6558471", "0.65531677", "0.6528782", "0.649612...
0.0
-1
Create a new user from attributes
def create_user(self, instance, **attrs): instance = self._get_resource(_instance.Instance, instance) return self._create(_user.User, instance_id=instance.id, **attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(cls, **data):\n user = cls()\n for attribute in data:\n if hasattr(user, attribute):\n setattr(user, attribute, data[attribute])\n user.password = data[\"password\"]\n db.session.add(user)\n return user", "def create_user(user, first_name, l...
[ "0.7783138", "0.7630757", "0.7628778", "0.7590039", "0.7514446", "0.7495538", "0.7476639", "0.7472641", "0.7424796", "0.74176484", "0.73413366", "0.73327565", "0.73321164", "0.7310507", "0.72872263", "0.7284248", "0.7278472", "0.7245254", "0.7237018", "0.72202486", "0.7214713...
0.70159054
45
Find a single user
def find_user(self, name_or_id, instance, ignore_missing=True): instance = self._get_resource(_instance.Instance, instance) return self._find( _user.User, name_or_id, instance_id=instance.id, ignore_missing=ignore_missing, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_user(user_name):\n return User.find_by_user_name(user_name)", "def find_user(self, value, key=\"name\"):\n if not value:\n return\n\n if key.lower() not in (\"name\", \"id\", \"email\"):\n raise ValueError()\n\n if key.lower() == \"id\":\n return ...
[ "0.79601854", "0.7887714", "0.78640777", "0.77059114", "0.766751", "0.76567847", "0.7623235", "0.75812227", "0.7565286", "0.75438184", "0.7530661", "0.75195897", "0.75174814", "0.747526", "0.7461013", "0.74320114", "0.7411608", "0.74057275", "0.73754036", "0.7375327", "0.7373...
0.731691
25
Return a generator of users
def users(self, instance, **query): instance = self._get_resource(_instance.Instance, instance) return self._list(_user.User, instance_id=instance.id, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():", "def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]", "def get_users(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_...
[ "0.7804074", "0.7264213", "0.724916", "0.72419035", "0.7206268", "0.71832883", "0.714184", "0.70211744", "0.6948952", "0.6935378", "0.69063926", "0.6886542", "0.6870021", "0.68204033", "0.6802641", "0.67643344", "0.6726554", "0.6724001", "0.6705792", "0.6705792", "0.6705792",...
0.0
-1
Get a single user
def get_user(self, user, instance=None): instance = self._get_resource(_instance.Instance, instance) return self._get(_user.User, user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n\n return user", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n ...
[ "0.8360361", "0.83258814", "0.8310465", "0.8215789", "0.8177496", "0.8172062", "0.8166535", "0.8159785", "0.81516296", "0.81001353", "0.80638343", "0.8059556", "0.8051115", "0.8048663", "0.8047433", "0.8014368", "0.80115867", "0.79899853", "0.7974474", "0.79706466", "0.795660...
0.75470227
77
Save the post data when creating a new bucketlist.
def perform_create(self, serializer): serializer.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, user):\n # parse request data\n bucketlist_name = self.request.form['name']\n\n # validate bucketlist\n if not bucketlist_name:\n return \"Name cannot be empty\", 401\n\n # create bucketlist and save bucketlist\n bucketlist = Bucketlist(name=bucke...
[ "0.7236179", "0.6786587", "0.6767346", "0.66732574", "0.6173778", "0.6169759", "0.6156117", "0.6154787", "0.6003337", "0.5966829", "0.5952784", "0.59198797", "0.5882262", "0.58755314", "0.58628845", "0.5862723", "0.5828422", "0.5778974", "0.5684983", "0.56759155", "0.562585",...
0.0
-1
Add a user to 'prospects' unless the user is the campaign owner or is already linked to 'workers', 'prospects', or 'blacklist'. Also decline to add prospects when the campaign is not active. user A TcsUser instance to link to 'prospects'
def addProspect(self, user): if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \ and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists(): self.prospects.add(user) return self return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n ...
[ "0.57529235", "0.57101214", "0.56048286", "0.549154", "0.52644926", "0.516734", "0.51544005", "0.50623524", "0.49674854", "0.49344334", "0.48778322", "0.48658597", "0.48227632", "0.481681", "0.4816524", "0.48090467", "0.48052084", "0.47986007", "0.4791924", "0.47789344", "0.4...
0.8156176
0
Remove the user from the lists of workers and prospects, if applicable, and add the user to the blacklist. Note that adding somebody as a worker removes the person from the blacklist. user A TcsUser instance to link to the blacklist
def addToBlacklist(self, user): if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists(): self.blacklist.add(user) if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) if self.workers.filter(pk=user.id).exists(): self.workers.remove(user) return self return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An...
[ "0.74218553", "0.7352311", "0.7122453", "0.67325133", "0.6435446", "0.6364521", "0.6361342", "0.6294082", "0.6188603", "0.58658487", "0.5860538", "0.58456814", "0.58305186", "0.58287203", "0.57763344", "0.57541144", "0.56883603", "0.5684058", "0.55988926", "0.55928737", "0.55...
0.81987846
0
Remove the user from 'prospects' and 'blacklist', if applicable, and add the user to 'workers'. Note that adding somebody as a worker removes the person from the blacklist. user A TcsUser instance to link to workers
def addWorker(self, user): if (user != self.owner) and not self.workers.filter(pk=user.id).exists(): self.workers.add(user) if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) if self.blacklist.filter(pk=user.id).exists(): self.blacklist.remove(user) return self return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists(...
[ "0.7682945", "0.6766191", "0.6598563", "0.62174505", "0.60659224", "0.5727882", "0.56629395", "0.5472015", "0.54637945", "0.54543555", "0.53860176", "0.53835195", "0.53404295", "0.5299187", "0.5298164", "0.5247873", "0.5234306", "0.5230454", "0.52074", "0.51751196", "0.516473...
0.7764532
0
Return True if the campaign authorizes the user to provide data to the campaign. This is the case if the user owns or works for the campaign. Otherwise, return False.
def authorizes(self, user): return self.owner == user or self.workers.filter(pk=user.id).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authorized(self):\n return self.authorization is not None", "def user_allow_credit(self):\n try:\n return self.user.creditAllowed()\n except AttributeError:\n return False", "def has_permission(self, request, view):\n user = request.user\n try:\n ...
[ "0.71512544", "0.6903803", "0.6882612", "0.6824171", "0.6759655", "0.671842", "0.67081565", "0.6686441", "0.66475916", "0.6640897", "0.6619891", "0.66168815", "0.6604757", "0.6577445", "0.65759784", "0.65525156", "0.65457594", "0.65178764", "0.6516392", "0.65048504", "0.64949...
0.68715215
3
Return campaign workers who do not already own a campaign.
def getOwnerOptions(self): # TODO return self.workers.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crew_needing_reports(self):\n reports = self.ccreport_set.all().values_list('crew_chief', flat=True)\n return self.ccinstances.exclude(crew_chief__in=reports)", "def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.appen...
[ "0.6192415", "0.6118456", "0.5496211", "0.5446709", "0.5353705", "0.5315112", "0.5248027", "0.5240531", "0.5227237", "0.5211369", "0.5207625", "0.5090434", "0.50866437", "0.507655", "0.5067131", "0.50648826", "0.5036302", "0.5027716", "0.50187", "0.50136185", "0.5004204", "...
0.4997855
21
Return active constituent voters who have not been contacted since the last election and have not been served to a supporter in the last two days. Don't limit the size of the result set here; let APIs do that.
def getVotersToContact(self): two_days_ago = date.today() - timedelta(2) year_ago = date.today() - timedelta(365) return self.voters.filter( Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago), Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago), campaignstovoters__is_active=True, is_active=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def getVotersToDial(self):\n return self.getVotersToContact().exclude(\n (Q(phone_number1='') ...
[ "0.6028861", "0.5758767", "0.5515115", "0.54826623", "0.5425473", "0.5403842", "0.53461355", "0.5314975", "0.5251976", "0.5220554", "0.5220554", "0.5193499", "0.51809436", "0.51725066", "0.517217", "0.5158285", "0.51561767", "0.5152371", "0.51470834", "0.51421833", "0.5122205...
0.65486276
0
Return active constituent voters with valid phone contact information who have not been contacted since the last election. Don't limit the size of the result set here; let APIs do that.
def getVotersToDial(self): return self.getVotersToContact().exclude( (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)), (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_co...
[ "0.6920536", "0.57854915", "0.5222731", "0.50926673", "0.5040607", "0.502312", "0.50023216", "0.48708686", "0.48509404", "0.48265207", "0.48217788", "0.48114735", "0.47936308", "0.47893497", "0.47839564", "0.47823417", "0.47818604", "0.4778862", "0.47737798", "0.47727492", "0...
0.671149
1
Return active constituent voters in proximity to given latitude and longitude coordinates. (Database contrainsts require all rows in the voters table to have valid address information.) Don't limit the size of the result set here; let APIs do that.
def getVotersDoorToDoor(self, latitude, longitude): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def users_nearby(self, meters):\n location = Location.objects.get(id=self.most_recent_location_id)\n lng = location.position['coordinates'][0]\n lat = location.position['coordinates'][1]\n\n nearby_locations = Location.objects(position__near=[lng, lat], position__max_distance=meters)\n\...
[ "0.5678817", "0.5506488", "0.54521376", "0.5356175", "0.5350751", "0.5323278", "0.5296326", "0.5275836", "0.5268494", "0.52337134", "0.5233509", "0.5190683", "0.51781356", "0.51566523", "0.513223", "0.51218677", "0.51183736", "0.51013875", "0.50991535", "0.5097977", "0.508264...
0.5194998
11
Remove the user from 'workers' or 'prospects', if applicable. user A TcsUser instance to remove from workers
def removeWorker(self, user): if user == self.owner: return None # Without these queries, there's no way to tell if anything actually gets removed. # Calling remove() on a user that is not in the set does not raise an error. if self.workers.filter(pk=user.id).exists(): self.workers.remove(user) return self if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) return self return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user...
[ "0.6894938", "0.68542784", "0.685336", "0.67998946", "0.6638343", "0.6393456", "0.63634795", "0.6293484", "0.6282741", "0.6280759", "0.6247314", "0.62332475", "0.6231962", "0.6227879", "0.61912465", "0.6178004", "0.6158775", "0.6148308", "0.6134449", "0.6126714", "0.6120114",...
0.7817377
0
Return the number of voters a user has contacted for the campaign.
def voterContactCount(self, user): return self.votercontact_set.filter(user=user).count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_of_ver_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'VerificationSponsor'])\n return n_agents", "def nay_voter_cnt(self):\n\n return len(self._nay_voters())", "def present_voter_cnt(self):\n\n re...
[ "0.6371157", "0.63698155", "0.63018495", "0.60611516", "0.6054053", "0.6027923", "0.6018428", "0.59854454", "0.5949555", "0.59482515", "0.5914123", "0.58427924", "0.57395315", "0.57395315", "0.57389605", "0.5730802", "0.57079923", "0.57007176", "0.56978345", "0.5668029", "0.5...
0.7715844
0
Returns an indented representation of the nested dictionary.
def pretty_repr(self, num_spaces=4): def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n outp...
[ "0.73564094", "0.7016583", "0.7004065", "0.69742304", "0.69219863", "0.6862406", "0.68234503", "0.6813462", "0.6663069", "0.6650337", "0.66487944", "0.6608814", "0.65994126", "0.65836185", "0.6566666", "0.6555802", "0.6501829", "0.6487838", "0.6477041", "0.6438375", "0.643526...
0.70187014
1
Create a new FrozenDict with additional or replaced entries.
def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = Frozen...
[ "0.7146128", "0.68393993", "0.65283275", "0.6507282", "0.5816068", "0.5484915", "0.5304193", "0.5286141", "0.5152492", "0.5124631", "0.5105132", "0.50970227", "0.5096764", "0.5051437", "0.5042281", "0.49775112", "0.49529138", "0.4934736", "0.49311805", "0.48632023", "0.485795...
0.70710015
1
Create a new FrozenDict where one entry is removed.
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]: value = self[key] new_dict = dict(self._dict) new_dict.pop(key) new_self = type(self)(new_dict) return new_self, value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less perf...
[ "0.6839586", "0.6433712", "0.63349175", "0.630568", "0.60893214", "0.59897816", "0.5919276", "0.5696106", "0.56297266", "0.5533009", "0.5489603", "0.54769456", "0.54426163", "0.53937614", "0.5389322", "0.5372033", "0.5324981", "0.53037596", "0.53020614", "0.52977496", "0.5270...
0.57714397
7
Deep copy unfrozen dicts to make the dictionary FrozenDict safe.
def _prepare_freeze(xs: Any) -> Any: if isinstance(xs, FrozenDict): # we can safely ref share the internal state of a FrozenDict # because it is immutable. return xs._dict # pylint: disable=protected-access if not isinstance(xs, dict): # return a leaf as is. return xs # recursively copy dictionary to avoid ref sharing return {key: _prepare_freeze(val) for key, val in xs.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n ret...
[ "0.71733785", "0.7037678", "0.65864396", "0.6474158", "0.62573695", "0.6152454", "0.61428803", "0.6087053", "0.6052837", "0.60174745", "0.5997487", "0.59899086", "0.5918377", "0.5899443", "0.5898595", "0.5892593", "0.5735094", "0.572419", "0.5684948", "0.56120795", "0.5609732...
0.7742294
0
Freeze a nested dict. Makes a nested `dict` immutable by transforming it into `FrozenDict`.
def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]: return FrozenDict(xs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursive...
[ "0.7681707", "0.73136884", "0.6781864", "0.5990238", "0.5973389", "0.59243876", "0.57386374", "0.56837773", "0.5637036", "0.5456011", "0.5356702", "0.5356166", "0.5309256", "0.5258965", "0.52475625", "0.5228192", "0.5178104", "0.51654106", "0.5152976", "0.5144742", "0.5124128...
0.6944025
2
Unfreeze a FrozenDict. Makes a mutable copy of a `FrozenDict` mutable by transforming it into (nested) dict.
def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]: if isinstance(x, FrozenDict): # deep copy internal state of a FrozenDict # the dict branch would also work here but # it is much less performant because jax.tree_util.tree_map # uses an optimized C implementation. return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore elif isinstance(x, dict): ys = {} for key, value in x.items(): ys[key] = unfreeze(value) return ys else: return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursive...
[ "0.678866", "0.63871247", "0.63075334", "0.6243246", "0.61387753", "0.61324066", "0.5833921", "0.56678116", "0.5610028", "0.5581237", "0.547987", "0.528664", "0.5166779", "0.51488274", "0.5131935", "0.513004", "0.5127286", "0.51143354", "0.5108335", "0.5104038", "0.50658894",...
0.80311483
0
Create a new dict with additional and/or replaced entries. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.copy`.
def copy( x: Union[FrozenDict, Dict[str, Any]], add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict( {} ), ) -> Union[FrozenDict, Dict[str, Any]]: if isinstance(x, FrozenDict): return x.copy(add_or_replace) elif isinstance(x, dict): new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x new_dict.update(add_or_replace) return new_dict raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal stat...
[ "0.7882248", "0.69423527", "0.6703827", "0.66988987", "0.65337425", "0.6381466", "0.6357913", "0.63407815", "0.63111323", "0.6289166", "0.6279252", "0.626923", "0.62294686", "0.61992794", "0.6063225", "0.60579437", "0.6037078", "0.6023079", "0.5997253", "0.59942675", "0.59496...
0.8017656
0
Create a new dict where one entry is removed. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pop`.
def pop( x: Union[FrozenDict, Dict[str, Any]], key: str ) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]: if isinstance(x, FrozenDict): return x.pop(key) elif isinstance(x, dict): new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x value = new_dict.pop(key) return new_dict, value raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def dict_pop(d, key):\n return d.pop(key)", "def remove_element( self, dictionary, key):\n\n _dict ...
[ "0.70482343", "0.67293966", "0.6490357", "0.6465873", "0.61706996", "0.6144822", "0.6001672", "0.59998095", "0.5967065", "0.59639794", "0.5955744", "0.59308225", "0.588004", "0.58719283", "0.58055025", "0.57446265", "0.57119524", "0.56699175", "0.5650597", "0.56066054", "0.55...
0.7112404
0
Returns an indented representation of the nested dictionary. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pretty_repr`. If x is any other dtype, this function will return `repr(x)`.
def pretty_repr(x: Any, num_spaces: int = 4) -> str: if isinstance(x, FrozenDict): return x.pretty_repr() else: def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return pretty_dict(x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pretty_repr(self, num_spaces=4):\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n ...
[ "0.7736835", "0.7247312", "0.7021937", "0.69802797", "0.69656223", "0.6918987", "0.6832074", "0.68211126", "0.67912984", "0.678275", "0.67453086", "0.6687921", "0.66458195", "0.654063", "0.6504709", "0.6453231", "0.64472985", "0.63632375", "0.6358067", "0.6356899", "0.6325422...
0.83444524
0
Generate instance masks for an image.
def load_mask(self, image_id, coco_offset=0):
    """Generate instance masks for an image.

    Reads one mask image per instance from ``self.json_data`` and maps each
    instance's class name to an index in ``common.activity_classes_names``.

    Args:
        image_id: index into ``self.image_info``.
        coco_offset: added to every class ID so activity classes can be
            placed after the COCO classes in a combined label space.

    Returns:
        masks: bool array of shape [height, width, instance_count].
        class_ids: int32 array with one class ID per instance
            (``index + 1 + coco_offset``; 0 stays the background class).
    """
    info = self.image_info[image_id]
    instances = self.json_data[info["id"]]
    mask = np.zeros([info["height"], info["width"], len(instances)], dtype=np.uint8)
    lbls = np.zeros(len(instances), dtype=np.int32)
    for idx, (mask_path, mask_info) in enumerate(instances.items()):
        mask_class = mask_info["class"]
        mask[:, :, idx] = np.array(PIL.Image.open(mask_path), dtype=np.uint8)
        lbls[idx] = common.activity_classes_names.index(mask_class) + 1 + coco_offset
    # Bug fix: the deprecated ``np.bool`` alias was removed in NumPy 1.24;
    # the builtin ``bool`` is the documented replacement.
    return mask.astype(bool), lbls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self...
[ "0.69253653", "0.6923163", "0.69147986", "0.68673575", "0.68427163", "0.6760219", "0.6754343", "0.6707345", "0.6699128", "0.66717273", "0.66254276", "0.660529", "0.6602349", "0.6544177", "0.6543895", "0.638877", "0.63362086", "0.6334344", "0.6327872", "0.630387", "0.6286461",...
0.59280497
55
Load a subset of the COCO dataset.
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
              class_names=None, class_map=None, return_coco=False, auto_download=False):
    """Load a subset of the COCO dataset into this Dataset instance.

    Args:
        dataset_dir: root directory holding ``annotations/`` and the image
            folders (``train<year>``, ``val<year>``, ...).
        subset: split name, e.g. "train", "val", "minival",
            "valminusminival" (the latter two read images from ``val``).
        year: dataset year used to build file/folder names.
        class_ids: optional list of COCO category IDs to restrict to.
        class_names: optional list of category names; when given, it is
            converted to IDs and overrides ``class_ids``.
        class_map: unused here — kept for interface compatibility with
            callers that pass it.  # NOTE(review): confirm it can be dropped
        return_coco: if True, return the pycocotools ``COCO`` object.
        auto_download: if True, download/unzip the data first via
            ``self.auto_download``.
    """
    if auto_download is True:
        self.auto_download(dataset_dir, subset, year)

    coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
    # minival / valminusminival annotations refer to the val image folder.
    if subset == "minival" or subset == "valminusminival":
        subset = "val"
    image_dir = "{}/{}{}".format(dataset_dir, subset, year)

    # Select class_ids from class_names:
    if class_names:
        class_ids = sorted(coco.getCatIds(catNms=class_names))

    # Load all classes or a subset?
    if not class_ids:
        # All classes
        class_ids = sorted(coco.getCatIds())

    # All images or a subset?
    if class_ids:
        image_ids = []
        for id in class_ids:
            imgs = []  # list of images to add to image_ids
            # Select at most COCO_IMAGES_PER_OBJECT and select only the images
            # that have at most COCO_MAX_NUM_MASK_PER_IMAGE masks inside them:
            for imgid in list(coco.getImgIds(catIds=[id])):
                if len(imgs) >= COCO_IMAGES_PER_OBJECT:
                    break
                if len(coco.loadAnns(coco.getAnnIds(imgIds=[imgid],
                                                    catIds=class_ids,
                                                    iscrowd=None))) <= COCO_MAX_NUM_MASK_PER_IMAGE:
                    imgs.append(imgid)
            image_ids.extend(imgs)
            #image_ids.extend(list(coco.getImgIds(catIds=[id]))[:COCO_IMAGES_PER_OBJECT])
        # Remove duplicates (an image can contain several selected classes).
        image_ids = list(set(image_ids))
    else:
        # All images
        image_ids = list(coco.imgs.keys())

    # Add classes
    for i in class_ids:
        self.add_class("coco", i, coco.loadCats(i)[0]["name"])

    # Add images
    for i in image_ids:
        #print(len(coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None))))
        self.add_image(
            "coco", image_id=i,
            path=os.path.join(image_dir, coco.imgs[i]['file_name']),
            width=coco.imgs[i]["width"],
            height=coco.imgs[i]["height"],
            annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids,
                                                     iscrowd=None)))
    if return_coco:
        return coco
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=...
[ "0.62512195", "0.6004767", "0.5874558", "0.5784675", "0.57171506", "0.56779504", "0.5658244", "0.5657759", "0.56274396", "0.5618919", "0.5617201", "0.55943984", "0.55781955", "0.55633026", "0.55596197", "0.5551278", "0.54980606", "0.54674935", "0.5462153", "0.54568654", "0.54...
0.64328516
0
Download the COCO dataset/annotations if requested.
def auto_download(self, dataDir, dataType, dataYear):
    """Download and unzip the COCO images and annotations if not present.

    Args:
        dataDir: root directory the archives are downloaded/extracted into.
        dataType: split name ("train", "val", "minival", "valminusminival");
            "minival"/"valminusminival" use the val images plus special
            annotation files hosted on Dropbox.
        dataYear: dataset year used in file names and URLs.

    Existing folders/files are left untouched, so this is safe to re-run.
    """
    # Setup paths and file names
    if dataType == "minival" or dataType == "valminusminival":
        imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
        imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
        imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
    else:
        imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
        imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
        imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
    # print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)

    # Create main folder if it doesn't exist yet
    if not os.path.exists(dataDir):
        os.makedirs(dataDir)

    # Download images if not available locally
    if not os.path.exists(imgDir):
        os.makedirs(imgDir)
        print("Downloading images to " + imgZipFile + " ...")
        with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
            shutil.copyfileobj(resp, out)
        print("... done downloading.")
        print("Unzipping " + imgZipFile)
        with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
            zip_ref.extractall(dataDir)
        print("... done unzipping")
    print("Will use images in " + imgDir)

    # Setup annotations data paths
    annDir = "{}/annotations".format(dataDir)
    if dataType == "minival":
        annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
        annFile = "{}/instances_minival2014.json".format(annDir)
        annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
        unZipDir = annDir
    elif dataType == "valminusminival":
        annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
        annFile = "{}/instances_valminusminival2014.json".format(annDir)
        annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
        unZipDir = annDir
    else:
        annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
        annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
        annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
        unZipDir = dataDir
    # print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)

    # Download annotations if not available locally
    if not os.path.exists(annDir):
        os.makedirs(annDir)
    if not os.path.exists(annFile):
        if not os.path.exists(annZipFile):
            print("Downloading zipped annotations to " + annZipFile + " ...")
            with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
                shutil.copyfileobj(resp, out)
            print("... done downloading.")
        print("Unzipping " + annZipFile)
        with zipfile.ZipFile(annZipFile, "r") as zip_ref:
            zip_ref.extractall(unZipDir)
        print("... done unzipping")
    print("Will use annotations in " + annFile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, de...
[ "0.72907704", "0.71969664", "0.64465344", "0.633483", "0.61967856", "0.61923695", "0.59949607", "0.59754235", "0.5947754", "0.5928412", "0.5897793", "0.5720691", "0.5706716", "0.56930333", "0.56358683", "0.5610616", "0.5585786", "0.55818254", "0.553139", "0.55226374", "0.5510...
0.51952803
50
Load instance masks for the given image. Different datasets use different ways to store masks. This function converts the different mask format to one format in the form of a bitmap [height, width, instances].
def load_mask(self, image_id):
    """Load instance masks for the given image.

    Different datasets use different ways to store masks. This function
    converts the different mask formats to one common format: a bitmap of
    shape [height, width, instances].

    Returns:
        masks: bool array of shape [height, width, instance_count].
        class_ids: int32 array of class IDs (negative IDs mark crowd
            regions).
    """
    # If not a COCO image, delegate to parent class.
    image_info = self.image_info[image_id]
    if image_info["source"] != "coco":
        # NOTE: this calls ActivityDataset.load_mask()
        return super(ExtendedCocoDataset, self).load_mask(image_id, common.COCO_NUM_CLASSES)

    instance_masks = []
    class_ids = []
    annotations = self.image_info[image_id]["annotations"]
    # Build mask of shape [height, width, instance_count] and list
    # of class IDs that correspond to each channel of the mask.
    for annotation in annotations:
        class_id = self.map_source_class_id(
            "coco.{}".format(annotation['category_id']))
        if class_id:
            m = self.annToMask(annotation, image_info["height"], image_info["width"])
            # Some objects are so small that they're less than 1 pixel area
            # and end up rounded out. Skip those objects.
            if m.max() < 1:
                continue
            # Is it a crowd? If so, use a negative class ID.
            if annotation['iscrowd']:
                # Use negative class ID for crowds
                class_id *= -1
                # For crowd masks, annToMask() sometimes returns a mask
                # smaller than the given dimensions. If so, resize it.
                if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
                    m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
            instance_masks.append(m)
            class_ids.append(class_id)

    # Pack instance masks into an array
    if class_ids:
        # Bug fix: the deprecated ``np.bool`` alias was removed in
        # NumPy 1.24; the builtin ``bool`` is the documented replacement.
        mask = np.stack(instance_masks, axis=2).astype(bool)
        class_ids = np.array(class_ids, dtype=np.int32)
        return mask, class_ids
    else:
        # Call super class to return an empty mask
        return super(CocoDataset, self).load_mask(image_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n # Read mask files from .png image\n mask = []\n # for f in next(os.walk(mask_di...
[ "0.75807637", "0.75707793", "0.7464837", "0.74508286", "0.73990464", "0.7395329", "0.73903126", "0.733224", "0.72805625", "0.72684896", "0.72458494", "0.7228455", "0.7193414", "0.71730536", "0.7096771", "0.70429015", "0.69910985", "0.688985", "0.6886687", "0.6880056", "0.6802...
0.6600161
30
Return a link to the image in the COCO Website.
def image_reference(self, image_id):
    """Return a link to the image on the COCO website.

    For non-COCO images the lookup is delegated to the superclass.
    """
    info = self.image_info[image_id]
    if info["source"] == "coco":
        return "http://cocodataset.org/#explore?id={}".format(info["id"])
    # Bug fix: the original dropped the superclass result (implicitly
    # returning None); propagate whatever the parent implementation returns.
    return super(CocoDataset, self).image_reference(image_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_comic_url(session, response):\n soup = bs(response.text, 'lxml')\n for div in soup.find_all('div', class_=\"img-comic-container\"):\n for a in div.find_all('a', class_=\"img-comic-link\"):\n for img in a.find_all('img', src=True):\n return \"https:\" + img['src'...
[ "0.7119464", "0.6980274", "0.67780954", "0.672729", "0.65255696", "0.6525551", "0.65081525", "0.64593345", "0.64593345", "0.64593345", "0.64545566", "0.64158964", "0.64125633", "0.62915957", "0.62915957", "0.62628806", "0.62458694", "0.62403053", "0.6228512", "0.6224288", "0....
0.69248754
2
Convert annotation which can be polygons, uncompressed RLE to RLE.
def annToRLE(self, ann, height, width):
    """Convert an annotation (polygons or uncompressed RLE) to RLE.

    Already-compressed RLE annotations are returned unchanged.
    """
    segm = ann['segmentation']
    if isinstance(segm, list):
        # polygon -- a single object might consist of multiple parts;
        # encode every part, then merge them into one RLE mask code.
        parts = maskUtils.frPyObjects(segm, height, width)
        return maskUtils.merge(parts)
    if isinstance(segm['counts'], list):
        # uncompressed RLE: compress it.
        return maskUtils.frPyObjects(segm, height, width)
    # already an RLE
    return ann['segmentation']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annToRLE(self, ann):\n t = self.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentation']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n ...
[ "0.73268646", "0.730984", "0.72336936", "0.6892055", "0.6486158", "0.60719514", "0.58431333", "0.56835", "0.56191874", "0.5562281", "0.5433442", "0.54270655", "0.53969246", "0.53223056", "0.52950984", "0.5280965", "0.5236743", "0.52261084", "0.52257967", "0.5216231", "0.51639...
0.7293852
3
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
def annToMask(self, ann, height, width):
    """Convert an annotation (polygons, uncompressed RLE, or RLE) to a
    binary mask by first converting it to RLE and then decoding."""
    return maskUtils.decode(self.annToRLE(ann, height, width))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annToMask(self, ann):\n rle = self.annToRLE(ann)\n m = maskUtils.decode(rle)\n return m", "def annToMask(ann, height, width):\n rle = annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m", "def annToMask(self, ann, height, width):\n rle = self.annToRLE(an...
[ "0.64569485", "0.6399482", "0.6213335", "0.605465", "0.6021838", "0.5922467", "0.58719254", "0.57193065", "0.57102245", "0.5670913", "0.5670913", "0.5670913", "0.5657575", "0.5603988", "0.5571696", "0.5569111", "0.55674887", "0.5559433", "0.5557507", "0.55520123", "0.55243313...
0.6287126
4
Normalize the feature matrix for training, store the normal mean & normal min
def normalize(self, feature_matrix):
    """Normalize the feature matrix for training.

    Computes and stores the per-column minimum (``self.norm_min``) and
    per-column mean (``self.norm_mean``) of ``feature_matrix``, then
    returns ``self.apply_normal(feature_matrix)``.

    Args:
        feature_matrix: list of equal-length numeric rows.

    Returns:
        The normalized matrix from ``self.apply_normal``, or None when
        ``feature_matrix`` is empty.
    """
    if not feature_matrix:
        return None
    columns = list(zip(*feature_matrix))
    self.norm_min = [min(col) for col in columns]
    # Bug fix: the original assigned the lazy ``map`` object itself, which
    # is a one-shot iterator in Python 3 and becomes empty after the first
    # use; store a concrete list instead.
    row_count = float(len(feature_matrix))
    self.norm_mean = [float(sum(col)) / row_count for col in columns]
    return self.apply_normal(feature_matrix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def featureNormalization(X):\n mean=np.hstack(np.mean(X[:,0]),np.mean(X[:,1]),np.mean(X[:,2]))\n std=np.hstack(np.std(X[:,0]),np.std(X[:,1]),np.std(X[:,2]))\n \n X_norm = (X - mean)/std\n \n return X_norm", "def feature_normalize(X):\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=...
[ "0.78171694", "0.77575696", "0.77201444", "0.77085596", "0.7680281", "0.7661087", "0.76458883", "0.762358", "0.74592733", "0.740253", "0.7402252", "0.7352509", "0.7277138", "0.7251356", "0.7241375", "0.7223175", "0.7204997", "0.7183116", "0.7146349", "0.7146349", "0.7142496",...
0.7496143
8
May not fit the message content limit
def fancy_traceback(exc: Exception) -> str:
    """Render *exc*'s traceback as a fenced ``py`` code block.

    The traceback text is truncated from the front (keeping the most
    recent frames) so the whole result stays within a 4096-character
    message content limit.
    """
    frames = traceback.format_exception(type(exc), exc, exc.__traceback__)
    dump = "".join(frames)
    return f"```py\n{dump[-4086:]}\n```"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_long_message(self):\n message = \"few characters\"\n message_displayed = truncate_message(message, limit=5)\n\n self.assertLessEqual(len(message_displayed), 5)\n self.assertEqual(message_displayed, \"fe...\")", "def limit_size(msg, max_size, trunc_symbol=\"...\"):\n if len...
[ "0.6797087", "0.6786985", "0.66775584", "0.6666431", "0.6509975", "0.64643615", "0.64408034", "0.6314759", "0.62594056", "0.62594056", "0.62315077", "0.6188969", "0.6176606", "0.6108867", "0.6095591", "0.6054365", "0.60128444", "0.5996038", "0.5942682", "0.5922216", "0.592221...
0.0
-1
Solution for part one.
def solve_part_one(self):
    """Solution for part one.

    Collects the sixth character of eight successive matching hashes
    (found via ``self.find_next_hash``) into the password.
    """
    chars = []
    search_from = 0
    while len(chars) < 8:
        digest, hit_index = self.find_next_hash(search_from)
        chars.append(digest[5])
        search_from = hit_index + 1
    return "".join(chars)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task4_1(self):\n\n pass", "def exo2():", "def task4(self):\n\n pass", "def substantiate():", "def apply(self) -> None:", "def apply(self) -> None:", "def support(self):", "def mezclar_bolsa(self):", "def solve(self):", "def solvate(self):\n\n pass", "def falcon():", "...
[ "0.67894316", "0.6702227", "0.64681834", "0.6225672", "0.62181926", "0.62181926", "0.6214918", "0.62091845", "0.61323327", "0.6128199", "0.6067991", "0.60675985", "0.6043714", "0.602853", "0.60285074", "0.60285074", "0.60218054", "0.6005739", "0.5983086", "0.5963465", "0.5963...
0.0
-1
Solution for part two.
def solve_part_two(self):
    """Solution for part two.

    The sixth character of each matching hash names the password slot
    (0-7) and the seventh character is its value; slots are filled only
    once, and out-of-range or already-filled slots are skipped.
    """
    slots = ["X"] * 8
    search_from = 0
    filled = 0
    while filled < 8:
        digest, hit_index = self.find_next_hash(search_from)
        search_from = hit_index + 1
        pos = ord(digest[5]) - ord("0")
        # Offset invalid, or slot already set by an earlier hash?
        if pos >= 8 or slots[pos] != "X":
            continue
        slots[pos] = digest[6]
        filled += 1
    return "".join(slots)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_113():\r\n pass", "def exo2():", "def exercise_b2_82():\r\n pass", "def exercise_b2_106():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_107():\...
[ "0.6879843", "0.6871387", "0.6860534", "0.6821481", "0.6766025", "0.67570114", "0.6722933", "0.66447437", "0.6609891", "0.65726656", "0.6570282", "0.65633136", "0.6554844", "0.6482532", "0.6472665", "0.64377874", "0.6428559", "0.6427989", "0.63757336", "0.6357774", "0.6328711...
0.0
-1
Updates this store's current state with incoming data from the network. data should be a mapping containing 'metacontacts', 'order', and 'info' structures (see comment at top of file)
def update_data(self, data): rebuild = False # This method needs to substitute some defaultdicts for the normal # dictionaries that come back from the server. # Metacontact information #if data['metacontacts'] mc_dict = data.get('metacontacts', {}) if not isinstance(mc_dict, dict): log.critical('invalid metacontacts dictionary') mc_dict = {} # Contact information like SMS numbers and email addresses. self.info = defaultdict(dict) si = self.info if 'info' in data: for (k, v) in data['info'].iteritems(): if isinstance(k, str): cmpk = k.decode('utf8') else: cmpk = k if not isinstance(cmpk, unicode): continue if cmpk.startswith('Meta') or any((cmpk.endswith('_' + prot) for prot in protocols.iterkeys())): if any(v.values()): si[k] = v for c, v in si.iteritems(): for attr in ('email', 'sms'): if attr in v: self.contact_info_changed(c, attr, v[attr]) self.metacontacts = MetaContactManager(self, mc_dict) if hasattr(self, 'new_sorter'): on_thread('sorter').call(self.new_sorter.removeAllContacts) rebuild = True # Manual ordering of groups try: self.order = deepcopy(data['order']) self.order['groups'] = list(oset(self.order['groups'])) contacts = self._filtered_contacts() self.order['contacts'] = defaultdict(list) self.order['contacts'].update(contacts) except Exception: log.critical('error receiving order') self._init_order() # note: loading tofrom data from the network is deprecated. this data # now goes out to disk. see save/load_local_data if 'tofrom' in data and isinstance(data['tofrom'], dict) and \ 'im' in data['tofrom'] and 'email' in data['tofrom']: self.dispatch.set_tofrom(deepcopy(data['tofrom'])) if rebuild: self.rebuild() self.update_order()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_state_notification(self, data):\n\n self.channel_data.update(data)\n\n # synchronize DataManager data with processed update & entity data\n self.sync_data_update_ha()", "def update(self, data):\n logging.info('update state', data)\n self._client.update_state(data)\n\n ...
[ "0.6476063", "0.62110347", "0.61491877", "0.6079043", "0.6079043", "0.6079043", "0.6079043", "0.60150313", "0.59856397", "0.592851", "0.5848428", "0.58295083", "0.58125436", "0.579534", "0.5732395", "0.5716034", "0.56988144", "0.5688092", "0.5683766", "0.5636451", "0.5636451"...
0.70325893
0
translates an rgb tuple of int to a tkinter friendly color code
def _from_rgb(self, rgb): return "#%02x%02x%02x" % rgb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def int2color_tuple(x):\n red_val = int(1000 * x % 255)\n green_val = int(10000 * x % 255)\n blue_val = int(100000 * x % 255)\n return red_val, green_val, blue_val", "def matplotlib_rgb_color(rgb_color):\r\n return tuple([i / 255. for i in rgb_color])", "def translate_rgb(rgb_tuple):\n mapped...
[ "0.7461026", "0.70466757", "0.70459116", "0.70223033", "0.69848394", "0.69848394", "0.69848394", "0.69848394", "0.6970065", "0.6949212", "0.6946806", "0.6917932", "0.68870723", "0.68848807", "0.6877446", "0.6874237", "0.68409014", "0.68385947", "0.68327475", "0.6821566", "0.6...
0.7030737
3
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
def __init__(self,*args):
    # No-op initializer: accepts and discards any positional arguments.
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signa...
[ "0.7455761", "0.7428699", "0.7428699", "0.7428699", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", ...
0.0
-1
Read file returns read file as string
def rSeqFile(FilePath):
    """Read the file at ``FilePath`` and return its contents as a string.

    Bug fix: the original opened the file without ever closing it (and
    guarded on ``f.mode == 'r'``, which is always true for a file opened
    with mode 'r'); a ``with`` block closes the handle deterministically.
    """
    with open(FilePath, 'r') as f:
        return f.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def read_file(file):\n with open(file, 'r') as f:\n file_string = f.read()\n return file_string", "def read_file(self, file: Path) -> str:\n with open(file) as f:\n return f.read()", ...
[ "0.8561569", "0.81707746", "0.8130362", "0.8053142", "0.8042461", "0.8016344", "0.80138814", "0.80110985", "0.8004594", "0.7972023", "0.79629755", "0.796143", "0.79515415", "0.7903697", "0.7901838", "0.7858624", "0.7825868", "0.7805045", "0.7800547", "0.77981794", "0.7788316"...
0.0
-1
Tidying Lines preparation for process, delete empty lines, split lines returns list of lines
def TidyLines(SeqFile):
    """Prepare file contents for processing: split into lines and drop
    empty ones.

    Returns:
        List of non-empty lines.
    """
    return [line for line in SeqFile.splitlines() if line]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(lines):\n lines = list(map(_clean, lines))\n # lines = list(map(_split, lines))\n return lines", "def clean_lines(lines):\n _lines = []\n for l in lines:\n l = l.strip().rstrip()\n if len(l) > 0:\n _lines.append(l)\n return _lines", "def __format_lines(cls...
[ "0.7803936", "0.7344712", "0.72013485", "0.7061363", "0.70215124", "0.6967967", "0.6914489", "0.6854614", "0.66259325", "0.6553807", "0.6536147", "0.65114903", "0.65092206", "0.6506624", "0.64733094", "0.64733094", "0.6465733", "0.6464278", "0.6462324", "0.6399896", "0.639333...
0.6184828
31
Error in label only whitespace allowed, no tabs if checked label differs, raise an error
def CheckLabel(Line):
    """Validate leading whitespace of a label line: spaces are allowed,
    tabs are not.

    Scans characters from the start; a tab before the first non-space
    character raises ``InputError``; the scan stops at the first
    non-space character.
    """
    for ch in Line:
        if ch == '\t':
            raise InputError(Line, "malformed input")
        if ch != ' ':
            break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)", "def checkLabel(label):\n\n label = str(label)\n if not label:\n raise ValueError('label cannot be empty string')\n\n label = str(label)\n\n if not label:\...
[ "0.67585087", "0.63081664", "0.6136866", "0.6121706", "0.6079675", "0.6045867", "0.5986271", "0.5970314", "0.5936593", "0.5910567", "0.58990806", "0.58863115", "0.5881466", "0.5871345", "0.58665997", "0.5764735", "0.57456005", "0.5732533", "0.5719328", "0.57080424", "0.569958...
0.7484639
0
Errors in sequence Checking sequence for allowed characters in sequence. only A,C,G,T if checked sequence differs, raise an error
def CheckSeq(Seq):
    """Validate a nucleotide sequence: only A, C, G, T are allowed.

    Raises:
        InputError: if any other character appears in ``Seq``.
    """
    allowed = {"A", "C", "G", "T"}
    for base in Seq:
        if base not in allowed:
            raise InputError(Seq, "malformed input")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isSequenceValid(sequence):\n if not sequence:\n return False\n allowed_chars = set('GCAU')\n return set(sequence).issubset(allowed_chars)", "def seq_validator(sequence):\n\n # checks for ascii characters that should not appear in a fasta sequence\n seq_val = re.compile(r\"[.-@|\\s| -)|z...
[ "0.72993267", "0.7268155", "0.7225839", "0.70450157", "0.7042472", "0.666945", "0.6569683", "0.65460646", "0.6537845", "0.6512465", "0.64319974", "0.63730264", "0.6364683", "0.6364558", "0.63401854", "0.6320243", "0.6189944", "0.61825746", "0.6175934", "0.61738986", "0.615814...
0.7165377
3
parsing a given text file containing labels and sequences load file, tidy it, process each line in the file return the labels and sequences as list[tuple(string,string)]
def ParseSeqFile(FilePath):
    """Parse a text file containing labels and sequences.

    Loads the file, tidies it (drops empty lines), and processes each
    remaining line.

    Returns:
        List of ``(label, sequence)`` tuples, one per processed line.
    """
    tidy_lines = TidyLines(rSeqFile(FilePath))
    return [ProcessLine(line) for line in tidy_lines]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n ...
[ "0.67697734", "0.6637289", "0.65705514", "0.65653694", "0.6479006", "0.64705706", "0.64496464", "0.6351888", "0.6281873", "0.6273862", "0.62096", "0.6180452", "0.6150285", "0.6150257", "0.61439294", "0.612267", "0.6109716", "0.6100851", "0.60979813", "0.60872257", "0.60840577...
0.6663826
1
Parametrize tests for pytest to use a
def pytest_generate_tests_for_pyopencl_array_context(metafunc) -> None:
    """Deprecated shim that parametrizes tests with the 'pyopencl-deprecated'
    array-context factory; emits a DeprecationWarning pointing at the
    replacement API."""
    from warnings import warn
    warn("pytest_generate_tests_for_pyopencl_array_context is deprecated. "
            "Use 'pytest_generate_tests = "
            "arraycontext.pytest_generate_tests_for_array_contexts"
            "([\"pyopencl-deprecated\"])' instead. "
            "pytest_generate_tests_for_pyopencl_array_context will stop working "
            "in 2022.",
            DeprecationWarning, stacklevel=2)

    # Build the real hook with the deprecated factory name, then invoke it.
    hook = pytest_generate_tests_for_array_contexts(
            ["pyopencl-deprecated"], factory_arg_name="actx_factory")
    hook(metafunc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_generate_tests(metafunc):\n for param in ['env', 'browser', 'logging_level', 'env_file', 'name', 'jenkins_url', 'slack', 'output', 'email_retries',\n 'email_search_errors']:\n option_value = getattr(metafunc.config.option, param)\n if param in metafunc.fixturenames:\n ...
[ "0.69219553", "0.68522805", "0.68168706", "0.6790184", "0.6683413", "0.6502182", "0.6483538", "0.6483538", "0.64723897", "0.6470273", "0.6441867", "0.63857716", "0.63678086", "0.63678086", "0.6332544", "0.62898874", "0.6277179", "0.62686515", "0.62585187", "0.62364167", "0.62...
0.0
-1